// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_reserved_mem.h>
#include <linux/dma-direct.h> /* for bus_dma_region */
#include <linux/dma-map-ops.h>
#include <linux/init.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/platform_device.h>

#include <asm/errno.h>
#include "of_private.h"

/**
 * of_match_device - Tell if a struct device matches an of_device_id list
 * @matches: array of of device match structures to search in
 * @dev: the of device structure to match against
 *
 * Used by a driver to check whether a platform_device present in the
 * system is in its list of supported devices.
 */
const struct of_device_id *of_match_device(const struct of_device_id *matches,
                                           const struct device *dev)
{
        if (!matches || !dev->of_node || dev->of_node_reused)
                return NULL;

        return of_match_node(matches, dev->of_node);
}
EXPORT_SYMBOL(of_match_device);

static void
of_dma_set_restricted_buffer(struct device *dev, struct device_node *np)
{
        struct device_node *node, *of_node = dev->of_node;
        int count, i;

        if (!IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL))
                return;

        count = of_property_count_elems_of_size(of_node, "memory-region",
                                                sizeof(u32));
        /*
         * If dev->of_node doesn't exist or doesn't contain memory-region, try
         * the OF node having DMA configuration.
         */
        if (count <= 0) {
                of_node = np;
                count = of_property_count_elems_of_size(
                        of_node, "memory-region", sizeof(u32));
        }

        for (i = 0; i < count; i++) {
                node = of_parse_phandle(of_node, "memory-region", i);
                /*
                 * There might be multiple memory regions, but only one
                 * restricted-dma-pool region is allowed.
                 */
                if (of_device_is_compatible(node, "restricted-dma-pool") &&
                    of_device_is_available(node)) {
                        of_node_put(node);
                        break;
                }
                of_node_put(node);
        }

        /*
         * Attempt to initialize a restricted-dma-pool region if one was found.
         * Note that count can hold a negative error code.
         */
        if (i < count && of_reserved_mem_device_init_by_idx(dev, of_node, i))
                dev_warn(dev, "failed to initialise \"restricted-dma-pool\" memory node\n");
}
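
/*
 * Illustrative sketch (not part of the original file) of the devicetree
 * layout the lookup above expects: a reserved-memory child compatible with
 * "restricted-dma-pool", referenced from the device's "memory-region"
 * property. Node names and addresses below are hypothetical.
 *
 *	reserved-memory {
 *		#address-cells = <2>;
 *		#size-cells = <2>;
 *		ranges;
 *
 *		restricted_dma: restricted-dma@50000000 {
 *			compatible = "restricted-dma-pool";
 *			reg = <0x0 0x50000000 0x0 0x400000>;
 *		};
 *	};
 *
 *	some-device@40000000 {
 *		memory-region = <&restricted_dma>;
 *	};
 */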

/**
 * of_dma_configure_id - Setup DMA configuration
 * @dev:	Device to apply DMA configuration
 * @np:		Pointer to OF node having DMA configuration
 * @force_dma:  Whether device is to be set up by of_dma_configure() even if
 *		DMA capability is not explicitly described by firmware.
 * @id:		Optional const pointer value input id
 *
 * Try to get the device's DMA configuration from DT and update it
 * accordingly.
 *
 * If platform code needs to use its own special DMA configuration, it
 * can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events
 * to fix up DMA configuration.
 */
int of_dma_configure_id(struct device *dev, struct device_node *np,
                        bool force_dma, const u32 *id)
{
        const struct bus_dma_region *map = NULL;
        struct device_node *bus_np;
        u64 mask, end = 0;
        bool coherent;
        int iommu_ret;
        int ret;

        if (np == dev->of_node)
                bus_np = __of_get_dma_parent(np);
        else
                bus_np = of_node_get(np);

        ret = of_dma_get_range(bus_np, &map);
        of_node_put(bus_np);
        if (ret < 0) {
                /*
                 * For legacy reasons, we have to assume some devices need
                 * DMA configuration regardless of whether "dma-ranges" is
                 * correctly specified or not.
                 */
                if (!force_dma)
                        return ret == -ENODEV ? 0 : ret;
        } else {
                /* Determine the overall bounds of all DMA regions */
                end = dma_range_map_max(map);
        }

        /*
         * If @dev is expected to be DMA-capable then the bus code that created
         * it should have initialised its dma_mask pointer by this point. For
         * now, we'll continue the legacy behaviour of coercing it to the
         * coherent mask if not, but we'll no longer do so quietly.
         */
        if (!dev->dma_mask) {
                dev_warn(dev, "DMA mask not set\n");
                dev->dma_mask = &dev->coherent_dma_mask;
        }

        if (!end && dev->coherent_dma_mask)
                end = dev->coherent_dma_mask;
        else if (!end)
                end = (1ULL << 32) - 1;

        /*
         * Limit coherent and dma mask based on size and default mask
         * set by the driver.
         */
        mask = DMA_BIT_MASK(ilog2(end) + 1);
        dev->coherent_dma_mask &= mask;
        *dev->dma_mask &= mask;
        /* ...but only set bus limit and range map if we found valid dma-ranges earlier */
        if (!ret) {
                dev->bus_dma_limit = end;
                dev->dma_range_map = map;
        }

        coherent = of_dma_is_coherent(np);
        dev_dbg(dev, "device is%sdma coherent\n",
                coherent ? " " : " not ");

        iommu_ret = of_iommu_configure(dev, np, id);
        if (iommu_ret == -EPROBE_DEFER) {
                /* Don't touch range map if it wasn't set from a valid dma-ranges */
                if (!ret)
                        dev->dma_range_map = NULL;
                kfree(map);
                return -EPROBE_DEFER;
        } else if (iommu_ret == -ENODEV) {
                dev_dbg(dev, "device is not behind an iommu\n");
        } else if (iommu_ret) {
                dev_err(dev, "iommu configuration for device failed with %pe\n",
                        ERR_PTR(iommu_ret));

                /*
                 * Historically this routine doesn't fail driver probing
                 * due to errors in of_iommu_configure()
                 */
        } else
                dev_dbg(dev, "device is behind an iommu\n");

        arch_setup_dma_ops(dev, coherent);

        if (iommu_ret)
                of_dma_set_restricted_buffer(dev, np);

        return 0;
}
EXPORT_SYMBOL_GPL(of_dma_configure_id);
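
/*
 * Illustrative sketch (not part of the original file): bus code typically
 * reaches this function through the of_dma_configure() wrapper (which passes
 * a NULL id) from its dma_configure hook. The foo_bus_dma_configure() name
 * below is hypothetical.
 */
#if 0
static int foo_bus_dma_configure(struct device *dev)
{
        /* Only DT-described devices have an of_node to configure from */
        if (dev->of_node)
                return of_dma_configure(dev, dev->of_node, true);

        return 0;
}
#endif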

const void *of_device_get_match_data(const struct device *dev)
{
        const struct of_device_id *match;

        match = of_match_device(dev->driver->of_match_table, dev);
        if (!match)
                return NULL;

        return match->data;
}
EXPORT_SYMBOL(of_device_get_match_data);
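
/*
 * Illustrative sketch (not part of the original file): a driver retrieving
 * per-compatible configuration via of_device_get_match_data() in probe().
 * The bar_* names, "acme,*" compatibles and config struct are hypothetical;
 * the table would normally also be wired up via the driver's .of_match_table.
 */
#if 0
struct bar_config {
        unsigned int fifo_depth;
};

static const struct bar_config bar_v1_cfg = { .fifo_depth = 16 };
static const struct bar_config bar_v2_cfg = { .fifo_depth = 64 };

static const struct of_device_id bar_of_match[] = {
        { .compatible = "acme,bar-v1", .data = &bar_v1_cfg },
        { .compatible = "acme,bar-v2", .data = &bar_v2_cfg },
        { /* sentinel */ }
};

static int bar_probe(struct platform_device *pdev)
{
        const struct bar_config *cfg;

        /* Returns the .data of the of_device_id entry matched at bind time */
        cfg = of_device_get_match_data(&pdev->dev);
        if (!cfg)
                return -EINVAL;

        return 0;
}
#endif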

/**
 * of_device_modalias - Fill buffer with newline terminated modalias string
 * @dev:	Calling device
 * @str:	Modalias string
 * @len:	Size of @str
 */
ssize_t of_device_modalias(struct device *dev, char *str, ssize_t len)
{
        ssize_t sl;

        if (!dev || !dev->of_node || dev->of_node_reused)
                return -ENODEV;

        sl = of_modalias(dev->of_node, str, len - 2);
        if (sl < 0)
                return sl;
        if (sl > len - 2)
                return -ENOMEM;

        str[sl++] = '\n';
        str[sl] = 0;
        return sl;
}
EXPORT_SYMBOL_GPL(of_device_modalias);
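
/*
 * Illustrative sketch (not part of the original file): the modalias string
 * filled in above typically looks roughly like
 *
 *	of:NserialT(null)Cacme,uartCns16550a
 *
 * i.e. "of:N<node name>T<device_type or (null)>" followed by one "C<compat>"
 * entry per compatible, terminated here with a newline. The node name and
 * compatibles are hypothetical.
 */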

/**
 * of_device_uevent - Display OF related uevent information
 * @dev:	Device to display the uevent information for
 * @env:	Kernel object's userspace event reference to fill up
 */
void of_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
        const char *compat, *type;
        struct alias_prop *app;
        struct property *p;
        int seen = 0;

        if ((!dev) || (!dev->of_node))
                return;

        add_uevent_var(env, "OF_NAME=%pOFn", dev->of_node);
        add_uevent_var(env, "OF_FULLNAME=%pOF", dev->of_node);
        type = of_node_get_device_type(dev->of_node);
        if (type)
                add_uevent_var(env, "OF_TYPE=%s", type);

        /* Since the compatible field can contain pretty much anything
         * it's not really legal to split it out with commas. We split it
         * up using a number of environment variables instead. */
        of_property_for_each_string(dev->of_node, "compatible", p, compat) {
                add_uevent_var(env, "OF_COMPATIBLE_%d=%s", seen, compat);
                seen++;
        }
        add_uevent_var(env, "OF_COMPATIBLE_N=%d", seen);

        seen = 0;
        mutex_lock(&of_mutex);
        list_for_each_entry(app, &aliases_lookup, link) {
                if (dev->of_node == app->np) {
                        add_uevent_var(env, "OF_ALIAS_%d=%s", seen,
                                       app->alias);
                        seen++;
                }
        }
        mutex_unlock(&of_mutex);
}
EXPORT_SYMBOL_GPL(of_device_uevent);
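
/*
 * Illustrative sketch (not part of the original file): for a hypothetical
 * node /soc/serial@10013000 with compatible = "acme,uart", "ns16550a", the
 * variables added above would look roughly like:
 *
 *	OF_NAME=serial
 *	OF_FULLNAME=/soc/serial@10013000
 *	OF_COMPATIBLE_0=acme,uart
 *	OF_COMPATIBLE_1=ns16550a
 *	OF_COMPATIBLE_N=2
 */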

int of_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *env)
{
        int sl;

        if ((!dev) || (!dev->of_node) || dev->of_node_reused)
                return -ENODEV;

        /* Devicetree modalias is tricky, we add it in 2 steps */
        if (add_uevent_var(env, "MODALIAS="))
                return -ENOMEM;

        sl = of_modalias(dev->of_node, &env->buf[env->buflen - 1],
                         sizeof(env->buf) - env->buflen);
        if (sl < 0)
                return sl;
        if (sl >= (sizeof(env->buf) - env->buflen))
                return -ENOMEM;
        env->buflen += sl;

        return 0;
}
EXPORT_SYMBOL_GPL(of_device_uevent_modalias);

/**
 * of_device_make_bus_id - Use the device node data to assign a unique name
 * @dev: pointer to device structure that is linked to a device tree node
 *
 * This routine will first try using the translated bus address to
 * derive a unique name. If it cannot, then it will prepend names from
 * parent nodes until a unique name can be derived.
 */
void of_device_make_bus_id(struct device *dev)
{
        struct device_node *node = dev->of_node;
        const __be32 *reg;
        u64 addr;
        u32 mask;

        /* Construct the name, using parent nodes if necessary to ensure uniqueness */
        while (node->parent) {
                /*
                 * If the address can be translated, then that is as much
                 * uniqueness as we need. Make it the first component and return
                 */
                reg = of_get_property(node, "reg", NULL);
                if (reg && (addr = of_translate_address(node, reg)) != OF_BAD_ADDR) {
                        if (!of_property_read_u32(node, "mask", &mask))
                                dev_set_name(dev, dev_name(dev) ? "%llx.%x.%pOFn:%s" : "%llx.%x.%pOFn",
                                             addr, ffs(mask) - 1, node, dev_name(dev));
                        else
                                dev_set_name(dev, dev_name(dev) ? "%llx.%pOFn:%s" : "%llx.%pOFn",
                                             addr, node, dev_name(dev));
                        return;
                }

                /* format arguments only used if dev_name() resolves to NULL */
                dev_set_name(dev, dev_name(dev) ? "%s:%s" : "%s",
                             kbasename(node->full_name), dev_name(dev));
                node = node->parent;
        }
}
EXPORT_SYMBOL_GPL(of_device_make_bus_id);
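
/*
 * Illustrative sketch (not part of the original file): for a translatable
 * node such as serial@10013000 the loop above yields a device name like
 * "10013000.serial", while a node whose "reg" cannot be translated falls
 * back to prepending parent node names, e.g. "firmware:gpio". The node
 * names and addresses are hypothetical.
 */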