// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_reserved_mem.h>
#include <linux/dma-direct.h> /* for bus_dma_region */
#include <linux/dma-map-ops.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/platform_device.h>

#include <asm/errno.h>
#include "of_private.h"
/**
 * of_match_device - Tell if a struct device matches an of_device_id list
 * @matches: array of of device match structures to search in
 * @dev: the of device structure to match against
 *
 * Used by a driver to check whether a platform_device present in the
 * system is in its list of supported devices.
 */
const struct of_device_id *of_match_device(const struct of_device_id *matches,
					   const struct device *dev)
{
	if (!matches || !dev->of_node || dev->of_node_reused)
		return NULL;

	return of_match_node(matches, dev->of_node);
}
EXPORT_SYMBOL(of_match_device);
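
/*
 * Usage sketch (illustrative only, not part of this file): a hypothetical
 * platform driver checking at probe time that the device it was bound to is
 * one it supports. All "foo"/"vendor" names are assumptions.
 */
#if 0
static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo-ctrl" },
	{ /* sentinel */ }
};

static int foo_probe(struct platform_device *pdev)
{
	/* NULL means the node is absent, reused, or not in foo_of_match */
	if (!of_match_device(foo_of_match, &pdev->dev))
		return -EINVAL;

	return 0;
}
#endif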

int of_device_add(struct platform_device *ofdev)
{
	BUG_ON(ofdev->dev.of_node == NULL);

	/* name and id have to be set so that the platform bus doesn't get
	 * confused on matching */
	ofdev->name = dev_name(&ofdev->dev);
	ofdev->id = PLATFORM_DEVID_NONE;

	/*
	 * If this device does not have a NUMA node binding in the devicetree
	 * (i.e. of_node_to_nid() returns NUMA_NO_NODE), device_add() will
	 * assume it is on the same node as its parent.
	 */
	set_dev_node(&ofdev->dev, of_node_to_nid(ofdev->dev.of_node));

	return device_add(&ofdev->dev);
}

static void
of_dma_set_restricted_buffer(struct device *dev, struct device_node *np)
{
	struct device_node *node, *of_node = dev->of_node;
	int count, i;

	if (!IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL))
		return;

	count = of_property_count_elems_of_size(of_node, "memory-region",
						sizeof(u32));
	/*
	 * If dev->of_node doesn't exist or doesn't contain memory-region, try
	 * the OF node having DMA configuration.
	 */
	if (count <= 0) {
		of_node = np;
		count = of_property_count_elems_of_size(
			of_node, "memory-region", sizeof(u32));
	}

	for (i = 0; i < count; i++) {
		node = of_parse_phandle(of_node, "memory-region", i);
		/*
		 * There might be multiple memory regions, but only one
		 * restricted-dma-pool region is allowed.
		 */
		if (of_device_is_compatible(node, "restricted-dma-pool") &&
		    of_device_is_available(node)) {
			of_node_put(node);
			break;
		}
		of_node_put(node);
	}

	/*
	 * Attempt to initialize a restricted-dma-pool region if one was found.
	 * Note that count can hold a negative error code.
	 */
	if (i < count && of_reserved_mem_device_init_by_idx(dev, of_node, i))
		dev_warn(dev, "failed to initialise \"restricted-dma-pool\" memory node\n");
}
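
/*
 * Illustrative devicetree fragment (an assumption, not taken from a real
 * platform) showing the shape of data the helper above looks for: the
 * device's "memory-region" references a reserved-memory node compatible
 * with "restricted-dma-pool".
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		restricted_dma: restricted-dma@50000000 {
 *			compatible = "restricted-dma-pool";
 *			reg = <0x50000000 0x400000>;
 *		};
 *	};
 *
 *	dma-device@10000000 {
 *		memory-region = <&restricted_dma>;
 *	};
 */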

/**
 * of_dma_configure_id - Setup DMA configuration
 * @dev:	Device to apply DMA configuration
 * @np:		Pointer to OF node having DMA configuration
 * @force_dma:	Whether device is to be set up by of_dma_configure() even if
 *		DMA capability is not explicitly described by firmware.
 * @id:		Optional const pointer value input id
 *
 * Try to get the device's DMA configuration from DT and update it
 * accordingly.
 *
 * If platform code needs to use its own special DMA configuration, it
 * can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events
 * to fix up DMA configuration.
 */
int of_dma_configure_id(struct device *dev, struct device_node *np,
			bool force_dma, const u32 *id)
{
	const struct iommu_ops *iommu;
	const struct bus_dma_region *map = NULL;
	struct device_node *bus_np;
	u64 dma_start = 0;
	u64 mask, end, size = 0;
	bool coherent;
	int ret;

	if (np == dev->of_node)
		bus_np = __of_get_dma_parent(np);
	else
		bus_np = of_node_get(np);

	ret = of_dma_get_range(bus_np, &map);
	of_node_put(bus_np);
	if (ret < 0) {
		/*
		 * For legacy reasons, we have to assume some devices need
		 * DMA configuration regardless of whether "dma-ranges" is
		 * correctly specified or not.
		 */
		if (!force_dma)
			return ret == -ENODEV ? 0 : ret;
	} else {
		const struct bus_dma_region *r = map;
		u64 dma_end = 0;

		/* Determine the overall bounds of all DMA regions */
		for (dma_start = ~0; r->size; r++) {
			/* Take lower and upper limits */
			if (r->dma_start < dma_start)
				dma_start = r->dma_start;
			if (r->dma_start + r->size > dma_end)
				dma_end = r->dma_start + r->size;
		}
		size = dma_end - dma_start;

		/*
		 * Add a work around to treat the size as mask + 1 in case
		 * it is defined in DT as a mask.
		 */
		if (size & 1) {
			dev_warn(dev, "Invalid size 0x%llx for dma-range(s)\n",
				 size);
			size = size + 1;
		}

		if (!size) {
			dev_err(dev, "Adjusted size 0x%llx invalid\n", size);
			kfree(map);
			return -EINVAL;
		}
	}

	/*
	 * If @dev is expected to be DMA-capable then the bus code that created
	 * it should have initialised its dma_mask pointer by this point. For
	 * now, we'll continue the legacy behaviour of coercing it to the
	 * coherent mask if not, but we'll no longer do so quietly.
	 */
	if (!dev->dma_mask) {
		dev_warn(dev, "DMA mask not set\n");
		dev->dma_mask = &dev->coherent_dma_mask;
	}

	if (!size && dev->coherent_dma_mask)
		size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
	else if (!size)
		size = 1ULL << 32;

	/*
	 * Limit coherent and dma mask based on size and default mask
	 * set by the driver.
	 */
	end = dma_start + size - 1;
	/*
	 * Round the mask up rather than down, so that it covers every address
	 * bit the device may need to drive, not just the range below
	 * dma_start + size.
	 */
	mask = DMA_BIT_MASK(ilog2(end) + 1);
	dev->coherent_dma_mask &= mask;
	*dev->dma_mask &= mask;
	/* ...but only set bus limit and range map if we found valid dma-ranges earlier */
	if (!ret) {
		dev->bus_dma_limit = end;
		dev->dma_range_map = map;
	}

	coherent = of_dma_is_coherent(np);
	dev_dbg(dev, "device is%sdma coherent\n",
		coherent ? " " : " not ");

	iommu = of_iommu_configure(dev, np, id);
	if (PTR_ERR(iommu) == -EPROBE_DEFER) {
		/* Don't touch range map if it wasn't set from a valid dma-ranges */
		if (!ret)
			dev->dma_range_map = NULL;
		kfree(map);
		return -EPROBE_DEFER;
	}

	dev_dbg(dev, "device is%sbehind an iommu\n",
		iommu ? " " : " not ");

	arch_setup_dma_ops(dev, dma_start, size, iommu, coherent);

	if (!iommu)
		of_dma_set_restricted_buffer(dev, np);

	return 0;
}
EXPORT_SYMBOL_GPL(of_dma_configure_id);
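
/*
 * Usage sketch (an assumption, not part of this file): bus code normally
 * reaches this path through the of_dma_configure() wrapper from its
 * dma_configure() hook. The "foo" name is illustrative; whether force_dma
 * should be true depends on the bus.
 */
#if 0
static int foo_bus_dma_configure(struct device *dev)
{
	if (!dev->of_node)
		return 0;

	return of_dma_configure(dev, dev->of_node, true);
}
#endif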

int of_device_register(struct platform_device *pdev)
{
	device_initialize(&pdev->dev);
	return of_device_add(pdev);
}
EXPORT_SYMBOL(of_device_register);

void of_device_unregister(struct platform_device *ofdev)
{
	device_unregister(&ofdev->dev);
}
EXPORT_SYMBOL(of_device_unregister);
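
/*
 * Usage sketch (assumption, not part of this file): a typical create/destroy
 * pairing for an OF-backed platform device, using of_device_alloc() from
 * drivers/of/platform.c. The "foo" name is illustrative.
 */
#if 0
static struct platform_device *foo_create_child(struct device_node *np,
						struct device *parent)
{
	struct platform_device *ofdev;

	ofdev = of_device_alloc(np, NULL, parent);
	if (!ofdev)
		return NULL;

	if (of_device_register(ofdev)) {
		platform_device_put(ofdev);
		return NULL;
	}

	/* later torn down with of_device_unregister(ofdev) */
	return ofdev;
}
#endif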

const void *of_device_get_match_data(const struct device *dev)
{
	const struct of_device_id *match;

	match = of_match_device(dev->driver->of_match_table, dev);
	if (!match)
		return NULL;

	return match->data;
}
EXPORT_SYMBOL(of_device_get_match_data);
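
/*
 * Usage sketch (illustrative, all "foo" names are assumptions): drivers
 * commonly attach per-compatible data via .data in their match table and
 * retrieve it with of_device_get_match_data() at probe time.
 */
#if 0
struct foo_variant {
	unsigned int num_channels;
};

static const struct foo_variant foo_v1 = { .num_channels = 4 };

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo-v1", .data = &foo_v1 },
	{ /* sentinel */ }
};

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_variant *variant;

	variant = of_device_get_match_data(&pdev->dev);
	if (!variant)
		return -ENODEV;

	dev_info(&pdev->dev, "%u channels\n", variant->num_channels);
	return 0;
}
#endif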

static ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len)
{
	const char *compat;
	char *c;
	struct property *p;
	ssize_t csize;
	ssize_t tsize;

	if ((!dev) || (!dev->of_node))
		return -ENODEV;

	/* Name & Type */
	/* %p eats all alphanum characters, so %c must be used here */
	csize = snprintf(str, len, "of:N%pOFn%c%s", dev->of_node, 'T',
			 of_node_get_device_type(dev->of_node));
	tsize = csize;
	len -= csize;
	if (str)
		str += csize;

	of_property_for_each_string(dev->of_node, "compatible", p, compat) {
		csize = strlen(compat) + 1;
		tsize += csize;
		if (csize > len)
			continue;

		csize = snprintf(str, len, "C%s", compat);
		for (c = str; c; ) {
			c = strchr(c, ' ');
			if (c)
				*c++ = '_';
		}
		len -= csize;
		str += csize;
	}

	return tsize;
}

int of_device_request_module(struct device *dev)
{
	char *str;
	ssize_t size;
	int ret;

	size = of_device_get_modalias(dev, NULL, 0);
	if (size < 0)
		return size;

	str = kmalloc(size + 1, GFP_KERNEL);
	if (!str)
		return -ENOMEM;

	of_device_get_modalias(dev, str, size);
	str[size] = '\0';

	ret = request_module(str);
	kfree(str);

	return ret;
}
EXPORT_SYMBOL_GPL(of_device_request_module);
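
/*
 * Usage sketch (assumption, not part of this file): a parent driver may ask
 * userspace to load the module matching one of its OF-described children
 * before trying to bind it. The "foo" name is illustrative.
 */
#if 0
static void foo_preload_child_module(struct platform_device *child)
{
	int ret = of_device_request_module(&child->dev);

	if (ret)
		dev_dbg(&child->dev, "module request failed: %d\n", ret);
}
#endif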

/**
 * of_device_modalias - Fill buffer with newline terminated modalias string
 * @dev:	Calling device
 * @str:	Modalias string
 * @len:	Size of @str
 */
ssize_t of_device_modalias(struct device *dev, char *str, ssize_t len)
{
	ssize_t sl = of_device_get_modalias(dev, str, len - 2);

	if (sl < 0)
		return sl;
	if (sl > len - 2)
		return -ENOMEM;

	str[sl++] = '\n';
	str[sl] = 0;
	return sl;
}
EXPORT_SYMBOL_GPL(of_device_modalias);
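
/*
 * Usage sketch (assumption, mirrors how bus code typically exposes a sysfs
 * "modalias" attribute): the show handler can delegate to of_device_modalias()
 * for OF-described devices.
 */
#if 0
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	ssize_t len = of_device_modalias(dev, buf, PAGE_SIZE);

	return len < 0 ? 0 : len;
}
#endif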

/**
 * of_device_uevent - Display OF related uevent information
 * @dev:	Device to display the uevent information for
 * @env:	Kernel object's userspace event reference to fill up
 */
void of_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const char *compat, *type;
	struct alias_prop *app;
	struct property *p;
	int seen = 0;

	if ((!dev) || (!dev->of_node))
		return;

	add_uevent_var(env, "OF_NAME=%pOFn", dev->of_node);
	add_uevent_var(env, "OF_FULLNAME=%pOF", dev->of_node);
	type = of_node_get_device_type(dev->of_node);
	if (type)
		add_uevent_var(env, "OF_TYPE=%s", type);

	/* Since the compatible field can contain pretty much anything
	 * it's not really legal to split it out with commas. We split it
	 * up using a number of environment variables instead. */
	of_property_for_each_string(dev->of_node, "compatible", p, compat) {
		add_uevent_var(env, "OF_COMPATIBLE_%d=%s", seen, compat);
		seen++;
	}
	add_uevent_var(env, "OF_COMPATIBLE_N=%d", seen);

	seen = 0;
	mutex_lock(&of_mutex);
	list_for_each_entry(app, &aliases_lookup, link) {
		if (dev->of_node == app->np) {
			add_uevent_var(env, "OF_ALIAS_%d=%s", seen,
				       app->alias);
			seen++;
		}
	}
	mutex_unlock(&of_mutex);
}

int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env)
{
	int sl;

	if ((!dev) || (!dev->of_node))
		return -ENODEV;

	/* Devicetree modalias is tricky, we add it in 2 steps */
	if (add_uevent_var(env, "MODALIAS="))
		return -ENOMEM;

	sl = of_device_get_modalias(dev, &env->buf[env->buflen - 1],
				    sizeof(env->buf) - env->buflen);
	if (sl >= (sizeof(env->buf) - env->buflen))
		return -ENOMEM;
	env->buflen += sl;

	return 0;
}
EXPORT_SYMBOL_GPL(of_device_uevent_modalias);
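
/*
 * Usage sketch (assumption, not part of this file): a bus uevent callback can
 * forward to of_device_uevent_modalias() so OF-described devices get a
 * MODALIAS variable. The "foo" name is illustrative, and the exact prototype
 * of the bus uevent hook varies between kernel versions.
 */
#if 0
static int foo_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	return of_device_uevent_modalias(dev, env);
}
#endif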