// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt)	"OF: " fmt

#include <linux/device.h>
#include <linux/fwnode.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/logic_pio.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/dma-direct.h> /* for bus_dma_region */

#include "of_private.h"

/* Max address size we deal with */
#define OF_MAX_ADDR_CELLS	4
#define OF_CHECK_ADDR_COUNT(na)	((na) > 0 && (na) <= OF_MAX_ADDR_CELLS)
#define OF_CHECK_COUNTS(na, ns)	(OF_CHECK_ADDR_COUNT(na) && (ns) > 0)

/* Debug utility */
#ifdef DEBUG
static void of_dump_addr(const char *s, const __be32 *addr, int na)
{
	pr_debug("%s", s);
	while (na--)
		pr_cont(" %08x", be32_to_cpu(*(addr++)));
	pr_cont("\n");
}
#else
static void of_dump_addr(const char *s, const __be32 *addr, int na) { }
#endif

/* Callbacks for bus specific translators */
struct of_bus {
	const char	*name;
	const char	*addresses;
	int		(*match)(struct device_node *parent);
	void		(*count_cells)(struct device_node *child,
				       int *addrc, int *sizec);
	u64		(*map)(__be32 *addr, const __be32 *range,
			       int na, int ns, int pna);
	int		(*translate)(__be32 *addr, u64 offset, int na);
	bool		has_flags;
	unsigned int	(*get_flags)(const __be32 *addr);
};

/*
 * Default translator (generic bus)
 */

static void of_bus_default_count_cells(struct device_node *dev,
				       int *addrc, int *sizec)
{
	if (addrc)
		*addrc = of_n_addr_cells(dev);
	if (sizec)
		*sizec = of_n_size_cells(dev);
}

static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
			      int na, int ns, int pna)
{
	u64 cp, s, da;

	cp = of_read_number(range, na);
	s  = of_read_number(range + na + pna, ns);
	da = of_read_number(addr, na);

	pr_debug("default map, cp=%llx, s=%llx, da=%llx\n", cp, s, da);

	if (da < cp || da >= (cp + s))
		return OF_BAD_ADDR;
	return da - cp;
}

static int of_bus_default_translate(__be32 *addr, u64 offset, int na)
{
	u64 a = of_read_number(addr, na);

	memset(addr, 0, na * 4);
	a += offset;
	if (na > 1)
		addr[na - 2] = cpu_to_be32(a >> 32);
	addr[na - 1] = cpu_to_be32(a & 0xffffffffu);

	return 0;
}

static unsigned int of_bus_default_flags_get_flags(const __be32 *addr)
{
	return of_read_number(addr, 1);
}

static unsigned int of_bus_default_get_flags(const __be32 *addr)
{
	return IORESOURCE_MEM;
}

#ifdef CONFIG_PCI
static unsigned int of_bus_pci_get_flags(const __be32 *addr)
{
	unsigned int flags = 0;
	u32 w = be32_to_cpup(addr);

	if (!IS_ENABLED(CONFIG_PCI))
		return 0;

	switch ((w >> 24) & 0x03) {
	case 0x01:
		flags |= IORESOURCE_IO;
		break;
	case 0x02: /* 32 bits */
		flags |= IORESOURCE_MEM;
		break;
	case 0x03: /* 64 bits */
		flags |= IORESOURCE_MEM | IORESOURCE_MEM_64;
		break;
	}
	if (w & 0x40000000)
		flags |= IORESOURCE_PREFETCH;
	return flags;
}

/*
 * PCI bus specific translator
 */

static bool of_node_is_pcie(struct device_node *np)
{
	bool is_pcie = of_node_name_eq(np, "pcie");

	if (is_pcie)
		pr_warn_once("%pOF: Missing device_type\n", np);

	return is_pcie;
}

static int of_bus_pci_match(struct device_node *np)
{
	/*
	 * "pciex" is PCI Express
	 * "vci" is for the /chaos bridge on 1st-gen PCI powermacs
	 * "ht" is hypertransport
	 *
	 * If none of the device_type values match, and the node name is
	 * "pcie", accept the device as PCI (with a warning).
	 */
	return of_node_is_type(np, "pci") || of_node_is_type(np, "pciex") ||
		of_node_is_type(np, "vci") || of_node_is_type(np, "ht") ||
		of_node_is_pcie(np);
}

static void of_bus_pci_count_cells(struct device_node *np,
				   int *addrc, int *sizec)
{
	if (addrc)
		*addrc = 3;
	if (sizec)
		*sizec = 2;
}

static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns,
			  int pna)
{
	u64 cp, s, da;
	unsigned int af, rf;

	af = of_bus_pci_get_flags(addr);
	rf = of_bus_pci_get_flags(range);

	/* Check address type match */
	if ((af ^ rf) & (IORESOURCE_MEM | IORESOURCE_IO))
		return OF_BAD_ADDR;

	/* Read address values, skipping high cell */
	cp = of_read_number(range + 1, na - 1);
	s  = of_read_number(range + na + pna, ns);
	da = of_read_number(addr + 1, na - 1);

	pr_debug("PCI map, cp=%llx, s=%llx, da=%llx\n", cp, s, da);

	if (da < cp || da >= (cp + s))
		return OF_BAD_ADDR;
	return da - cp;
}

static int of_bus_pci_translate(__be32 *addr, u64 offset, int na)
{
	return of_bus_default_translate(addr + 1, offset, na - 1);
}
#endif /* CONFIG_PCI */

/*
 * of_pci_range_to_resource - Create a resource from an of_pci_range
 * @range:	the PCI range that describes the resource
 * @np:		device node where the range belongs to
 * @res:	pointer to a valid resource that will be updated to
 *		reflect the values contained in the range.
 *
 * Returns -EINVAL if the range cannot be converted to resource.
 *
 * Note that if the range is an IO range, the resource will be converted
 * using pci_address_to_pio() which can fail if it is called too early or
 * if the range cannot be matched to any host bridge IO space (our case here).
 * To guard against that we try to register the IO range first.
 * If that fails we know that pci_address_to_pio() will do too.
 */
int of_pci_range_to_resource(struct of_pci_range *range,
			     struct device_node *np, struct resource *res)
{
	int err;

	res->flags = range->flags;
	res->parent = res->child = res->sibling = NULL;
	res->name = np->full_name;

	if (res->flags & IORESOURCE_IO) {
		unsigned long port;

		err = pci_register_io_range(&np->fwnode, range->cpu_addr,
					    range->size);
		if (err)
			goto invalid_range;
		port = pci_address_to_pio(range->cpu_addr);
		if (port == (unsigned long)-1) {
			err = -EINVAL;
			goto invalid_range;
		}
		res->start = port;
	} else {
		if ((sizeof(resource_size_t) < 8) &&
		    upper_32_bits(range->cpu_addr)) {
			err = -EINVAL;
			goto invalid_range;
		}

		res->start = range->cpu_addr;
	}
	res->end = res->start + range->size - 1;
	return 0;

invalid_range:
	res->start = (resource_size_t)OF_BAD_ADDR;
	res->end = (resource_size_t)OF_BAD_ADDR;
	return err;
}
EXPORT_SYMBOL(of_pci_range_to_resource);

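/*
 * Illustrative sketch (not part of the original file): a PCI host bridge
 * driver would typically convert each parsed "ranges" entry like this. The
 * names "np", "parser" and "range" are assumptions made for the example and
 * error handling is abbreviated.
 *
 *	struct of_pci_range_parser parser;
 *	struct of_pci_range range;
 *	struct resource res;
 *
 *	if (of_pci_range_parser_init(&parser, np))
 *		return -EINVAL;
 *	for_each_of_pci_range(&parser, &range) {
 *		if (of_pci_range_to_resource(&range, np, &res))
 *			continue;
 *		pr_debug("bridge window %pR\n", &res);
 *	}
 */
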
/*
 * of_range_to_resource - Create a resource from a ranges entry
 * @np:		device node where the range belongs to
 * @index:	the 'ranges' index to convert to a resource
 * @res:	pointer to a valid resource that will be updated to
 *		reflect the values contained in the range.
 *
 * Returns -ENOENT if the entry is not found or -EINVAL if the range cannot be
 * converted to a resource.
 */
int of_range_to_resource(struct device_node *np, int index, struct resource *res)
{
	int ret, i = 0;
	struct of_range_parser parser;
	struct of_range range;

	ret = of_range_parser_init(&parser, np);
	if (ret)
		return ret;

	for_each_of_range(&parser, &range)
		if (i++ == index)
			return of_pci_range_to_resource(&range, np, res);

	return -ENOENT;
}
EXPORT_SYMBOL(of_range_to_resource);

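/*
 * Illustrative sketch (not part of the original file): fetching the second
 * "ranges" entry of a bridge node as a resource. "bridge_np" is an
 * assumption made for the example.
 *
 *	struct resource res;
 *
 *	if (!of_range_to_resource(bridge_np, 1, &res))
 *		pr_debug("second range: %pR\n", &res);
 */
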
/*
 * ISA bus specific translator
 */

static int of_bus_isa_match(struct device_node *np)
{
	return of_node_name_eq(np, "isa");
}

static void of_bus_isa_count_cells(struct device_node *child,
				   int *addrc, int *sizec)
{
	if (addrc)
		*addrc = 2;
	if (sizec)
		*sizec = 1;
}

static u64 of_bus_isa_map(__be32 *addr, const __be32 *range, int na, int ns,
			  int pna)
{
	u64 cp, s, da;

	/* Check address type match */
	if ((addr[0] ^ range[0]) & cpu_to_be32(1))
		return OF_BAD_ADDR;

	/* Read address values, skipping high cell */
	cp = of_read_number(range + 1, na - 1);
	s  = of_read_number(range + na + pna, ns);
	da = of_read_number(addr + 1, na - 1);

	pr_debug("ISA map, cp=%llx, s=%llx, da=%llx\n", cp, s, da);

	if (da < cp || da >= (cp + s))
		return OF_BAD_ADDR;
	return da - cp;
}

static int of_bus_isa_translate(__be32 *addr, u64 offset, int na)
{
	return of_bus_default_translate(addr + 1, offset, na - 1);
}

static unsigned int of_bus_isa_get_flags(const __be32 *addr)
{
	unsigned int flags = 0;
	u32 w = be32_to_cpup(addr);

	if (w & 1)
		flags |= IORESOURCE_IO;
	else
		flags |= IORESOURCE_MEM;
	return flags;
}

static int of_bus_default_flags_match(struct device_node *np)
{
	return of_bus_n_addr_cells(np) == 3;
}

/*
 * Array of bus specific translators
 */

static struct of_bus of_busses[] = {
#ifdef CONFIG_PCI
	/* PCI */
	{
		.name = "pci",
		.addresses = "assigned-addresses",
		.match = of_bus_pci_match,
		.count_cells = of_bus_pci_count_cells,
		.map = of_bus_pci_map,
		.translate = of_bus_pci_translate,
		.has_flags = true,
		.get_flags = of_bus_pci_get_flags,
	},
#endif /* CONFIG_PCI */
	/* ISA */
	{
		.name = "isa",
		.addresses = "reg",
		.match = of_bus_isa_match,
		.count_cells = of_bus_isa_count_cells,
		.map = of_bus_isa_map,
		.translate = of_bus_isa_translate,
		.has_flags = true,
		.get_flags = of_bus_isa_get_flags,
	},
	/* Default with flags cell */
	{
		.name = "default-flags",
		.addresses = "reg",
		.match = of_bus_default_flags_match,
		.count_cells = of_bus_default_count_cells,
		.map = of_bus_default_map,
		.translate = of_bus_default_translate,
		.has_flags = true,
		.get_flags = of_bus_default_flags_get_flags,
	},
	/* Default */
	{
		.name = "default",
		.addresses = "reg",
		.match = NULL,
		.count_cells = of_bus_default_count_cells,
		.map = of_bus_default_map,
		.translate = of_bus_default_translate,
		.get_flags = of_bus_default_get_flags,
	},
};

static struct of_bus *of_match_bus(struct device_node *np)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(of_busses); i++)
		if (!of_busses[i].match || of_busses[i].match(np))
			return &of_busses[i];
	BUG();
	return NULL;
}

static int of_empty_ranges_quirk(struct device_node *np)
{
	if (IS_ENABLED(CONFIG_PPC)) {
		/* To save cycles, we cache the result for global "Mac" setting */
		static int quirk_state = -1;

		/* PA-SEMI sdc DT bug */
		if (of_device_is_compatible(np, "1682m-sdc"))
			return true;

		/* Make quirk cached */
		if (quirk_state < 0)
			quirk_state =
				of_machine_is_compatible("Power Macintosh") ||
				of_machine_is_compatible("MacRISC");
		return quirk_state;
	}
	return false;
}

static int of_translate_one(struct device_node *parent, struct of_bus *bus,
			    struct of_bus *pbus, __be32 *addr,
			    int na, int ns, int pna, const char *rprop)
{
	const __be32 *ranges;
	unsigned int rlen;
	int rone;
	u64 offset = OF_BAD_ADDR;

	/*
	 * Normally, an absence of a "ranges" property means we are
	 * crossing a non-translatable boundary, and thus the addresses
	 * below the current one cannot be converted to CPU physical ones.
	 * Unfortunately, while this is very clear in the spec, it's not
	 * what Apple understood, and they do have things like /uni-n or
	 * /ht nodes with no "ranges" property and a lot of perfectly
	 * usable mapped devices below them. Thus we treat the absence of
	 * "ranges" as equivalent to an empty "ranges" property which means
	 * a 1:1 translation at that level. It's up to the caller not to try
	 * to translate addresses that aren't supposed to be translated in
	 * the first place. --BenH.
	 *
	 * As far as we know, this damage only exists on Apple machines, so
	 * this code is only enabled on powerpc. --gcl
	 *
	 * This quirk also applies for 'dma-ranges' which frequently exist in
	 * child nodes without 'dma-ranges' in the parent nodes. --RobH
	 */
	ranges = of_get_property(parent, rprop, &rlen);
	if (ranges == NULL && !of_empty_ranges_quirk(parent) &&
	    strcmp(rprop, "dma-ranges")) {
		pr_debug("no ranges; cannot translate\n");
		return 1;
	}
	if (ranges == NULL || rlen == 0) {
		offset = of_read_number(addr, na);
		memset(addr, 0, pna * 4);
		pr_debug("empty ranges; 1:1 translation\n");
		goto finish;
	}

	pr_debug("walking ranges...\n");

	/* Now walk through the ranges */
	rlen /= 4;
	rone = na + pna + ns;
	for (; rlen >= rone; rlen -= rone, ranges += rone) {
		offset = bus->map(addr, ranges, na, ns, pna);
		if (offset != OF_BAD_ADDR)
			break;
	}
	if (offset == OF_BAD_ADDR) {
		pr_debug("not found !\n");
		return 1;
	}
	memcpy(addr, ranges + na, 4 * pna);

finish:
	of_dump_addr("parent translation for:", addr, pna);
	pr_debug("with offset: %llx\n", offset);

	/* Translate it into parent bus space */
	return pbus->translate(addr, offset, pna);
}

/*
 * Translate an address from the device-tree into a CPU physical address;
 * this walks up the tree and applies the various bus mappings on the
 * way.
 *
 * Note: we consider crossing any level with #size-cells == 0 to mean
 * that translation is impossible (that is, we are not dealing with a value
 * that can be mapped to a cpu physical address). This is not really specified
 * that way, but this is traditionally the way IBM at least do things.
 *
 * Whenever the translation fails, the *host pointer will be set to the
 * device that had registered logical PIO mapping, and the return code is
 * relative to that node.
 */
static u64 __of_translate_address(struct device_node *dev,
				  struct device_node *(*get_parent)(const struct device_node *),
				  const __be32 *in_addr, const char *rprop,
				  struct device_node **host)
{
	struct device_node *parent = NULL;
	struct of_bus *bus, *pbus;
	__be32 addr[OF_MAX_ADDR_CELLS];
	int na, ns, pna, pns;
	u64 result = OF_BAD_ADDR;

	pr_debug("** translation for device %pOF **\n", dev);

	/* Increase refcount at current level */
	of_node_get(dev);

	*host = NULL;

	/* Get parent & match bus type */
	parent = get_parent(dev);
	if (parent == NULL)
		goto bail;
	bus = of_match_bus(parent);

	/* Count address cells & copy address locally */
	bus->count_cells(dev, &na, &ns);
	if (!OF_CHECK_COUNTS(na, ns)) {
		pr_debug("Bad cell count for %pOF\n", dev);
		goto bail;
	}
	memcpy(addr, in_addr, na * 4);

	pr_debug("bus is %s (na=%d, ns=%d) on %pOF\n",
		 bus->name, na, ns, parent);
	of_dump_addr("translating address:", addr, na);

	/* Translate */
	for (;;) {
		struct logic_pio_hwaddr *iorange;

		/* Switch to parent bus */
		of_node_put(dev);
		dev = parent;
		parent = get_parent(dev);

		/* If root, we have finished */
		if (parent == NULL) {
			pr_debug("reached root node\n");
			result = of_read_number(addr, na);
			break;
		}

		/*
		 * For indirectIO device which has no ranges property, get
		 * the address from reg directly.
		 */
		iorange = find_io_range_by_fwnode(&dev->fwnode);
		if (iorange && (iorange->flags != LOGIC_PIO_CPU_MMIO)) {
			result = of_read_number(addr + 1, na - 1);
			pr_debug("indirectIO matched(%pOF) 0x%llx\n",
				 dev, result);
			*host = of_node_get(dev);
			break;
		}

		/* Get new parent bus and counts */
		pbus = of_match_bus(parent);
		pbus->count_cells(dev, &pna, &pns);
		if (!OF_CHECK_COUNTS(pna, pns)) {
			pr_err("Bad cell count for %pOF\n", dev);
			break;
		}

		pr_debug("parent bus is %s (na=%d, ns=%d) on %pOF\n",
			 pbus->name, pna, pns, parent);

		/* Apply bus translation */
		if (of_translate_one(dev, bus, pbus, addr, na, ns, pna, rprop))
			break;

		/* Complete the move up one level */
		na = pna;
		ns = pns;
		bus = pbus;

		of_dump_addr("one level translation:", addr, na);
	}
bail:
	of_node_put(parent);
	of_node_put(dev);

	return result;
}

u64 of_translate_address(struct device_node *dev, const __be32 *in_addr)
{
	struct device_node *host;
	u64 ret;

	ret = __of_translate_address(dev, of_get_parent,
				     in_addr, "ranges", &host);
	if (host) {
		of_node_put(host);
		return OF_BAD_ADDR;
	}

	return ret;
}
EXPORT_SYMBOL(of_translate_address);

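/*
 * Illustrative sketch (not part of the original file): translating the raw
 * first "reg" entry of a node into a CPU physical address by hand. "np" is
 * an assumption made for the example; most callers should prefer
 * of_address_to_resource() further below.
 *
 *	const __be32 *reg = of_get_property(np, "reg", NULL);
 *	u64 paddr;
 *
 *	if (reg) {
 *		paddr = of_translate_address(np, reg);
 *		if (paddr != OF_BAD_ADDR)
 *			pr_debug("regs at %llx\n", paddr);
 *	}
 */
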
#ifdef CONFIG_HAS_DMA
struct device_node *__of_get_dma_parent(const struct device_node *np)
{
	struct of_phandle_args args;
	int ret, index;

	index = of_property_match_string(np, "interconnect-names", "dma-mem");
	if (index < 0)
		return of_get_parent(np);

	ret = of_parse_phandle_with_args(np, "interconnects",
					 "#interconnect-cells",
					 index, &args);
	if (ret < 0)
		return of_get_parent(np);

	return of_node_get(args.np);
}
#endif

static struct device_node *of_get_next_dma_parent(struct device_node *np)
{
	struct device_node *parent;

	parent = __of_get_dma_parent(np);
	of_node_put(np);

	return parent;
}

u64 of_translate_dma_address(struct device_node *dev, const __be32 *in_addr)
{
	struct device_node *host;
	u64 ret;

	ret = __of_translate_address(dev, __of_get_dma_parent,
				     in_addr, "dma-ranges", &host);
	if (host) {
		of_node_put(host);
		return OF_BAD_ADDR;
	}

	return ret;
}
EXPORT_SYMBOL(of_translate_dma_address);

/**
 * of_translate_dma_region - Translate device tree address and size tuple
 * @dev: device tree node for which to translate
 * @prop: pointer into array of cells
 * @start: return value for the start of the DMA range
 * @length: return value for the length of the DMA range
 *
 * Returns a pointer to the cell immediately following the translated DMA region.
 */
const __be32 *of_translate_dma_region(struct device_node *dev, const __be32 *prop,
				      phys_addr_t *start, size_t *length)
{
	struct device_node *parent;
	u64 address, size;
	int na, ns;

	parent = __of_get_dma_parent(dev);
	if (!parent)
		return NULL;

	na = of_bus_n_addr_cells(parent);
	ns = of_bus_n_size_cells(parent);

	of_node_put(parent);

	address = of_translate_dma_address(dev, prop);
	if (address == OF_BAD_ADDR)
		return NULL;

	size = of_read_number(prop + na, ns);

	if (start)
		*start = address;

	if (length)
		*length = size;

	return prop + na + ns;
}
EXPORT_SYMBOL(of_translate_dma_region);

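/*
 * Illustrative sketch (not part of the original file): translating the first
 * (dma-addr, size) tuple of a property. The property name "iommu-addresses"
 * and the variables below are assumptions made for the example.
 *
 *	const __be32 *prop = of_get_property(np, "iommu-addresses", NULL);
 *	phys_addr_t start;
 *	size_t len;
 *
 *	if (prop && of_translate_dma_region(np, prop, &start, &len))
 *		pr_debug("first DMA region: %pa + %zx\n", &start, len);
 */
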
const __be32 *__of_get_address(struct device_node *dev, int index, int bar_no,
			       u64 *size, unsigned int *flags)
{
	const __be32 *prop;
	unsigned int psize;
	struct device_node *parent;
	struct of_bus *bus;
	int onesize, i, na, ns;

	/* Get parent & match bus type */
	parent = of_get_parent(dev);
	if (parent == NULL)
		return NULL;
	bus = of_match_bus(parent);
	if (strcmp(bus->name, "pci") && (bar_no >= 0)) {
		of_node_put(parent);
		return NULL;
	}

	bus->count_cells(dev, &na, &ns);
	of_node_put(parent);
	if (!OF_CHECK_ADDR_COUNT(na))
		return NULL;

	/* Get "reg" or "assigned-addresses" property */
	prop = of_get_property(dev, bus->addresses, &psize);
	if (prop == NULL)
		return NULL;
	psize /= 4;

	onesize = na + ns;
	for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++) {
		u32 val = be32_to_cpu(prop[0]);

		/* PCI bus matches on BAR number instead of index */
		if (((bar_no >= 0) && ((val & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0))) ||
		    ((index >= 0) && (i == index))) {
			if (size)
				*size = of_read_number(prop + na, ns);
			if (flags)
				*flags = bus->get_flags(prop);
			return prop;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(__of_get_address);

/**
 * of_property_read_reg - Retrieve the specified "reg" entry index without translating
 * @np: device tree node for which to retrieve "reg" from
 * @idx: "reg" entry index to read
 * @addr: return value for the untranslated address
 * @size: return value for the entry size
 *
 * Returns -EINVAL if "reg" is not found. Returns 0 on success with addr and
 * size values filled in.
 */
int of_property_read_reg(struct device_node *np, int idx, u64 *addr, u64 *size)
{
	const __be32 *prop = of_get_address(np, idx, size, NULL);

	if (!prop)
		return -EINVAL;

	*addr = of_read_number(prop, of_n_addr_cells(np));

	return 0;
}
EXPORT_SYMBOL(of_property_read_reg);

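/*
 * Illustrative sketch (not part of the original file): reading the raw,
 * untranslated bus address of the first "reg" entry. "np" is an assumption
 * made for the example.
 *
 *	u64 addr, size;
 *
 *	if (!of_property_read_reg(np, 0, &addr, &size))
 *		pr_debug("reg[0]: addr=%llx size=%llx\n", addr, size);
 */
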
static int parser_init(struct of_pci_range_parser *parser,
		       struct device_node *node, const char *name)
{
	int rlen;

	parser->node = node;
	parser->pna = of_n_addr_cells(node);
	parser->na = of_bus_n_addr_cells(node);
	parser->ns = of_bus_n_size_cells(node);
	parser->dma = !strcmp(name, "dma-ranges");
	parser->bus = of_match_bus(node);

	parser->range = of_get_property(node, name, &rlen);
	if (parser->range == NULL)
		return -ENOENT;

	parser->end = parser->range + rlen / sizeof(__be32);

	return 0;
}

int of_pci_range_parser_init(struct of_pci_range_parser *parser,
			     struct device_node *node)
{
	return parser_init(parser, node, "ranges");
}
EXPORT_SYMBOL_GPL(of_pci_range_parser_init);

int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser,
				 struct device_node *node)
{
	return parser_init(parser, node, "dma-ranges");
}
EXPORT_SYMBOL_GPL(of_pci_dma_range_parser_init);

#define of_dma_range_parser_init of_pci_dma_range_parser_init

struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
					     struct of_pci_range *range)
{
	int na = parser->na;
	int ns = parser->ns;
	int np = parser->pna + na + ns;
	int busflag_na = 0;

	if (!range)
		return NULL;

	if (!parser->range || parser->range + np > parser->end)
		return NULL;

	range->flags = parser->bus->get_flags(parser->range);

	/* An extra cell for resource flags */
	if (parser->bus->has_flags)
		busflag_na = 1;

	range->bus_addr = of_read_number(parser->range + busflag_na, na - busflag_na);

	if (parser->dma)
		range->cpu_addr = of_translate_dma_address(parser->node,
				parser->range + na);
	else
		range->cpu_addr = of_translate_address(parser->node,
				parser->range + na);
	range->size = of_read_number(parser->range + parser->pna + na, ns);

	parser->range += np;

	/* Now consume following elements while they are contiguous */
	while (parser->range + np <= parser->end) {
		u32 flags = 0;
		u64 bus_addr, cpu_addr, size;

		flags = parser->bus->get_flags(parser->range);
		bus_addr = of_read_number(parser->range + busflag_na, na - busflag_na);
		if (parser->dma)
			cpu_addr = of_translate_dma_address(parser->node,
					parser->range + na);
		else
			cpu_addr = of_translate_address(parser->node,
					parser->range + na);
		size = of_read_number(parser->range + parser->pna + na, ns);

		if (flags != range->flags)
			break;
		if (bus_addr != range->bus_addr + range->size ||
		    cpu_addr != range->cpu_addr + range->size)
			break;

		range->size += size;
		parser->range += np;
	}

	return range;
}
EXPORT_SYMBOL_GPL(of_pci_range_parser_one);

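/*
 * Illustrative sketch (not part of the original file): iterating all
 * "ranges" entries of a node with the generic range parser helpers from
 * <linux/of_address.h>. "np" is an assumption made for the example.
 *
 *	struct of_range_parser parser;
 *	struct of_range range;
 *
 *	if (!of_range_parser_init(&parser, np))
 *		for_each_of_range(&parser, &range)
 *			pr_debug("bus %llx -> cpu %llx (size %llx)\n",
 *				 range.bus_addr, range.cpu_addr, range.size);
 */
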
static u64 of_translate_ioport(struct device_node *dev, const __be32 *in_addr,
			       u64 size)
{
	u64 taddr;
	unsigned long port;
	struct device_node *host;

	taddr = __of_translate_address(dev, of_get_parent,
				       in_addr, "ranges", &host);
	if (host) {
		/* host-specific port access */
		port = logic_pio_trans_hwaddr(&host->fwnode, taddr, size);
		of_node_put(host);
	} else {
		/* memory-mapped I/O range */
		port = pci_address_to_pio(taddr);
	}

	if (port == (unsigned long)-1)
		return OF_BAD_ADDR;

	return port;
}

#ifdef CONFIG_HAS_DMA
/**
 * of_dma_get_range - Get DMA range info and put it into a map array
 * @np:		device node to get DMA range info
 * @map:	dma range structure to return
 *
 * Look in bottom up direction for the first "dma-ranges" property
 * and parse it. Put the information into a DMA offset map array.
 *
 * dma-ranges format:
 *	DMA addr (dma_addr)	: naddr cells
 *	CPU addr (phys_addr_t)	: pna cells
 *	size			: nsize cells
 *
 * It returns -ENODEV if "dma-ranges" property was not found for this
 * device in the DT.
 */
int of_dma_get_range(struct device_node *np, const struct bus_dma_region **map)
{
	struct device_node *node = of_node_get(np);
	const __be32 *ranges = NULL;
	bool found_dma_ranges = false;
	struct of_range_parser parser;
	struct of_range range;
	struct bus_dma_region *r;
	int len, num_ranges = 0;
	int ret = 0;

	while (node) {
		ranges = of_get_property(node, "dma-ranges", &len);

		/* Ignore empty ranges, they imply no translation required */
		if (ranges && len > 0)
			break;

		/* Once we find 'dma-ranges', then a missing one is an error */
		if (found_dma_ranges && !ranges) {
			ret = -ENODEV;
			goto out;
		}
		found_dma_ranges = true;

		node = of_get_next_dma_parent(node);
	}

	if (!node || !ranges) {
		pr_debug("no dma-ranges found for node(%pOF)\n", np);
		ret = -ENODEV;
		goto out;
	}

	of_dma_range_parser_init(&parser, node);
	for_each_of_range(&parser, &range) {
		if (range.cpu_addr == OF_BAD_ADDR) {
			pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
			       range.bus_addr, node);
			continue;
		}
		num_ranges++;
	}

	if (!num_ranges) {
		ret = -EINVAL;
		goto out;
	}

	r = kcalloc(num_ranges + 1, sizeof(*r), GFP_KERNEL);
	if (!r) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Record all info in the generic DMA ranges array for struct device,
	 * returning an error if we don't find any parsable ranges.
	 */
	*map = r;
	of_dma_range_parser_init(&parser, node);
	for_each_of_range(&parser, &range) {
		pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
			 range.bus_addr, range.cpu_addr, range.size);
		if (range.cpu_addr == OF_BAD_ADDR)
			continue;
		r->cpu_start = range.cpu_addr;
		r->dma_start = range.bus_addr;
		r->size = range.size;
		r->offset = range.cpu_addr - range.bus_addr;
		r++;
	}
out:
	of_node_put(node);
	return ret;
}
#endif /* CONFIG_HAS_DMA */

/**
 * of_dma_get_max_cpu_address - Gets highest CPU address suitable for DMA
 * @np: The node to start searching from or NULL to start from the root
 *
 * Gets the highest CPU physical address that is addressable by all DMA masters
 * in the sub-tree pointed by np, or the whole tree if NULL is passed. If no
 * DMA constrained device is found, it returns PHYS_ADDR_MAX.
 */
phys_addr_t __init of_dma_get_max_cpu_address(struct device_node *np)
{
	phys_addr_t max_cpu_addr = PHYS_ADDR_MAX;
	struct of_range_parser parser;
	phys_addr_t subtree_max_addr;
	struct device_node *child;
	struct of_range range;
	const __be32 *ranges;
	u64 cpu_end = 0;
	int len;

	if (!np)
		np = of_root;

	ranges = of_get_property(np, "dma-ranges", &len);
	if (ranges && len) {
		of_dma_range_parser_init(&parser, np);
		for_each_of_range(&parser, &range)
			if (range.cpu_addr + range.size > cpu_end)
				cpu_end = range.cpu_addr + range.size - 1;

		if (max_cpu_addr > cpu_end)
			max_cpu_addr = cpu_end;
	}

	for_each_available_child_of_node(np, child) {
		subtree_max_addr = of_dma_get_max_cpu_address(child);
		if (max_cpu_addr > subtree_max_addr)
			max_cpu_addr = subtree_max_addr;
	}

	return max_cpu_addr;
}

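/*
 * Illustrative sketch (not part of the original file): this helper is
 * __init-only, so it would be called from early architecture setup code,
 * e.g. when sizing a DMA zone. The variable name is an assumption made for
 * the example.
 *
 *	phys_addr_t dma_limit = of_dma_get_max_cpu_address(NULL);
 *
 *	pr_debug("highest DMA-addressable CPU address: %pa\n", &dma_limit);
 */
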
/**
 * of_dma_is_coherent - Check if device is coherent
 * @np:	device node
 *
 * It returns true if "dma-coherent" property was found
 * for this device in the DT, or if DMA is coherent by
 * default for OF devices on the current platform and no
 * "dma-noncoherent" property was found for this device.
 */
bool of_dma_is_coherent(struct device_node *np)
{
	struct device_node *node;
	bool is_coherent = dma_default_coherent;

	node = of_node_get(np);

	while (node) {
		if (of_property_read_bool(node, "dma-coherent")) {
			is_coherent = true;
			break;
		}
		if (of_property_read_bool(node, "dma-noncoherent")) {
			is_coherent = false;
			break;
		}
		node = of_get_next_dma_parent(node);
	}
	of_node_put(node);
	return is_coherent;
}
EXPORT_SYMBOL_GPL(of_dma_is_coherent);

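/*
 * Illustrative sketch (not part of the original file): a bus driver deciding
 * how to configure a child's DMA attributes. "dev" and its of_node are
 * assumptions made for the example.
 *
 *	bool coherent = of_dma_is_coherent(dev->of_node);
 *
 *	dev_dbg(dev, "DMA is %scoherent\n", coherent ? "" : "non-");
 */
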
/**
 * of_mmio_is_nonposted - Check if device uses non-posted MMIO
 * @np:	device node
 *
 * Returns true if the "nonposted-mmio" property was found for
 * the device's bus.
 *
 * This is currently only enabled on builds that support Apple ARM devices, as
 * an optimization.
 */
static bool of_mmio_is_nonposted(struct device_node *np)
{
	struct device_node *parent;
	bool nonposted;

	if (!IS_ENABLED(CONFIG_ARCH_APPLE))
		return false;

	parent = of_get_parent(np);
	if (!parent)
		return false;

	nonposted = of_property_read_bool(parent, "nonposted-mmio");

	of_node_put(parent);
	return nonposted;
}

static int __of_address_to_resource(struct device_node *dev, int index, int bar_no,
				    struct resource *r)
{
	u64 taddr;
	const __be32 *addrp;
	u64 size;
	unsigned int flags;
	const char *name = NULL;

	addrp = __of_get_address(dev, index, bar_no, &size, &flags);
	if (addrp == NULL)
		return -EINVAL;

	/* Get optional "reg-names" property to add a name to a resource */
	if (index >= 0)
		of_property_read_string_index(dev, "reg-names", index, &name);

	if (flags & IORESOURCE_MEM)
		taddr = of_translate_address(dev, addrp);
	else if (flags & IORESOURCE_IO)
		taddr = of_translate_ioport(dev, addrp, size);
	else
		return -EINVAL;

	if (taddr == OF_BAD_ADDR)
		return -EINVAL;
	memset(r, 0, sizeof(struct resource));

	if (of_mmio_is_nonposted(dev))
		flags |= IORESOURCE_MEM_NONPOSTED;

	r->start = taddr;
	r->end = taddr + size - 1;
	r->flags = flags;
	r->name = name ? name : dev->full_name;

	return 0;
}

/**
 * of_address_to_resource - Translate device tree address and return as resource
 * @dev:	Caller's Device Node
 * @index:	Index into the array
 * @r:		Pointer to resource array
 *
 * Returns -EINVAL if the range cannot be converted to a resource.
 *
 * Note that if your address is a PIO address, the conversion will fail if
 * the physical address can't be internally converted to an IO token with
 * pci_address_to_pio(); that is because it's either called too early or it
 * can't be matched to any host bridge IO space.
 */
int of_address_to_resource(struct device_node *dev, int index,
			   struct resource *r)
{
	return __of_address_to_resource(dev, index, -1, r);
}
EXPORT_SYMBOL_GPL(of_address_to_resource);

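/*
 * Illustrative sketch (not part of the original file): the usual way a
 * driver obtains its register window from the device tree. "np" is an
 * assumption made for the example; platform drivers typically go through
 * platform_get_resource() or devm_platform_ioremap_resource() instead.
 *
 *	struct resource res;
 *
 *	if (of_address_to_resource(np, 0, &res))
 *		return -ENODEV;
 *	pr_debug("registers at %pR\n", &res);
 */
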
int of_pci_address_to_resource(struct device_node *dev, int bar,
			       struct resource *r)
{
	if (!IS_ENABLED(CONFIG_PCI))
		return -ENOSYS;

	return __of_address_to_resource(dev, -1, bar, r);
}
EXPORT_SYMBOL_GPL(of_pci_address_to_resource);

/**
 * of_iomap - Maps the memory mapped IO for a given device_node
 * @np:		the device whose io range will be mapped
 * @index:	index of the io range
 *
 * Returns a pointer to the mapped memory
 */
void __iomem *of_iomap(struct device_node *np, int index)
{
	struct resource res;

	if (of_address_to_resource(np, index, &res))
		return NULL;

	if (res.flags & IORESOURCE_MEM_NONPOSTED)
		return ioremap_np(res.start, resource_size(&res));
	else
		return ioremap(res.start, resource_size(&res));
}
EXPORT_SYMBOL(of_iomap);

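/*
 * Illustrative sketch (not part of the original file): mapping the first
 * register range of a node and unmapping it when done. "np" and "base" are
 * assumptions made for the example.
 *
 *	void __iomem *base = of_iomap(np, 0);
 *
 *	if (!base)
 *		return -ENOMEM;
 *	writel(0, base);
 *	iounmap(base);
 */
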
/*
 * of_io_request_and_map - Requests a resource and maps the memory mapped IO
 *			   for a given device_node
 * @device:	the device whose io range will be mapped
 * @index:	index of the io range
 * @name:	name "override" for the memory region request or NULL
 *
 * Returns a pointer to the requested and mapped memory or an ERR_PTR() encoded
 * error code on failure. Usage example:
 *
 *	base = of_io_request_and_map(node, 0, "foo");
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */
void __iomem *of_io_request_and_map(struct device_node *np, int index,
				    const char *name)
{
	struct resource res;
	void __iomem *mem;

	if (of_address_to_resource(np, index, &res))
		return IOMEM_ERR_PTR(-EINVAL);

	if (!name)
		name = res.name;
	if (!request_mem_region(res.start, resource_size(&res), name))
		return IOMEM_ERR_PTR(-EBUSY);

	if (res.flags & IORESOURCE_MEM_NONPOSTED)
		mem = ioremap_np(res.start, resource_size(&res));
	else
		mem = ioremap(res.start, resource_size(&res));

	if (!mem) {
		release_mem_region(res.start, resource_size(&res));
		return IOMEM_ERR_PTR(-ENOMEM);
	}

	return mem;
}
EXPORT_SYMBOL(of_io_request_and_map);