/*
 * Device tree based initialization code for reserved memory.
 *
 * Copyright (c) 2013, 2015 The Linux Foundation. All Rights Reserved.
 * Copyright (c) 2013, 2014 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 * Author: Marek Szyprowski <m.szyprowski@samsung.com>
 * Author: Josh Cartwright <joshc@codeaurora.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */
#define pr_fmt(fmt)	"OF: reserved mem: " fmt

#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/of_reserved_mem.h>
#include <linux/sort.h>
#include <linux/slab.h>
#define MAX_RESERVED_REGIONS	16
static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
static int reserved_mem_count;
#if defined(CONFIG_HAVE_MEMBLOCK)
#include <linux/memblock.h>
int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	phys_addr_t base;
	/*
	 * We use __memblock_alloc_base() because memblock_alloc_base()
	 * panic()s on allocation failure.
	 */
	end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
	base = __memblock_alloc_base(size, align, end);
	if (!base)
		return -ENOMEM;

	/*
	 * Check if the allocated region fits into the start..end window
	 */
	if (base < start) {
		memblock_free(base, size);
		return -ENOMEM;
	}

	*res_base = base;
	if (nomap)
		return memblock_remove(base, size);
	return 0;
}
#else
int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	/* cast to unsigned long long so %llx is correct on 32-bit too */
	pr_err("Reserved memory not supported, ignoring region 0x%llx%s\n",
	       (unsigned long long)size, nomap ? " (nomap)" : "");
	return -ENOSYS;
}
#endif
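
/*
 * Both definitions above are marked __weak so that an architecture can
 * supply its own early allocator if the generic memblock-based one does
 * not fit its needs.
 */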
/**
 * fdt_reserved_mem_save_node() - save fdt node for second pass initialization
 */
void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
				       phys_addr_t base, phys_addr_t size)
{
	struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];

	if (reserved_mem_count == ARRAY_SIZE(reserved_mem)) {
		pr_err("not enough space for all defined regions.\n");
		return;
	}

	rmem->fdt_node = node;
	rmem->name = uname;
	rmem->base = base;
	rmem->size = size;

	reserved_mem_count++;
	return;
}
/**
 * __reserved_mem_alloc_size() - allocate reserved memory described by
 *	'size', 'align' and 'alloc-ranges' properties
 */
static int __init __reserved_mem_alloc_size(unsigned long node,
	const char *uname, phys_addr_t *res_base, phys_addr_t *res_size)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	phys_addr_t start = 0, end = 0;
	phys_addr_t base = 0, align = 0, size;
	int len;
	const __be32 *prop;
	int nomap;
	int ret;

	prop = of_get_flat_dt_prop(node, "size", &len);
	if (!prop)
		return -EINVAL;

	if (len != dt_root_size_cells * sizeof(__be32)) {
		pr_err("invalid size property in '%s' node.\n", uname);
		return -EINVAL;
	}
	size = dt_mem_next_cell(dt_root_size_cells, &prop);

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	prop = of_get_flat_dt_prop(node, "alignment", &len);
	if (prop) {
		if (len != dt_root_addr_cells * sizeof(__be32)) {
			pr_err("invalid alignment property in '%s' node.\n",
			       uname);
			return -EINVAL;
		}
		align = dt_mem_next_cell(dt_root_addr_cells, &prop);
	}

	/* Need to adjust the alignment to satisfy the CMA requirement */
	if (IS_ENABLED(CONFIG_CMA)
	    && of_flat_dt_is_compatible(node, "shared-dma-pool")
	    && of_get_flat_dt_prop(node, "reusable", NULL)
	    && !of_get_flat_dt_prop(node, "no-map", NULL)) {
		unsigned long order =
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

		align = max(align, (phys_addr_t)PAGE_SIZE << order);
	}

	prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
	if (prop) {

		if (len % t_len != 0) {
			pr_err("invalid alloc-ranges property in '%s', skipping node.\n",
			       uname);
			return -EINVAL;
		}

		base = 0;

		while (len > 0) {
			start = dt_mem_next_cell(dt_root_addr_cells, &prop);
			end = start + dt_mem_next_cell(dt_root_size_cells,
						       &prop);

			ret = early_init_dt_alloc_reserved_memory_arch(size,
					align, start, end, nomap, &base);
			if (ret == 0) {
				pr_debug("allocated memory for '%s' node: base %pa, size %ld MiB\n",
					 uname, &base,
					 (unsigned long)size / SZ_1M);
				break;
			}
			len -= t_len;
		}

	} else {
		ret = early_init_dt_alloc_reserved_memory_arch(size, align,
							0, 0, nomap, &base);
		if (ret == 0)
			pr_debug("allocated memory for '%s' node: base %pa, size %ld MiB\n",
				 uname, &base, (unsigned long)size / SZ_1M);
	}

	if (base == 0) {
		pr_info("failed to allocate memory for node '%s'\n", uname);
		return -ENOMEM;
	}

	*res_base = base;
	*res_size = size;

	return 0;
}
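
/*
 * For illustration, a reserved-memory node exercising the properties parsed
 * above might look like the (hypothetical) fragment below; the node name,
 * label, addresses and sizes are examples only:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		multimedia_pool: multimedia {
 *			compatible = "shared-dma-pool";
 *			reusable;
 *			size = <0x4000000>;
 *			alignment = <0x400000>;
 *			alloc-ranges = <0x40000000 0x10000000>;
 *		};
 *	};
 */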
static const struct of_device_id __rmem_of_table_sentinel
	__used __section(__reservedmem_of_table_end);

/**
 * __reserved_mem_init_node() - call region specific reserved memory init code
 */
static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
{
	extern const struct of_device_id __reservedmem_of_table[];
	const struct of_device_id *i;

	for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
		reservedmem_of_init_fn initfn = i->data;
		const char *compat = i->compatible;

		if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
			continue;

		if (initfn(rmem) == 0) {
			pr_info("initialized node %s, compatible id %s\n",
				rmem->name, compat);
			return 0;
		}
	}
	return -ENOENT;
}
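
/*
 * Entries in __reservedmem_of_table are registered with the
 * RESERVEDMEM_OF_DECLARE() macro from <linux/of_reserved_mem.h>.  A minimal
 * sketch; the pool, compatible string and function names are hypothetical.
 * A non-zero return from the init function lets the scan above continue to
 * the next matching table entry:
 *
 *	static int __init my_pool_init(struct reserved_mem *rmem)
 *	{
 *		pr_info("my-pool at %pa, size %pa\n",
 *			&rmem->base, &rmem->size);
 *		return 0;
 *	}
 *	RESERVEDMEM_OF_DECLARE(my_pool, "vendor,my-pool", my_pool_init);
 */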
static int __init __rmem_cmp(const void *a, const void *b)
{
	const struct reserved_mem *ra = a, *rb = b;

	if (ra->base < rb->base)
		return -1;

	if (ra->base > rb->base)
		return 1;

	return 0;
}

static void __init __rmem_check_for_overlap(void)
{
	int i;

	if (reserved_mem_count < 2)
		return;

	sort(reserved_mem, reserved_mem_count, sizeof(reserved_mem[0]),
	     __rmem_cmp, NULL);
	for (i = 0; i < reserved_mem_count - 1; i++) {
		struct reserved_mem *this, *next;

		this = &reserved_mem[i];
		next = &reserved_mem[i + 1];
		if (!(this->base && next->base))
			continue;
		if (this->base + this->size > next->base) {
			phys_addr_t this_end, next_end;

			this_end = this->base + this->size;
			next_end = next->base + next->size;
			pr_err("OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n",
			       this->name, &this->base, &this_end,
			       next->name, &next->base, &next_end);
		}
	}
}
/**
 * fdt_init_reserved_mem - allocate and init all saved reserved memory regions
 */
void __init fdt_init_reserved_mem(void)
{
	int i;

	/* check for overlapping reserved regions */
	__rmem_check_for_overlap();

	for (i = 0; i < reserved_mem_count; i++) {
		struct reserved_mem *rmem = &reserved_mem[i];
		unsigned long node = rmem->fdt_node;
		int len;
		const __be32 *prop;
		int err = 0;

		prop = of_get_flat_dt_prop(node, "phandle", &len);
		if (!prop)
			prop = of_get_flat_dt_prop(node, "linux,phandle", &len);
		if (prop)
			rmem->phandle = of_read_number(prop, len / 4);

		if (rmem->size == 0)
			err = __reserved_mem_alloc_size(node, rmem->name,
						&rmem->base, &rmem->size);
		if (err == 0)
			__reserved_mem_init_node(rmem);
	}
}
static inline struct reserved_mem *__find_rmem(struct device_node *node)
{
	unsigned int i;

	if (!node->phandle)
		return NULL;

	for (i = 0; i < reserved_mem_count; i++)
		if (reserved_mem[i].phandle == node->phandle)
			return &reserved_mem[i];
	return NULL;
}

struct rmem_assigned_device {
	struct device *dev;
	struct reserved_mem *rmem;
	struct list_head list;
};

static LIST_HEAD(of_rmem_assigned_device_list);
static DEFINE_MUTEX(of_rmem_assigned_device_mutex);
/**
 * of_reserved_mem_device_init_by_idx() - assign reserved memory region to
 *					  given device
 * @dev:	Pointer to the device to configure
 * @np:		Pointer to the device_node with 'reserved-memory' property
 * @idx:	Index of selected region
 *
 * This function assigns respective DMA-mapping operations based on the
 * reserved memory region specified by the 'memory-region' property in the
 * @np node to the @dev device. When a driver needs to use more than one
 * reserved memory region, it should allocate child devices and initialize
 * a region for each of them.
 *
 * Returns error code or zero on success.
 */
int of_reserved_mem_device_init_by_idx(struct device *dev,
				       struct device_node *np, int idx)
{
	struct rmem_assigned_device *rd;
	struct device_node *target;
	struct reserved_mem *rmem;
	int ret;

	if (!np || !dev)
		return -EINVAL;

	target = of_parse_phandle(np, "memory-region", idx);
	if (!target)
		return -ENODEV;

	rmem = __find_rmem(target);
	of_node_put(target);

	if (!rmem || !rmem->ops || !rmem->ops->device_init)
		return -EINVAL;

	rd = kmalloc(sizeof(struct rmem_assigned_device), GFP_KERNEL);
	if (!rd)
		return -ENOMEM;

	ret = rmem->ops->device_init(rmem, dev);
	if (ret == 0) {
		rd->dev = dev;
		rd->rmem = rmem;

		mutex_lock(&of_rmem_assigned_device_mutex);
		list_add(&rd->list, &of_rmem_assigned_device_list);
		mutex_unlock(&of_rmem_assigned_device_mutex);
		/* ensure that dma_ops is set for virtual devices
		 * using reserved memory
		 */
		of_dma_configure(dev, np);

		dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
	} else {
		kfree(rd);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_idx);
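
/*
 * Typical usage, as a minimal sketch (the client node, label and error
 * message are hypothetical): a device node references a reserved region
 * through the 'memory-region' property:
 *
 *	codec@12300000 {
 *		memory-region = <&multimedia_pool>;
 *	};
 *
 * and the driver's probe routine then assigns the region to its device:
 *
 *	ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, 0);
 *	if (ret)
 *		dev_warn(dev, "failed to assign reserved memory\n");
 */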
/**
 * of_reserved_mem_device_release() - release reserved memory device structures
 * @dev:	Pointer to the device to deconfigure
 *
 * This function releases structures allocated for memory region handling for
 * the given device.
 */
void of_reserved_mem_device_release(struct device *dev)
{
	struct rmem_assigned_device *rd;
	struct reserved_mem *rmem = NULL;

	mutex_lock(&of_rmem_assigned_device_mutex);
	list_for_each_entry(rd, &of_rmem_assigned_device_list, list) {
		if (rd->dev == dev) {
			rmem = rd->rmem;
			list_del(&rd->list);
			kfree(rd);
			break;
		}
	}
	mutex_unlock(&of_rmem_assigned_device_mutex);

	if (!rmem || !rmem->ops || !rmem->ops->device_release)
		return;

	rmem->ops->device_release(rmem, dev);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_release);