/*
 * Device tree based initialization code for reserved memory.
 *
 * Copyright (c) 2013, 2015 The Linux Foundation. All Rights Reserved.
 * Copyright (c) 2013, 2014 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 * Author: Marek Szyprowski <m.szyprowski@samsung.com>
 * Author: Josh Cartwright <joshc@codeaurora.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 */
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/of_reserved_mem.h>
#include <linux/sort.h>

#define MAX_RESERVED_REGIONS	16
static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
static int reserved_mem_count;

#if defined(CONFIG_HAVE_MEMBLOCK)
#include <linux/memblock.h>
int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	phys_addr_t base;
	/*
	 * We use __memblock_alloc_base() because memblock_alloc_base()
	 * panic()s on allocation failure.
	 */
	end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
	base = __memblock_alloc_base(size, align, end);
	if (!base)
		return -ENOMEM;

	/*
	 * Check if the allocated region fits into the start..end window.
	 */
	if (base < start) {
		memblock_free(base, size);
		return -ENOMEM;
	}

	*res_base = base;
	if (nomap)
		return memblock_remove(base, size);
	return 0;
}
#else
int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	/* Cast so the format specifier is also correct for 32-bit phys_addr_t */
	pr_err("Reserved memory not supported, ignoring region 0x%llx%s\n",
	       (unsigned long long)size, nomap ? " (nomap)" : "");
	return -ENOSYS;
}
#endif

/**
 * fdt_reserved_mem_save_node() - save fdt node for second pass initialization
 */
void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
				      phys_addr_t base, phys_addr_t size)
{
	struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];

	if (reserved_mem_count == ARRAY_SIZE(reserved_mem)) {
		pr_err("Reserved memory: not enough space for all defined regions.\n");
		return;
	}

	rmem->fdt_node = node;
	rmem->name = uname;
	rmem->base = base;
	rmem->size = size;

	reserved_mem_count++;
}

/**
 * __reserved_mem_alloc_size() - allocate reserved memory described by the
 *	'size', 'alignment' and 'alloc-ranges' properties
 */
static int __init __reserved_mem_alloc_size(unsigned long node,
	const char *uname, phys_addr_t *res_base, phys_addr_t *res_size)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	phys_addr_t start = 0, end = 0;
	phys_addr_t base = 0, align = 0, size;
	int len;
	const __be32 *prop;
	int nomap;
	int ret;

	prop = of_get_flat_dt_prop(node, "size", &len);
	if (!prop)
		return -EINVAL;

	if (len != dt_root_size_cells * sizeof(__be32)) {
		pr_err("Reserved memory: invalid size property in '%s' node.\n",
		       uname);
		return -EINVAL;
	}
	size = dt_mem_next_cell(dt_root_size_cells, &prop);

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	prop = of_get_flat_dt_prop(node, "alignment", &len);
	if (prop) {
		if (len != dt_root_addr_cells * sizeof(__be32)) {
			pr_err("Reserved memory: invalid alignment property in '%s' node.\n",
			       uname);
			return -EINVAL;
		}
		align = dt_mem_next_cell(dt_root_addr_cells, &prop);
	}

	/* Need to adjust the alignment to satisfy the CMA requirement */
	if (IS_ENABLED(CONFIG_CMA)
	    && of_flat_dt_is_compatible(node, "shared-dma-pool")
	    && of_get_flat_dt_prop(node, "reusable", NULL)
	    && !of_get_flat_dt_prop(node, "no-map", NULL))
		align = max(align, (phys_addr_t)PAGE_SIZE <<
				max(MAX_ORDER - 1, pageblock_order));

	prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
	if (prop) {

		if (len % t_len != 0) {
			pr_err("Reserved memory: invalid alloc-ranges property in '%s', skipping node.\n",
			       uname);
			return -EINVAL;
		}

		base = 0;

		while (len > 0) {
			start = dt_mem_next_cell(dt_root_addr_cells, &prop);
			end = start + dt_mem_next_cell(dt_root_size_cells,
						       &prop);

			ret = early_init_dt_alloc_reserved_memory_arch(size,
					align, start, end, nomap, &base);
			if (ret == 0) {
				pr_debug("Reserved memory: allocated memory for '%s' node: base %pa, size %ld MiB\n",
					uname, &base,
					(unsigned long)size / SZ_1M);
				break;
			}
			len -= t_len;
		}

	} else {
		ret = early_init_dt_alloc_reserved_memory_arch(size, align,
							0, 0, nomap, &base);
		if (ret == 0)
			pr_debug("Reserved memory: allocated memory for '%s' node: base %pa, size %ld MiB\n",
				uname, &base, (unsigned long)size / SZ_1M);
	}

	if (base == 0) {
		pr_info("Reserved memory: failed to allocate memory for node '%s'\n",
			uname);
		return -ENOMEM;
	}

	*res_base = base;
	*res_size = size;

	return 0;
}
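
/*
 * Example (illustrative only, not taken from a real board file): a
 * dynamically placed region described with the properties parsed above.
 * The node names, labels and numeric values below are made up; the "size",
 * "alignment", "alloc-ranges", "no-map" and "reusable" properties are the
 * ones this code reads from the flattened device tree.
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		example_pool: example-pool {
 *			compatible = "shared-dma-pool";
 *			reusable;
 *			size = <0x400000>;
 *			alignment = <0x100000>;
 *			alloc-ranges = <0x40000000 0x10000000>;
 *		};
 *	};
 */
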
static const struct of_device_id __rmem_of_table_sentinel
	__used __section(__reservedmem_of_table_end);

/**
 * __reserved_mem_init_node() - call region specific reserved memory init code
 */
static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
{
	extern const struct of_device_id __reservedmem_of_table[];
	const struct of_device_id *i;

	for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
		reservedmem_of_init_fn initfn = i->data;
		const char *compat = i->compatible;

		if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
			continue;

		if (initfn(rmem) == 0) {
			pr_info("Reserved memory: initialized node %s, compatible id %s\n",
				rmem->name, compat);
			return 0;
		}
	}
	return -ENOENT;
}
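
/*
 * Example (illustrative sketch): a region-specific handler is found by the
 * table scan above after registering itself with RESERVEDMEM_OF_DECLARE().
 * The handler name "rmem_example_setup" and the "vendor,example-pool"
 * compatible string are hypothetical and only show the shape of such a
 * registration.
 *
 *	static int __init rmem_example_setup(struct reserved_mem *rmem)
 *	{
 *		pr_info("Reserved memory: example pool at %pa, size %lu MiB\n",
 *			&rmem->base, (unsigned long)rmem->size / SZ_1M);
 *		return 0;
 *	}
 *	RESERVEDMEM_OF_DECLARE(example, "vendor,example-pool", rmem_example_setup);
 */
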
static int __init __rmem_cmp(const void *a, const void *b)
{
	const struct reserved_mem *ra = a, *rb = b;

	if (ra->base < rb->base)
		return -1;

	if (ra->base > rb->base)
		return 1;

	return 0;
}

static void __init __rmem_check_for_overlap(void)
{
	int i;

	if (reserved_mem_count < 2)
		return;

	sort(reserved_mem, reserved_mem_count, sizeof(reserved_mem[0]),
	     __rmem_cmp, NULL);
	for (i = 0; i < reserved_mem_count - 1; i++) {
		struct reserved_mem *this, *next;

		this = &reserved_mem[i];
		next = &reserved_mem[i + 1];
		if (!(this->base && next->base))
			continue;
		if (this->base + this->size > next->base) {
			phys_addr_t this_end, next_end;

			this_end = this->base + this->size;
			next_end = next->base + next->size;
			pr_err("Reserved memory: OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n",
			       this->name, &this->base, &this_end,
			       next->name, &next->base, &next_end);
		}
	}
}

/**
 * fdt_init_reserved_mem() - allocate and init all saved reserved memory regions
 */
void __init fdt_init_reserved_mem(void)
{
	int i;

	/* check for overlapping reserved regions */
	__rmem_check_for_overlap();

	for (i = 0; i < reserved_mem_count; i++) {
		struct reserved_mem *rmem = &reserved_mem[i];
		unsigned long node = rmem->fdt_node;
		int len;
		const __be32 *prop;
		int err = 0;

		prop = of_get_flat_dt_prop(node, "phandle", &len);
		if (!prop)
			prop = of_get_flat_dt_prop(node, "linux,phandle", &len);
		if (prop)
			rmem->phandle = of_read_number(prop, len/4);

		if (rmem->size == 0)
			err = __reserved_mem_alloc_size(node, rmem->name,
						 &rmem->base, &rmem->size);
		if (err == 0)
			__reserved_mem_init_node(rmem);
	}
}

static inline struct reserved_mem *__find_rmem(struct device_node *node)
{
	unsigned int i;

	if (!node->phandle)
		return NULL;

	for (i = 0; i < reserved_mem_count; i++)
		if (reserved_mem[i].phandle == node->phandle)
			return &reserved_mem[i];
	return NULL;
}

/**
 * of_reserved_mem_device_init() - assign reserved memory region to given device
 *
 * This function assigns the memory region pointed to by the "memory-region"
 * device tree property to the given device.
 */
int of_reserved_mem_device_init(struct device *dev)
{
	struct reserved_mem *rmem;
	struct device_node *np;
	int ret;

	np = of_parse_phandle(dev->of_node, "memory-region", 0);
	if (!np)
		return -ENODEV;

	rmem = __find_rmem(np);
	of_node_put(np);

	if (!rmem || !rmem->ops || !rmem->ops->device_init)
		return -EINVAL;

	ret = rmem->ops->device_init(rmem, dev);
	if (ret == 0)
		dev_info(dev, "assigned reserved memory node %s\n", rmem->name);

	return ret;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init);
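
/*
 * Example (illustrative sketch): how a driver might pick up its region. The
 * driver probe function "foo_probe" and the "&example_pool" label below are
 * hypothetical; only of_reserved_mem_device_init() and the "memory-region"
 * property come from this file and its binding.
 *
 *	In the consumer's device node:
 *		memory-region = <&example_pool>;
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = of_reserved_mem_device_init(&pdev->dev);
 *		if (ret && ret != -ENODEV)
 *			return ret;
 *
 *		return 0;
 *	}
 *
 * The matching of_reserved_mem_device_release() call below would typically
 * go in the driver's remove path.
 */
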
/**
 * of_reserved_mem_device_release() - release reserved memory device structures
 *
 * This function releases structures allocated for memory region handling for
 * the given device.
 */
void of_reserved_mem_device_release(struct device *dev)
{
	struct reserved_mem *rmem;
	struct device_node *np;

	np = of_parse_phandle(dev->of_node, "memory-region", 0);
	if (!np)
		return;

	rmem = __find_rmem(np);
	of_node_put(np);

	if (!rmem || !rmem->ops || !rmem->ops->device_release)
		return;

	rmem->ops->device_release(rmem, dev);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_release);