// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/hwspinlock.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>

/*
 * The Qualcomm shared memory system is an allocate-only heap structure that
 * consists of one or more memory areas that can be accessed by the processors
 * in the SoC.
 *
 * All systems contain a global heap, accessible by all processors in the
 * SoC, with a table of contents data structure (@smem_header) at the
 * beginning of the main shared memory block.
 *
 * The global header contains meta data for allocations as well as a fixed
 * list of 512 entries (@smem_global_entry) that can be initialized to
 * reference parts of the shared memory space.
 *
 * In addition to this global heap a set of "private" heaps can be set up at
 * boot time with access restrictions so that only certain processor pairs
 * can access the data.
 *
 * These partitions are referenced from an optional partition table
 * (@smem_ptable), that is found 4kB from the end of the main smem region.
 * The partition table entries (@smem_ptable_entry) list the involved
 * processors (or hosts) and their location in the main shared memory region.
 *
 * Each partition starts with a header (@smem_partition_header) that
 * identifies the partition and holds properties for the two internal memory
 * regions. The two regions are cached and non-cached memory respectively.
 * Each region contains a linked list of allocation headers
 * (@smem_private_entry) followed by their data.
 *
 * Items in the non-cached region are allocated from the start of the
 * partition while items in the cached region are allocated from the end.
 * The free area is hence the region between the cached and non-cached
 * offsets. The header of cached items comes after the data.
 *
 * Version 12 (SMEM_GLOBAL_PART_VERSION) changes the item alloc/get procedure
 * for the global heap. A new global partition is created from the global
 * heap region with partition type (SMEM_GLOBAL_HOST) and the max smem item
 * count is set by the bootloader.
 *
 * To synchronize allocations in the shared memory heaps a remote spinlock
 * must be held - currently lock number 3 of the sfpb or tcsr is used for
 * this on all platforms.
 */
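
/*
 * A rough sketch of the resulting layout, assuming a single private
 * partition (sizes not to scale; addresses increase downwards):
 *
 *   +-------------------------------+  <- start of main smem region
 *   | smem_header (toc[512], ...)   |
 *   | global heap items ...         |
 *   +-------------------------------+
 *   | smem_partition_header         |
 *   | uncached items, growing down  |
 *   |              ...              |
 *   |          free space           |
 *   |              ...              |
 *   | cached items, growing up      |
 *   +-------------------------------+  <- end of partition
 *   | smem_ptable                   |  <- 4kB from end of main region
 *   +-------------------------------+
 */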

/*
 * The version member of the smem header contains an array of versions for
 * the various software components in the SoC. We verify that the boot loader
 * version is a valid version as a sanity check.
 */
#define SMEM_MASTER_SBL_VERSION_INDEX	7
#define SMEM_GLOBAL_HEAP_VERSION	11
#define SMEM_GLOBAL_PART_VERSION	12

/*
 * The first 8 items are only to be allocated by the boot loader while
 * initializing the heap.
 */
#define SMEM_ITEM_LAST_FIXED	8

/* Highest accepted item number, for both global and private heaps */
#define SMEM_ITEM_COUNT		512

/* Processor/host identifier for the application processor */
#define SMEM_HOST_APPS		0

/* Processor/host identifier for the global partition */
#define SMEM_GLOBAL_HOST	0xfffe

/* Max number of processors/hosts in a system */
#define SMEM_HOST_COUNT		11

/**
 * struct smem_proc_comm - proc_comm communication struct (legacy)
 * @command: current command to be executed
 * @status: status of the currently requested command
 * @params: parameters to the command
 */
struct smem_proc_comm {
	__le32 command;
	__le32 status;
	__le32 params[2];
};

/**
 * struct smem_global_entry - entry to reference smem items on the heap
 * @allocated: boolean to indicate if this entry is used
 * @offset: offset to the allocated space
 * @size: size of the allocated space, 8 byte aligned
 * @aux_base: base address for the memory region used by this unit, or 0 for
 *	      the default region. bits 0,1 are reserved
 */
struct smem_global_entry {
	__le32 allocated;
	__le32 offset;
	__le32 size;
	__le32 aux_base; /* bits 1:0 reserved */
};
#define AUX_BASE_MASK		0xfffffffc

/**
 * struct smem_header - header found in beginning of primary smem region
 * @proc_comm: proc_comm communication interface (legacy)
 * @version: array of versions for the various subsystems
 * @initialized: boolean to indicate that smem is initialized
 * @free_offset: index of the first unallocated byte in smem
 * @available: number of bytes available for allocation
 * @reserved: reserved field, must be 0
 * @toc: array of references to items
 */
struct smem_header {
	struct smem_proc_comm proc_comm[4];
	__le32 version[32];
	__le32 initialized;
	__le32 free_offset;
	__le32 available;
	__le32 reserved;
	struct smem_global_entry toc[SMEM_ITEM_COUNT];
};

/**
 * struct smem_ptable_entry - one entry in the @smem_ptable list
 * @offset: offset, within the main shared memory region, of the partition
 * @size: size of the partition
 * @flags: flags for the partition (currently unused)
 * @host0: first processor/host with access to this partition
 * @host1: second processor/host with access to this partition
 * @cacheline: alignment for "cached" entries
 * @reserved: reserved entries for later use
 */
struct smem_ptable_entry {
	__le32 offset;
	__le32 size;
	__le32 flags;
	__le16 host0;
	__le16 host1;
	__le32 cacheline;
	__le32 reserved[7];
};

/**
 * struct smem_ptable - partition table for the private partitions
 * @magic: magic number, must be SMEM_PTABLE_MAGIC
 * @version: version of the partition table
 * @num_entries: number of partitions in the table
 * @reserved: for now reserved entries
 * @entry: list of @smem_ptable_entry for the @num_entries partitions
 */
struct smem_ptable {
	u8 magic[4];
	__le32 version;
	__le32 num_entries;
	__le32 reserved[5];
	struct smem_ptable_entry entry[];
};

static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */

/**
 * struct smem_partition_header - header of the partitions
 * @magic: magic number, must be SMEM_PART_MAGIC
 * @host0: first processor/host with access to this partition
 * @host1: second processor/host with access to this partition
 * @size: size of the partition
 * @offset_free_uncached: offset to the first free byte of uncached memory in
 *	this partition
 * @offset_free_cached: offset to the first free byte of cached memory in this
 *	partition
 * @reserved: for now reserved entries
 */
struct smem_partition_header {
	u8 magic[4];
	__le16 host0;
	__le16 host1;
	__le32 size;
	__le32 offset_free_uncached;
	__le32 offset_free_cached;
	__le32 reserved[3];
};

static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 }; /* "$PRT" */

/**
 * struct smem_private_entry - header of each item in the private partition
 * @canary: magic number, must be SMEM_PRIVATE_CANARY
 * @item: identifying number of the smem item
 * @size: size of the data, including padding bytes
 * @padding_data: number of bytes of padding of data
 * @padding_hdr: number of bytes of padding between the header and the data
 * @reserved: for now reserved entry
 */
struct smem_private_entry {
	u16 canary; /* bytes are the same so no swapping needed */
	__le16 item;
	__le32 size; /* includes padding bytes */
	__le16 padding_data;
	__le16 padding_hdr;
	__le32 reserved;
};
#define SMEM_PRIVATE_CANARY	0xa5a5

/**
 * struct smem_info - smem region info located after the table of contents
 * @magic: magic number, must be SMEM_INFO_MAGIC
 * @size: size of the smem region
 * @base_addr: base address of the smem region
 * @reserved: for now reserved entry
 * @num_items: highest accepted item number
 */
struct smem_info {
	u8 magic[4];
	__le32 size;
	__le32 base_addr;
	__le32 reserved;
	__le16 num_items;
};

static const u8 SMEM_INFO_MAGIC[] = { 0x53, 0x49, 0x49, 0x49 }; /* "SIII" */

/**
 * struct smem_region - representation of a chunk of memory used for smem
 * @aux_base: identifier of aux_mem base
 * @virt_base: virtual base address of memory with this aux_mem identifier
 * @size: size of the memory region
 */
struct smem_region {
	u32 aux_base;
	void __iomem *virt_base;
	size_t size;
};

/**
 * struct qcom_smem - device data for the smem device
 * @dev: device pointer
 * @hwlock: reference to a hwspinlock
 * @global_partition: pointer to global partition when in use
 * @global_cacheline: cacheline size for global partition
 * @partitions: list of pointers to partitions affecting the current
 *	processor/host
 * @cacheline: list of cacheline sizes for each host
 * @item_count: max accepted item number
 * @socinfo: platform device for the socinfo driver
 * @num_regions: number of @regions
 * @regions: list of the memory regions defining the shared memory
 */
struct qcom_smem {
	struct device *dev;
	struct hwspinlock *hwlock;
	struct smem_partition_header *global_partition;
	size_t global_cacheline;
	struct smem_partition_header *partitions[SMEM_HOST_COUNT];
	size_t cacheline[SMEM_HOST_COUNT];
	u32 item_count;
	struct platform_device *socinfo;
	unsigned num_regions;
	struct smem_region regions[];
};

static void *
phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + le32_to_cpu(phdr->offset_free_uncached);
}

static struct smem_private_entry *
phdr_to_first_cached_entry(struct smem_partition_header *phdr,
			   size_t cacheline)
{
	void *p = phdr;
	struct smem_private_entry *e;

	return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline);
}

static void *
phdr_to_last_cached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + le32_to_cpu(phdr->offset_free_cached);
}

static struct smem_private_entry *
phdr_to_first_uncached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + sizeof(*phdr);
}

static struct smem_private_entry *
uncached_entry_next(struct smem_private_entry *e)
{
	void *p = e;

	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
	       le32_to_cpu(e->size);
}

static struct smem_private_entry *
cached_entry_next(struct smem_private_entry *e, size_t cacheline)
{
	void *p = e;

	return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
}

static void *uncached_entry_to_item(struct smem_private_entry *e)
{
	void *p = e;

	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
}

static void *cached_entry_to_item(struct smem_private_entry *e)
{
	void *p = e;

	return p - le32_to_cpu(e->size);
}
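
/*
 * A minimal sketch of the entry layout the helpers above assume.  Uncached
 * entries grow from the start of the partition, header first:
 *
 *   | smem_private_entry | padding_hdr | item data | padding_data |
 *   ^                                  ^
 *   e                                  uncached_entry_to_item(e)
 *
 * Cached entries are mirrored and grow from the end of the partition, so
 * each header comes after its data and the cached walkers step towards
 * lower addresses.
 */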

/* Pointer to the one and only smem handle */
static struct qcom_smem *__smem;

/* Timeout (ms) for the trylock of remote spinlocks */
#define HWSPINLOCK_TIMEOUT	1000

static int qcom_smem_alloc_private(struct qcom_smem *smem,
				   struct smem_partition_header *phdr,
				   unsigned item,
				   size_t size)
{
	struct smem_private_entry *hdr, *end;
	size_t alloc_size;
	void *cached;

	hdr = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);
	cached = phdr_to_last_cached_entry(phdr);

	while (hdr < end) {
		if (hdr->canary != SMEM_PRIVATE_CANARY)
			goto bad_canary;
		if (le16_to_cpu(hdr->item) == item)
			return -EEXIST;

		hdr = uncached_entry_next(hdr);
	}

	/* Check that we don't grow into the cached region */
	alloc_size = sizeof(*hdr) + ALIGN(size, 8);
	if ((void *)hdr + alloc_size > cached) {
		dev_err(smem->dev, "Out of memory\n");
		return -ENOSPC;
	}

	hdr->canary = SMEM_PRIVATE_CANARY;
	hdr->item = cpu_to_le16(item);
	hdr->size = cpu_to_le32(ALIGN(size, 8));
	hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
	hdr->padding_hdr = 0;

	/*
	 * Ensure the header is written before we advance the free offset, so
	 * that remote processors that do not take the remote spinlock still
	 * get a consistent view of the linked list.
	 */
	wmb();
	le32_add_cpu(&phdr->offset_free_uncached, alloc_size);

	return 0;
bad_canary:
	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
		le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));

	return -EINVAL;
}

static int qcom_smem_alloc_global(struct qcom_smem *smem,
				  unsigned item,
				  size_t size)
{
	struct smem_global_entry *entry;
	struct smem_header *header;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (entry->allocated)
		return -EEXIST;

	size = ALIGN(size, 8);
	if (WARN_ON(size > le32_to_cpu(header->available)))
		return -ENOMEM;

	entry->offset = header->free_offset;
	entry->size = cpu_to_le32(size);

	/*
	 * Ensure the header is consistent before we mark the item allocated,
	 * so that remote processors will get a consistent view of the item
	 * even though they do not take the spinlock on read.
	 */
	wmb();
	entry->allocated = cpu_to_le32(1);

	le32_add_cpu(&header->free_offset, size);
	le32_add_cpu(&header->available, -size);

	return 0;
}

/**
 * qcom_smem_alloc() - allocate space for a smem item
 * @host: remote processor id, or -1
 * @item: smem item handle
 * @size: number of bytes to be allocated
 *
 * Allocate space for a given smem item of size @size, given that the item is
 * not yet allocated.
 */
int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
{
	struct smem_partition_header *phdr;
	unsigned long flags;
	int ret;

	if (!__smem)
		return -EPROBE_DEFER;

	if (item < SMEM_ITEM_LAST_FIXED) {
		dev_err(__smem->dev,
			"Rejecting allocation of static entry %d\n", item);
		return -EINVAL;
	}

	if (WARN_ON(item >= __smem->item_count))
		return -EINVAL;

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
					  HWSPINLOCK_TIMEOUT,
					  &flags);
	if (ret)
		return ret;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
		phdr = __smem->partitions[host];
		ret = qcom_smem_alloc_private(__smem, phdr, item, size);
	} else if (__smem->global_partition) {
		phdr = __smem->global_partition;
		ret = qcom_smem_alloc_private(__smem, phdr, item, size);
	} else {
		ret = qcom_smem_alloc_global(__smem, item, size);
	}

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

	return ret;
}
EXPORT_SYMBOL(qcom_smem_alloc);
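
/*
 * Example: a minimal sketch of how a client might allocate an item.  The
 * item id (MY_SMEM_ITEM) and remote host id below are hypothetical; real
 * clients use numbers agreed upon with the remote firmware.
 *
 *	#define MY_SMEM_ITEM	400	// hypothetical item id
 *
 *	ret = qcom_smem_alloc(remote_host, MY_SMEM_ITEM, sizeof(struct foo));
 *	if (ret < 0 && ret != -EEXIST)
 *		return ret;	// e.g. -EPROBE_DEFER if smem is not up yet
 */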

static void *qcom_smem_get_global(struct qcom_smem *smem,
				  unsigned item,
				  size_t *size)
{
	struct smem_header *header;
	struct smem_region *region;
	struct smem_global_entry *entry;
	u32 aux_base;
	unsigned i;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (!entry->allocated)
		return ERR_PTR(-ENXIO);

	aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;

	for (i = 0; i < smem->num_regions; i++) {
		region = &smem->regions[i];

		if (region->aux_base == aux_base || !aux_base) {
			if (size != NULL)
				*size = le32_to_cpu(entry->size);
			return region->virt_base + le32_to_cpu(entry->offset);
		}
	}

	return ERR_PTR(-ENOENT);
}

static void *qcom_smem_get_private(struct qcom_smem *smem,
				   struct smem_partition_header *phdr,
				   size_t cacheline,
				   unsigned item,
				   size_t *size)
{
	struct smem_private_entry *e, *end;

	e = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);

	while (e < end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL)
				*size = le32_to_cpu(e->size) -
					le16_to_cpu(e->padding_data);

			return uncached_entry_to_item(e);
		}

		e = uncached_entry_next(e);
	}

	/* Item was not found in the uncached list, search the cached list */

	e = phdr_to_first_cached_entry(phdr, cacheline);
	end = phdr_to_last_cached_entry(phdr);

	while (e > end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL)
				*size = le32_to_cpu(e->size) -
					le16_to_cpu(e->padding_data);

			return cached_entry_to_item(e);
		}

		e = cached_entry_next(e, cacheline);
	}

	return ERR_PTR(-ENOENT);

invalid_canary:
	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
		le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));

	return ERR_PTR(-EINVAL);
}

/**
 * qcom_smem_get() - resolve pointer and size of a smem item
 * @host: the remote processor, or -1
 * @item: smem item handle
 * @size: pointer to be filled out with size of the item
 *
 * Looks up smem item and returns pointer to it. Size of smem
 * item is returned in @size.
 */
void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
{
	struct smem_partition_header *phdr;
	unsigned long flags;
	size_t cacheln;
	int ret;
	void *ptr = ERR_PTR(-EPROBE_DEFER);

	if (!__smem)
		return ptr;

	if (WARN_ON(item >= __smem->item_count))
		return ERR_PTR(-EINVAL);

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
					  HWSPINLOCK_TIMEOUT,
					  &flags);
	if (ret)
		return ERR_PTR(ret);

	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
		phdr = __smem->partitions[host];
		cacheln = __smem->cacheline[host];
		ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
	} else if (__smem->global_partition) {
		phdr = __smem->global_partition;
		cacheln = __smem->global_cacheline;
		ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
	} else {
		ptr = qcom_smem_get_global(__smem, item, size);
	}

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

	return ptr;
}
EXPORT_SYMBOL(qcom_smem_get);
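
/*
 * Example: a sketch of the typical lookup pattern.  MY_SMEM_ITEM and
 * remote_host are hypothetical placeholders, as above.
 *
 *	size_t size;
 *	struct foo *ptr;
 *
 *	ptr = qcom_smem_get(remote_host, MY_SMEM_ITEM, &size);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	if (size < sizeof(*ptr))
 *		return -EINVAL;	// item smaller than expected
 */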

/**
 * qcom_smem_get_free_space() - retrieve amount of free space in a partition
 * @host: the remote processor identifying a partition, or -1
 *
 * To be used by smem clients as a quick way to determine if any new
 * allocations have been made.
 */
int qcom_smem_get_free_space(unsigned host)
{
	struct smem_partition_header *phdr;
	struct smem_header *header;
	unsigned ret;

	if (!__smem)
		return -EPROBE_DEFER;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
		phdr = __smem->partitions[host];
		ret = le32_to_cpu(phdr->offset_free_cached) -
		      le32_to_cpu(phdr->offset_free_uncached);
	} else if (__smem->global_partition) {
		phdr = __smem->global_partition;
		ret = le32_to_cpu(phdr->offset_free_cached) -
		      le32_to_cpu(phdr->offset_free_uncached);
	} else {
		header = __smem->regions[0].virt_base;
		ret = le32_to_cpu(header->available);
	}

	return ret;
}
EXPORT_SYMBOL(qcom_smem_get_free_space);
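
/*
 * Example: a sketch of the "did anything change" poll this enables.  Since
 * the heap is allocate-only, free space only shrinks, so a client can
 * compare snapshots; remote_host is a hypothetical placeholder.
 *
 *	int before = qcom_smem_get_free_space(remote_host);
 *	// ... let the remote processor run ...
 *	if (qcom_smem_get_free_space(remote_host) != before)
 *		;// new items were allocated, rescan with qcom_smem_get()
 */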

/**
 * qcom_smem_virt_to_phys() - return the physical address associated
 * with an smem item pointer (previously returned by qcom_smem_get())
 * @p: the virtual address to convert
 *
 * Returns 0 if the pointer provided is not within any smem region.
 */
phys_addr_t qcom_smem_virt_to_phys(void *p)
{
	unsigned i;

	for (i = 0; i < __smem->num_regions; i++) {
		struct smem_region *region = &__smem->regions[i];

		if (p < region->virt_base)
			continue;
		if (p < region->virt_base + region->size) {
			u64 offset = p - region->virt_base;

			return (phys_addr_t)region->aux_base + offset;
		}
	}

	return 0;
}
EXPORT_SYMBOL(qcom_smem_virt_to_phys);
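
/*
 * Example: a sketch of handing an smem item to hardware that needs a
 * physical address, continuing the hypothetical lookup from above.  The
 * DESC_ADDR register offset is made up for illustration.
 *
 *	phys_addr_t phys = qcom_smem_virt_to_phys(ptr);
 *
 *	if (!phys)
 *		return -EFAULT;	// pointer was outside every smem region
 *	writel(lower_32_bits(phys), base + DESC_ADDR);	// hypothetical reg
 */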

static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
{
	struct smem_header *header;
	__le32 *versions;

	header = smem->regions[0].virt_base;
	versions = header->version;

	return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
}

static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
{
	struct smem_ptable *ptable;
	u32 version;

	ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
	if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
		return ERR_PTR(-ENOENT);

	version = le32_to_cpu(ptable->version);
	if (version != 1) {
		dev_err(smem->dev,
			"Unsupported partition header version %d\n", version);
		return ERR_PTR(-EINVAL);
	}
	return ptable;
}

static u32 qcom_smem_get_item_count(struct qcom_smem *smem)
{
	struct smem_ptable *ptable;
	struct smem_info *info;

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR_OR_NULL(ptable))
		return SMEM_ITEM_COUNT;

	info = (struct smem_info *)&ptable->entry[ptable->num_entries];
	if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
		return SMEM_ITEM_COUNT;

	return le16_to_cpu(info->num_items);
}

/*
 * Validate the partition header for a partition whose partition
 * table entry is supplied.  Returns a pointer to its header if
 * valid, or a null pointer otherwise.
 */
static struct smem_partition_header *
qcom_smem_partition_header(struct qcom_smem *smem,
		struct smem_ptable_entry *entry, u16 host0, u16 host1)
{
	struct smem_partition_header *header;
	u32 size;

	header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);

	if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
		dev_err(smem->dev, "bad partition magic %02x %02x %02x %02x\n",
			header->magic[0], header->magic[1],
			header->magic[2], header->magic[3]);
		return NULL;
	}

	if (host0 != le16_to_cpu(header->host0)) {
		dev_err(smem->dev, "bad host0 (%hu != %hu)\n",
				host0, le16_to_cpu(header->host0));
		return NULL;
	}
	if (host1 != le16_to_cpu(header->host1)) {
		dev_err(smem->dev, "bad host1 (%hu != %hu)\n",
				host1, le16_to_cpu(header->host1));
		return NULL;
	}

	size = le32_to_cpu(header->size);
	if (size != le32_to_cpu(entry->size)) {
		dev_err(smem->dev, "bad partition size (%u != %u)\n",
			size, le32_to_cpu(entry->size));
		return NULL;
	}

	if (le32_to_cpu(header->offset_free_uncached) > size) {
		dev_err(smem->dev, "bad partition free uncached (%u > %u)\n",
			le32_to_cpu(header->offset_free_uncached), size);
		return NULL;
	}

	return header;
}

static int qcom_smem_set_global_partition(struct qcom_smem *smem)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	bool found = false;
	int i;

	if (smem->global_partition) {
		dev_err(smem->dev, "Already found the global partition\n");
		return -EINVAL;
	}

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR(ptable))
		return PTR_ERR(ptable);

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		if (!le32_to_cpu(entry->offset))
			continue;
		if (!le32_to_cpu(entry->size))
			continue;

		if (le16_to_cpu(entry->host0) != SMEM_GLOBAL_HOST)
			continue;

		if (le16_to_cpu(entry->host1) == SMEM_GLOBAL_HOST) {
			found = true;
			break;
		}
	}

	if (!found) {
		dev_err(smem->dev, "Missing entry for global partition\n");
		return -EINVAL;
	}

	header = qcom_smem_partition_header(smem, entry,
				SMEM_GLOBAL_HOST, SMEM_GLOBAL_HOST);
	if (!header)
		return -EINVAL;

	smem->global_partition = header;
	smem->global_cacheline = le32_to_cpu(entry->cacheline);

	return 0;
}

static int
qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	unsigned int remote_host;
	u16 host0, host1;
	int i;

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR(ptable))
		return PTR_ERR(ptable);

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		if (!le32_to_cpu(entry->offset))
			continue;
		if (!le32_to_cpu(entry->size))
			continue;

		host0 = le16_to_cpu(entry->host0);
		host1 = le16_to_cpu(entry->host1);
		if (host0 == local_host)
			remote_host = host1;
		else if (host1 == local_host)
			remote_host = host0;
		else
			continue;

		if (remote_host >= SMEM_HOST_COUNT) {
			dev_err(smem->dev, "bad host %u\n", remote_host);
			return -EINVAL;
		}

		if (smem->partitions[remote_host]) {
			dev_err(smem->dev, "duplicate host %u\n", remote_host);
			return -EINVAL;
		}

		header = qcom_smem_partition_header(smem, entry, host0, host1);
		if (!header)
			return -EINVAL;

		smem->partitions[remote_host] = header;
		smem->cacheline[remote_host] = le32_to_cpu(entry->cacheline);
	}

	return 0;
}

static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev,
				const char *name, int i)
{
	struct device_node *np;
	struct resource r;
	resource_size_t size;
	int ret;

	np = of_parse_phandle(dev->of_node, name, 0);
	if (!np) {
		dev_err(dev, "No %s specified\n", name);
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &r);
	of_node_put(np);
	if (ret)
		return ret;
	size = resource_size(&r);

	smem->regions[i].virt_base = devm_ioremap_wc(dev, r.start, size);
	if (!smem->regions[i].virt_base)
		return -ENOMEM;
	smem->regions[i].aux_base = (u32)r.start;
	smem->regions[i].size = size;

	return 0;
}

static int qcom_smem_probe(struct platform_device *pdev)
{
	struct smem_header *header;
	struct qcom_smem *smem;
	size_t array_size;
	int num_regions;
	int hwlock_id;
	u32 version;
	int ret;

	num_regions = 1;
	if (of_find_property(pdev->dev.of_node, "qcom,rpm-msg-ram", NULL))
		num_regions++;

	array_size = num_regions * sizeof(struct smem_region);
	smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL);
	if (!smem)
		return -ENOMEM;

	smem->dev = &pdev->dev;
	smem->num_regions = num_regions;

	ret = qcom_smem_map_memory(smem, &pdev->dev, "memory-region", 0);
	if (ret)
		return ret;

	if (num_regions > 1 && (ret = qcom_smem_map_memory(smem, &pdev->dev,
					"qcom,rpm-msg-ram", 1)))
		return ret;

	header = smem->regions[0].virt_base;
	if (le32_to_cpu(header->initialized) != 1 ||
	    le32_to_cpu(header->reserved)) {
		dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
		return -EINVAL;
	}

	version = qcom_smem_get_sbl_version(smem);
	switch (version >> 16) {
	case SMEM_GLOBAL_PART_VERSION:
		ret = qcom_smem_set_global_partition(smem);
		if (ret < 0)
			return ret;
		smem->item_count = qcom_smem_get_item_count(smem);
		break;
	case SMEM_GLOBAL_HEAP_VERSION:
		smem->item_count = SMEM_ITEM_COUNT;
		break;
	default:
		dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
		return -EINVAL;
	}

	BUILD_BUG_ON(SMEM_HOST_APPS >= SMEM_HOST_COUNT);
	ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
	if (ret < 0 && ret != -ENOENT)
		return ret;

	hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
	if (hwlock_id < 0) {
		if (hwlock_id != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to retrieve hwlock\n");
		return hwlock_id;
	}

	smem->hwlock = hwspin_lock_request_specific(hwlock_id);
	if (!smem->hwlock)
		return -ENXIO;

	__smem = smem;

	smem->socinfo = platform_device_register_data(&pdev->dev, "qcom-socinfo",
						      PLATFORM_DEVID_NONE, NULL,
						      0);
	if (IS_ERR(smem->socinfo))
		dev_dbg(&pdev->dev, "failed to register socinfo device\n");

	return 0;
}

static int qcom_smem_remove(struct platform_device *pdev)
{
	platform_device_unregister(__smem->socinfo);

	hwspin_lock_free(__smem->hwlock);
	__smem = NULL;

	return 0;
}

static const struct of_device_id qcom_smem_of_match[] = {
	{ .compatible = "qcom,smem" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smem_of_match);

static struct platform_driver qcom_smem_driver = {
	.probe = qcom_smem_probe,
	.remove = qcom_smem_remove,
	.driver  = {
		.name = "qcom-smem",
		.of_match_table = qcom_smem_of_match,
		.suppress_bind_attrs = true,
	},
};

static int __init qcom_smem_init(void)
{
	return platform_driver_register(&qcom_smem_driver);
}
arch_initcall(qcom_smem_init);

static void __exit qcom_smem_exit(void)
{
	platform_driver_unregister(&qcom_smem_driver);
}
module_exit(qcom_smem_exit);

MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
MODULE_DESCRIPTION("Qualcomm Shared Memory Manager");
MODULE_LICENSE("GPL v2");