// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, 2017, 2022 Linaro Limited
 */
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include "tee_private.h"
2022-02-04 12:33:53 +03:00
static int pool_op_gen_alloc ( struct tee_shm_pool * pool , struct tee_shm * shm ,
size_t size , size_t align )
2015-03-11 16:39:39 +03:00
{
unsigned long va ;
2022-02-04 12:33:53 +03:00
struct gen_pool * genpool = pool - > private_data ;
size_t a = max_t ( size_t , align , BIT ( genpool - > min_alloc_order ) ) ;
struct genpool_data_align data = { . align = a } ;
size_t s = roundup ( size , a ) ;
2015-03-11 16:39:39 +03:00
2022-02-04 12:33:53 +03:00
va = gen_pool_alloc_algo ( genpool , s , gen_pool_first_fit_align , & data ) ;
2015-03-11 16:39:39 +03:00
if ( ! va )
return - ENOMEM ;
memset ( ( void * ) va , 0 , s ) ;
shm - > kaddr = ( void * ) va ;
shm - > paddr = gen_pool_virt_to_phys ( genpool , va ) ;
shm - > size = s ;
2022-02-04 12:33:53 +03:00
/*
* This is from a static shared memory pool so no need to register
* each chunk , and no need to unregister later either .
*/
2022-02-04 12:33:59 +03:00
shm - > flags & = ~ TEE_SHM_DYNAMIC ;
2015-03-11 16:39:39 +03:00
return 0 ;
}
2022-02-04 12:33:53 +03:00
static void pool_op_gen_free ( struct tee_shm_pool * pool , struct tee_shm * shm )
2015-03-11 16:39:39 +03:00
{
2022-02-04 12:33:53 +03:00
gen_pool_free ( pool - > private_data , ( unsigned long ) shm - > kaddr ,
2015-03-11 16:39:39 +03:00
shm - > size ) ;
shm - > kaddr = NULL ;
}
/*
 * Tear down a genpool-backed shared memory pool: destroy the underlying
 * gen_pool and free the pool descriptor itself. Callers must have freed
 * all outstanding allocations first (gen_pool_destroy() requirement).
 */
static void pool_op_gen_destroy_pool(struct tee_shm_pool *pool)
{
	gen_pool_destroy(pool->private_data);
	kfree(pool);
}
/* Ops table for pools whose private_data is a struct gen_pool. */
static const struct tee_shm_pool_ops pool_ops_generic = {
	.alloc = pool_op_gen_alloc,
	.free = pool_op_gen_free,
	.destroy_pool = pool_op_gen_destroy_pool,
};
2022-02-04 12:33:53 +03:00
struct tee_shm_pool * tee_shm_pool_alloc_res_mem ( unsigned long vaddr ,
phys_addr_t paddr , size_t size ,
int min_alloc_order )
2017-11-29 15:48:25 +03:00
{
const size_t page_mask = PAGE_SIZE - 1 ;
2022-02-04 12:33:53 +03:00
struct tee_shm_pool * pool ;
2017-11-29 15:48:25 +03:00
int rc ;
/* Start and end must be page aligned */
if ( vaddr & page_mask | | paddr & page_mask | | size & page_mask )
return ERR_PTR ( - EINVAL ) ;
2022-02-04 12:33:53 +03:00
pool = kzalloc ( sizeof ( * pool ) , GFP_KERNEL ) ;
if ( ! pool )
2017-11-29 15:48:25 +03:00
return ERR_PTR ( - ENOMEM ) ;
2022-02-04 12:33:53 +03:00
pool - > private_data = gen_pool_create ( min_alloc_order , - 1 ) ;
if ( ! pool - > private_data ) {
2017-11-29 15:48:25 +03:00
rc = - ENOMEM ;
2015-03-11 16:39:39 +03:00
goto err ;
2017-11-29 15:48:25 +03:00
}
2015-03-11 16:39:39 +03:00
2022-02-04 12:33:53 +03:00
rc = gen_pool_add_virt ( pool - > private_data , vaddr , paddr , size , - 1 ) ;
2017-11-29 15:48:25 +03:00
if ( rc ) {
2022-02-04 12:33:53 +03:00
gen_pool_destroy ( pool - > private_data ) ;
2017-11-29 15:48:25 +03:00
goto err ;
}
2022-02-04 12:33:53 +03:00
pool - > ops = & pool_ops_generic ;
2017-11-29 15:48:25 +03:00
2022-02-04 12:33:53 +03:00
return pool ;
2015-03-11 16:39:39 +03:00
err :
2022-02-04 12:33:53 +03:00
kfree ( pool ) ;
2017-11-29 15:48:25 +03:00
return ERR_PTR ( rc ) ;
2015-03-11 16:39:39 +03:00
}
2022-02-04 12:33:53 +03:00
EXPORT_SYMBOL_GPL ( tee_shm_pool_alloc_res_mem ) ;