optee: add page list to kernel private shared memory

Until now, kernel private shared memory allocated as dynamic shared memory
(that is, not from the static shared memory pool) has been returned without
a list of physical pages on allocations via RPC. To support allocations
larger than one page, add a list of physical pages.

Reviewed-by: Sumit Garg <sumit.garg@linaro.org>
Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
Jens Wiklander 2023-11-14 10:52:16 +01:00
parent b85ea95d08
commit 69724b3eac
2 changed files with 39 additions and 38 deletions
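
The change is easiest to see on the allocation side: the helper now always builds an array with one struct page pointer per page of the 1 << order allocation and stores it in the tee_shm, instead of building a throwaway array only when a shm_register callback is supplied. Below is a rough user-space model of that bookkeeping; plain addresses stand in for struct page pointers, and build_page_list, MODEL_PAGE_SIZE and the sample values are illustrative, not driver code.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MODEL_PAGE_SIZE 4096ULL

/*
 * Toy model of what the patched helper keeps in shm->pages:
 * one entry per page of a physically contiguous 1 << order allocation.
 */
static uint64_t *build_page_list(uint64_t base_pa, unsigned int order,
                                 size_t *nr_pages)
{
        size_t n = (size_t)1 << order;
        uint64_t *pages = calloc(n, sizeof(*pages));

        if (!pages)
                return NULL;
        for (size_t i = 0; i < n; i++)
                pages[i] = base_pa + i * MODEL_PAGE_SIZE; /* "page + i" in the driver */
        *nr_pages = n;
        return pages;
}

int main(void)
{
        size_t nr_pages;
        uint64_t *pages = build_page_list(0x80000000ULL, 2, &nr_pages);

        if (!pages)
                return 1;
        for (size_t i = 0; i < nr_pages; i++)
                printf("page %zu at 0x%llx\n", i, (unsigned long long)pages[i]);
        free(pages);
        return 0;
}

In the driver the array stays alive in shm->pages/shm->num_pages, so a later RPC allocation can hand the page list to the secure world, as the second file shows.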


@@ -27,7 +27,10 @@ int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
                                                    unsigned long start))
 {
        unsigned int order = get_order(size);
+       unsigned int nr_pages = 1 << order;
+       struct page **pages;
        struct page *page;
+       unsigned int i;
        int rc = 0;

        /*
@@ -42,30 +45,29 @@ int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
        shm->paddr = page_to_phys(page);
        shm->size = PAGE_SIZE << order;

+       pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
+       if (!pages) {
+               rc = -ENOMEM;
+               goto err;
+       }
+
+       for (i = 0; i < nr_pages; i++)
+               pages[i] = page + i;
+
+       shm->pages = pages;
+       shm->num_pages = nr_pages;
+
        if (shm_register) {
-               unsigned int nr_pages = 1 << order, i;
-               struct page **pages;
-
-               pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
-               if (!pages) {
-                       rc = -ENOMEM;
-                       goto err;
-               }
-
-               for (i = 0; i < nr_pages; i++)
-                       pages[i] = page + i;
-
                rc = shm_register(shm->ctx, shm, pages, nr_pages,
                                  (unsigned long)shm->kaddr);
-               kfree(pages);
                if (rc)
                        goto err;
        }

        return 0;

 err:
        free_pages((unsigned long)shm->kaddr, order);
+       shm->kaddr = NULL;
        return rc;
 }
@@ -77,6 +79,8 @@ void optee_pool_op_free_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
                shm_unregister(shm->ctx, shm);
        free_pages((unsigned long)shm->kaddr, get_order(shm->size));
        shm->kaddr = NULL;
+       kfree(shm->pages);
+       shm->pages = NULL;
 }

 static void optee_bus_scan(struct work_struct *work)
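
Because the allocation now hands ownership of the page array to the tee_shm, the free helper above gains the matching cleanup: kfree(shm->pages) and clearing the pointer. A minimal sketch of that alloc/free pairing, using a hypothetical model_shm struct rather than the kernel's struct tee_shm:

#include <stdlib.h>

/* Hypothetical stand-in for the few tee_shm fields the helpers touch. */
struct model_shm {
        void *kaddr;
        void **pages;
        size_t num_pages;
};

/*
 * Allocate the buffer and its page bookkeeping together, as the patched
 * alloc helper now does unconditionally.
 */
static int model_shm_alloc(struct model_shm *shm, size_t nr_pages,
                           size_t page_size)
{
        shm->kaddr = calloc(nr_pages, page_size);
        if (!shm->kaddr)
                return -1;

        shm->pages = calloc(nr_pages, sizeof(*shm->pages));
        if (!shm->pages) {
                free(shm->kaddr);
                shm->kaddr = NULL;
                return -1;
        }
        for (size_t i = 0; i < nr_pages; i++)
                shm->pages[i] = (char *)shm->kaddr + i * page_size;
        shm->num_pages = nr_pages;
        return 0;
}

/* Mirror of the free helper: release the buffer and the page array it owns. */
static void model_shm_free(struct model_shm *shm)
{
        free(shm->kaddr);
        shm->kaddr = NULL;
        free(shm->pages);
        shm->pages = NULL;
        shm->num_pages = 0;
}

int main(void)
{
        struct model_shm shm = { 0 };

        if (model_shm_alloc(&shm, 4, 4096))
                return 1;
        model_shm_free(&shm);
        return 0;
}

The second file's hunks below apply the same idea to the RPC allocation path: instead of reporting only a physically contiguous buffer address, the handler asks the tee_shm for its page list and registers that with the secure world.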


@@ -678,10 +678,11 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
                                          struct optee_msg_arg *arg,
                                          struct optee_call_ctx *call_ctx)
 {
-       phys_addr_t pa;
        struct tee_shm *shm;
        size_t sz;
        size_t n;
+       struct page **pages;
+       size_t page_count;

        arg->ret_origin = TEEC_ORIGIN_COMMS;
@@ -716,32 +717,23 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
                return;
        }

-       if (tee_shm_get_pa(shm, 0, &pa)) {
-               arg->ret = TEEC_ERROR_BAD_PARAMETERS;
-               goto bad;
-       }
-
-       sz = tee_shm_get_size(shm);
-
-       if (tee_shm_is_dynamic(shm)) {
-               struct page **pages;
+       /*
+        * If there are pages it's dynamically allocated shared memory (not
+        * from the reserved shared memory pool) and needs to be
+        * registered.
+        */
+       pages = tee_shm_get_pages(shm, &page_count);
+       if (pages) {
                u64 *pages_list;
-               size_t page_num;
-
-               pages = tee_shm_get_pages(shm, &page_num);
-               if (!pages || !page_num) {
-                       arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
-                       goto bad;
-               }

-               pages_list = optee_allocate_pages_list(page_num);
+               pages_list = optee_allocate_pages_list(page_count);
                if (!pages_list) {
                        arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
                        goto bad;
                }

                call_ctx->pages_list = pages_list;
-               call_ctx->num_entries = page_num;
+               call_ctx->num_entries = page_count;

                arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
                                      OPTEE_MSG_ATTR_NONCONTIG;
@@ -752,17 +744,22 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
                arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
                        (tee_shm_get_page_offset(shm) &
                         (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
-               arg->params[0].u.tmem.size = tee_shm_get_size(shm);
-               arg->params[0].u.tmem.shm_ref = (unsigned long)shm;

-               optee_fill_pages_list(pages_list, pages, page_num,
+               optee_fill_pages_list(pages_list, pages, page_count,
                                      tee_shm_get_page_offset(shm));
        } else {
+               phys_addr_t pa;
+
+               if (tee_shm_get_pa(shm, 0, &pa)) {
+                       arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+                       goto bad;
+               }
+
                arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
                arg->params[0].u.tmem.buf_ptr = pa;
-               arg->params[0].u.tmem.size = sz;
-               arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
        }
+       arg->params[0].u.tmem.size = tee_shm_get_size(shm);
+       arg->params[0].u.tmem.shm_ref = (unsigned long)shm;

        arg->ret = TEEC_SUCCESS;
        return;
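
For dynamically allocated shared memory the hunks above describe the buffer to the secure world as OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT | OPTEE_MSG_ATTR_NONCONTIG: buf_ptr carries the physical address of a pages list with the buffer's page offset packed into its low bits, and optee_fill_pages_list() fills that list from the tee_shm's pages. The following is a deliberately simplified, flat model of that packing; the real OPTEE_MSG_ATTR_NONCONTIG list format handled by optee_fill_pages_list() is more involved, and all names and addresses here are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define MODEL_PAGE_SIZE 4096ULL

/* Flat model only: copy per-page physical addresses into a u64 list. */
static void model_fill_pages_list(uint64_t *list, const uint64_t *page_pa,
                                  size_t page_count)
{
        for (size_t i = 0; i < page_count; i++)
                list[i] = page_pa[i];
}

/* buf_ptr = address of the list, with the buffer's page offset in the low bits. */
static uint64_t model_buf_ptr(uint64_t list_pa, uint64_t page_offset)
{
        return list_pa | (page_offset & (MODEL_PAGE_SIZE - 1));
}

int main(void)
{
        uint64_t page_pa[3] = { 0x80001000ULL, 0x80002000ULL, 0x80003000ULL };
        uint64_t list[3];
        uint64_t list_pa = 0x80010000ULL; /* pretend physical address of 'list' */

        model_fill_pages_list(list, page_pa, 3);
        printf("buf_ptr = 0x%llx\n",
               (unsigned long long)model_buf_ptr(list_pa, 0x80));
        return 0;
}

Note how the last hunk also moves the u.tmem.size and u.tmem.shm_ref assignments after the if/else, so both the page-list case and the contiguous case share them instead of duplicating them per branch.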