// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2017, 2019-2021 Linaro Limited
 */
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include "tee_private.h"
2022-02-04 10:33:58 +01:00
/* Drop the page references taken by shm_get_kernel_pages(). */
static void shm_put_kernel_pages(struct page **pages, size_t page_count)
{
	size_t i;

	for (i = 0; i < page_count; i++)
		put_page(pages[i]);
}
/*
 * shm_get_kernel_pages() - Take references on the pages backing a kernel
 * buffer so they can be handed to secure world.
 * @start:	Kernel virtual address of the buffer (vmalloc or lowmem)
 * @page_count:	Number of pages covered by the buffer
 * @pages:	Output array, filled with @page_count page pointers on success
 *
 * Returns the number of pages referenced on success (== @page_count) or a
 * negative error code. On failure no page references are retained.
 */
static int shm_get_kernel_pages(unsigned long start, size_t page_count,
				struct page **pages)
{
	size_t n;
	int rc;

	if (is_vmalloc_addr((void *)start)) {
		struct page *page;

		for (n = 0; n < page_count; n++) {
			page = vmalloc_to_page((void *)(start + PAGE_SIZE * n));
			if (!page) {
				/*
				 * Drop the references already taken in the
				 * iterations above; the caller only releases
				 * pages it was told about via a positive
				 * return value, so leaving them referenced
				 * here would leak them.
				 */
				shm_put_kernel_pages(pages, n);
				return -ENOMEM;
			}

			get_page(page);
			pages[n] = page;
		}
		rc = page_count;
	} else {
		struct kvec *kiov;

		kiov = kcalloc(page_count, sizeof(*kiov), GFP_KERNEL);
		if (!kiov)
			return -ENOMEM;

		for (n = 0; n < page_count; n++) {
			kiov[n].iov_base = (void *)(start + n * PAGE_SIZE);
			kiov[n].iov_len = PAGE_SIZE;
		}

		rc = get_kernel_pages(kiov, page_count, 0, pages);
		kfree(kiov);
	}

	return rc;
}
2020-08-24 14:11:25 -07:00
static void release_registered_pages ( struct tee_shm * shm )
{
if ( shm - > pages ) {
2022-02-04 10:33:58 +01:00
if ( shm - > flags & TEE_SHM_USER_MAPPED )
2020-08-24 14:11:25 -07:00
unpin_user_pages ( shm - > pages , shm - > num_pages ) ;
2022-02-04 10:33:58 +01:00
else
shm_put_kernel_pages ( shm - > pages , shm - > num_pages ) ;
2020-08-24 14:11:25 -07:00
kfree ( shm - > pages ) ;
}
}
2021-12-09 15:59:37 +01:00
static void tee_shm_release ( struct tee_device * teedev , struct tee_shm * shm )
2015-03-11 14:39:39 +01:00
{
2017-11-29 14:48:26 +02:00
if ( shm - > flags & TEE_SHM_POOL ) {
2022-02-04 10:33:53 +01:00
teedev - > pool - > ops - > free ( teedev - > pool , shm ) ;
2022-02-04 10:33:59 +01:00
} else if ( shm - > flags & TEE_SHM_DYNAMIC ) {
2017-11-29 14:48:26 +02:00
int rc = teedev - > desc - > ops - > shm_unregister ( shm - > ctx , shm ) ;
if ( rc )
dev_err ( teedev - > dev . parent ,
" unregister shm %p failed: %d " , shm , rc ) ;
2020-08-24 14:11:25 -07:00
release_registered_pages ( shm ) ;
2017-11-29 14:48:26 +02:00
}
2015-03-11 14:39:39 +01:00
2019-11-07 11:42:59 +01:00
teedev_ctx_put ( shm - > ctx ) ;
2017-11-29 14:48:37 +02:00
2015-03-11 14:39:39 +01:00
kfree ( shm ) ;
tee_device_put ( teedev ) ;
}
2022-02-04 10:33:54 +01:00
/*
 * Common allocation path for the tee_shm_alloc_*() functions: allocates a
 * tee_shm and backs it with memory from the device pool.
 * @ctx:	Context the allocation belongs to
 * @size:	Requested size in bytes
 * @align:	Required alignment of the allocation
 * @flags:	TEE_SHM_* flags stored in the new shm
 * @id:		Pre-allocated IDR id, or -1 when none is needed
 */
static struct tee_shm *shm_alloc_helper(struct tee_context *ctx, size_t size,
					size_t align, u32 flags, int id)
{
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	void *ret;
	int rc;

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->pool) {
		/* teedev has been detached from the driver, no pool left */
		ret = ERR_PTR(-EINVAL);
		goto err_dev_put;
	}

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err_dev_put;
	}

	refcount_set(&shm->refcount, 1);
	shm->id = id;
	shm->flags = flags;
	/*
	 * The context is stored here since it's needed if the shm is to be
	 * registered. When this function returns OK the caller is expected
	 * to either call teedev_ctx_get() or to clear shm->ctx in case it's
	 * not needed any longer.
	 */
	shm->ctx = ctx;

	rc = teedev->pool->ops->alloc(teedev->pool, shm, size, align);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_kfree;
	}

	teedev_ctx_get(ctx);
	return shm;

err_kfree:
	kfree(shm);
err_dev_put:
	tee_device_put(teedev);
	return ret;
}
2022-02-04 10:33:52 +01:00
/**
 * tee_shm_alloc_user_buf() - Allocate shared memory for user space
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * Memory allocated as user space shared memory is automatically freed when
 * the TEE file pointer is closed. The primary usage of this function is
 * when the TEE driver doesn't support registering ordinary user space
 * memory.
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size)
{
	u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL;
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	void *old;
	int id;

	/* Reserve an id first so the shm can later be published under it */
	mutex_lock(&teedev->mutex);
	id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);
	if (id < 0)
		return ERR_PTR(id);

	shm = shm_alloc_helper(ctx, size, PAGE_SIZE, flags, id);
	if (IS_ERR(shm)) {
		/* Allocation failed, give the reserved id back */
		mutex_lock(&teedev->mutex);
		idr_remove(&teedev->idr, id);
		mutex_unlock(&teedev->mutex);
		return shm;
	}

	/* Publish the fully initialized shm under the reserved id */
	mutex_lock(&teedev->mutex);
	old = idr_replace(&teedev->idr, shm, id);
	mutex_unlock(&teedev->mutex);
	if (IS_ERR(old)) {
		tee_shm_free(shm);
		return old;
	}

	return shm;
}
2021-06-14 17:33:14 -05:00
/**
 * tee_shm_alloc_kernel_buf() - Allocate shared memory for kernel buffer
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * The returned memory registered in secure world and is suitable to be
 * passed as a memory buffer in parameter argument to
 * tee_client_invoke_func(). The memory allocated is later freed with a
 * call to tee_shm_free().
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
{
	/* Page-aligned pool memory, no id needed as it's kernel-internal */
	return shm_alloc_helper(ctx, size, PAGE_SIZE,
				TEE_SHM_DYNAMIC | TEE_SHM_POOL, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);
2022-02-04 10:33:54 +01:00
/**
 * tee_shm_alloc_priv_buf() - Allocate shared memory for a privately shared
 *			      kernel buffer
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * This function returns similar shared memory as
 * tee_shm_alloc_kernel_buf(), but with the difference that the memory
 * might not be registered in secure world in case the driver supports
 * passing memory not registered in advance.
 *
 * This function should normally only be used internally in the TEE
 * drivers.
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size)
{
	return shm_alloc_helper(ctx, size, 2 * sizeof(long),
				TEE_SHM_PRIV | TEE_SHM_POOL, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_priv_buf);
2022-02-04 10:33:58 +01:00
/*
 * Common path for registering an existing buffer (user or kernel) as
 * shared memory: takes references on / pins the backing pages and calls
 * the driver's shm_register() hook.
 * @ctx:	Context registering the shared memory
 * @addr:	Start address of the buffer; a user address when
 *		TEE_SHM_USER_MAPPED is set in @flags, a kernel address
 *		otherwise
 * @length:	Length of the buffer in bytes
 * @flags:	TEE_SHM_* flags stored in the new shm
 * @id:		Pre-allocated IDR id, or -1 when none is needed
 *
 * Returns a pointer to the new 'struct tee_shm' or an ERR_PTR on failure.
 * Error unwinding is strictly ordered: pages first, then the shm, then the
 * ctx and device references.
 */
static struct tee_shm *
register_shm_helper(struct tee_context *ctx, unsigned long addr,
		    size_t length, u32 flags, int id)
{
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	unsigned long start;
	size_t num_pages;
	void *ret;
	int rc;

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	/* Registration requires both register and unregister driver hooks */
	if (!teedev->desc->ops->shm_register ||
	    !teedev->desc->ops->shm_unregister) {
		ret = ERR_PTR(-ENOTSUPP);
		goto err_dev_put;
	}

	teedev_ctx_put(ctx);

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err_ctx_put;
	}

	refcount_set(&shm->refcount, 1);
	shm->flags = flags;
	shm->ctx = ctx;
	shm->id = id;
	/* Strip any architecture pointer tag bits before page arithmetic */
	addr = untagged_addr(addr);
	start = rounddown(addr, PAGE_SIZE);
	shm->offset = addr - start;
	shm->size = length;
	/*
	 * NOTE(review): addr + length can wrap for extreme values; callers
	 * are expected to pass a validated range — confirm at call sites.
	 */
	num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
	shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
	if (!shm->pages) {
		ret = ERR_PTR(-ENOMEM);
		goto err_free_shm;
	}

	if (flags & TEE_SHM_USER_MAPPED)
		rc = pin_user_pages_fast(start, num_pages, FOLL_WRITE,
					 shm->pages);
	else
		rc = shm_get_kernel_pages(start, num_pages, shm->pages);
	if (rc > 0)
		shm->num_pages = rc;
	if (rc != num_pages) {
		/* A short (non-negative) result is treated as out of memory */
		if (rc >= 0)
			rc = -ENOMEM;
		ret = ERR_PTR(rc);
		goto err_put_shm_pages;
	}

	rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
					     shm->num_pages, start);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_put_shm_pages;
	}

	return shm;
err_put_shm_pages:
	/* Release only the pages actually pinned/referenced (shm->num_pages) */
	if (flags & TEE_SHM_USER_MAPPED)
		unpin_user_pages(shm->pages, shm->num_pages);
	else
		shm_put_kernel_pages(shm->pages, shm->num_pages);
	kfree(shm->pages);
err_free_shm:
	kfree(shm);
err_ctx_put:
	teedev_ctx_put(ctx);
err_dev_put:
	tee_device_put(teedev);
	return ret;
}
2022-02-04 10:33:56 +01:00
/**
 * tee_shm_register_user_buf() - Register a userspace shared memory buffer
 * @ctx:	Context that registers the shared memory
 * @addr:	The userspace address of the shared buffer
 * @length:	Length of the shared buffer
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
					  unsigned long addr, size_t length)
{
	u32 flags = TEE_SHM_USER_MAPPED | TEE_SHM_DYNAMIC;
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	void *ret;
	int id;

	/*
	 * Validate the user-supplied range up front. This rejects kernel
	 * addresses and ranges that wrap around the address space, which
	 * would otherwise overflow the page-count arithmetic
	 * (roundup(addr + length, ...)) in register_shm_helper().
	 */
	if (!access_ok((void __user *)addr, length))
		return ERR_PTR(-EFAULT);

	/* Reserve an id first so the shm can later be published under it */
	mutex_lock(&teedev->mutex);
	id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);
	if (id < 0)
		return ERR_PTR(id);

	shm = register_shm_helper(ctx, addr, length, flags, id);
	if (IS_ERR(shm)) {
		/* Registration failed, give the reserved id back */
		mutex_lock(&teedev->mutex);
		idr_remove(&teedev->idr, id);
		mutex_unlock(&teedev->mutex);
		return shm;
	}

	/* Publish the fully initialized shm under the reserved id */
	mutex_lock(&teedev->mutex);
	ret = idr_replace(&teedev->idr, shm, id);
	mutex_unlock(&teedev->mutex);
	if (IS_ERR(ret)) {
		tee_shm_free(shm);
		return ret;
	}

	return shm;
}
/**
 * tee_shm_register_kernel_buf() - Register kernel memory to be shared with
 *				   secure world
 * @ctx:	Context that registers the shared memory
 * @addr:	The buffer
 * @length:	Length of the buffer
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx,
					    void *addr, size_t length)
{
	/* No id needed, kernel-registered memory isn't exposed via the IDR */
	return register_shm_helper(ctx, (unsigned long)addr, length,
				   TEE_SHM_DYNAMIC, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_register_kernel_buf);
2021-12-09 15:59:37 +01:00
static int tee_shm_fop_release ( struct inode * inode , struct file * filp )
{
tee_shm_put ( filp - > private_data ) ;
return 0 ;
}
static int tee_shm_fop_mmap ( struct file * filp , struct vm_area_struct * vma )
{
struct tee_shm * shm = filp - > private_data ;
size_t size = vma - > vm_end - vma - > vm_start ;
/* Refuse sharing shared memory provided by application */
if ( shm - > flags & TEE_SHM_USER_MAPPED )
return - EINVAL ;
/* check for overflowing the buffer's size */
if ( vma - > vm_pgoff + vma_pages ( vma ) > shm - > size > > PAGE_SHIFT )
return - EINVAL ;
return remap_pfn_range ( vma , vma - > vm_start , shm - > paddr > > PAGE_SHIFT ,
size , vma - > vm_page_prot ) ;
}
/* File operations backing the anonymous fd returned by tee_shm_get_fd() */
static const struct file_operations tee_shm_fops = {
	.owner = THIS_MODULE,
	.release = tee_shm_fop_release,
	.mmap = tee_shm_fop_mmap,
};
2015-03-11 14:39:39 +01:00
/**
* tee_shm_get_fd ( ) - Increase reference count and return file descriptor
* @ shm : Shared memory handle
* @ returns user space file descriptor to shared memory
*/
int tee_shm_get_fd ( struct tee_shm * shm )
{
int fd ;
2022-02-04 10:33:59 +01:00
if ( shm - > id < 0 )
2015-03-11 14:39:39 +01:00
return - EINVAL ;
2021-12-09 15:59:37 +01:00
/* matched by tee_shm_put() in tee_shm_op_release() */
refcount_inc ( & shm - > refcount ) ;
fd = anon_inode_getfd ( " tee_shm " , & tee_shm_fops , shm , O_RDWR ) ;
2018-04-04 21:03:21 +02:00
if ( fd < 0 )
2021-12-09 15:59:37 +01:00
tee_shm_put ( shm ) ;
2015-03-11 14:39:39 +01:00
return fd ;
}
/**
 * tee_shm_free() - Free shared memory
 * @shm:	Handle to shared memory to free
 */
void tee_shm_free(struct tee_shm *shm)
{
	/* Freeing is simply dropping the initial reference */
	tee_shm_put(shm);
}
EXPORT_SYMBOL_GPL(tee_shm_free);
/**
 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @returns virtual address of the shared memory + offs if offs is within
 *	the bounds of this shared memory, else an ERR_PTR
 */
void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
{
	/* No kernel mapping, or offset out of bounds: nothing to hand out */
	if (!shm->kaddr || offs >= shm->size)
		return ERR_PTR(-EINVAL);

	return (char *)shm->kaddr + offs;
}
EXPORT_SYMBOL_GPL(tee_shm_get_va);
/**
 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @pa:		Physical address to return, may be NULL for a pure bounds check
 * @returns 0 if offs is within the bounds of this shared memory, else an
 *	error code.
 */
int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
{
	if (offs < shm->size) {
		if (pa)
			*pa = shm->paddr + offs;
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(tee_shm_get_pa);
/**
* tee_shm_get_from_id ( ) - Find shared memory object and increase reference
* count
* @ ctx : Context owning the shared memory
* @ id : Id of shared memory object
* @ returns a pointer to ' struct tee_shm ' on success or an ERR_PTR on failure
*/
struct tee_shm * tee_shm_get_from_id ( struct tee_context * ctx , int id )
{
struct tee_device * teedev ;
struct tee_shm * shm ;
if ( ! ctx )
return ERR_PTR ( - EINVAL ) ;
teedev = ctx - > teedev ;
mutex_lock ( & teedev - > mutex ) ;
shm = idr_find ( & teedev - > idr , id ) ;
2021-12-09 15:59:37 +01:00
/*
* If the tee_shm was found in the IDR it must have a refcount
* larger than 0 due to the guarantee in tee_shm_put ( ) below . So
* it ' s safe to use refcount_inc ( ) .
*/
2015-03-11 14:39:39 +01:00
if ( ! shm | | shm - > ctx ! = ctx )
shm = ERR_PTR ( - EINVAL ) ;
2021-12-09 15:59:37 +01:00
else
refcount_inc ( & shm - > refcount ) ;
2015-03-11 14:39:39 +01:00
mutex_unlock ( & teedev - > mutex ) ;
return shm ;
}
EXPORT_SYMBOL_GPL ( tee_shm_get_from_id ) ;
/**
 * tee_shm_put() - Decrease reference count on a shared memory handle
 * @shm:	Shared memory handle
 *
 * When the last reference is dropped the shm is removed from the IDR and
 * released (unregistered from secure world, pages unpinned, memory freed).
 */
void tee_shm_put(struct tee_shm *shm)
{
	struct tee_device *teedev = shm->ctx->teedev;
	bool do_release = false;

	mutex_lock(&teedev->mutex);
	if (refcount_dec_and_test(&shm->refcount)) {
		/*
		 * refcount has reached 0, we must now remove it from the
		 * IDR before releasing the mutex. This will guarantee that
		 * the refcount_inc() in tee_shm_get_from_id() never starts
		 * from 0.
		 */
		if (shm->id >= 0)
			idr_remove(&teedev->idr, shm->id);
		do_release = true;
	}
	mutex_unlock(&teedev->mutex);

	/* Actual teardown happens outside the mutex */
	if (do_release)
		tee_shm_release(teedev, shm);
}
EXPORT_SYMBOL_GPL(tee_shm_put);