// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2016, Linaro Limited
 */
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/fdtable.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/uio.h>
# include "tee_private.h"
static void release_registered_pages(struct tee_shm *shm)
{
	if (shm->pages) {
		if (shm->flags & TEE_SHM_USER_MAPPED) {
			unpin_user_pages(shm->pages, shm->num_pages);
		} else {
			size_t n;

			for (n = 0; n < shm->num_pages; n++)
				put_page(shm->pages[n]);
		}

		kfree(shm->pages);
	}
}

static void tee_shm_release(struct tee_shm *shm)
{
	struct tee_device *teedev = shm->ctx->teedev;

	if (shm->flags & TEE_SHM_DMA_BUF) {
		mutex_lock(&teedev->mutex);
		idr_remove(&teedev->idr, shm->id);
		mutex_unlock(&teedev->mutex);
	}

	if (shm->flags & TEE_SHM_POOL) {
		struct tee_shm_pool_mgr *poolm;

		if (shm->flags & TEE_SHM_DMA_BUF)
			poolm = teedev->pool->dma_buf_mgr;
		else
			poolm = teedev->pool->private_mgr;

		poolm->ops->free(poolm, shm);
	} else if (shm->flags & TEE_SHM_REGISTER) {
		int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);

		if (rc)
			dev_err(teedev->dev.parent,
				"unregister shm %p failed: %d", shm, rc);

		release_registered_pages(shm);
	}

	teedev_ctx_put(shm->ctx);

	kfree(shm);

	tee_device_put(teedev);
}

static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment
			*attach, enum dma_data_direction dir)
{
	return NULL;
}

static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach,
				     struct sg_table *table,
				     enum dma_data_direction dir)
{
}

static void tee_shm_op_release(struct dma_buf *dmabuf)
{
	struct tee_shm *shm = dmabuf->priv;

	tee_shm_release(shm);
}

static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct tee_shm *shm = dmabuf->priv;
	size_t size = vma->vm_end - vma->vm_start;

	/* Refuse sharing shared memory provided by application */
	if (shm->flags & TEE_SHM_USER_MAPPED)
		return -EINVAL;

	return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}

static const struct dma_buf_ops tee_shm_dma_buf_ops = {
	.map_dma_buf = tee_shm_op_map_dma_buf,
	.unmap_dma_buf = tee_shm_op_unmap_dma_buf,
	.release = tee_shm_op_release,
	.mmap = tee_shm_op_mmap,
};

struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
{
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm_pool_mgr *poolm = NULL;
	struct tee_shm *shm;
	void *ret;
	int rc;

	if (!(flags & TEE_SHM_MAPPED)) {
		dev_err(teedev->dev.parent,
			"only mapped allocations supported\n");
		return ERR_PTR(-EINVAL);
	}

	if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF))) {
		dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->pool) {
		/* teedev has been detached from driver */
		ret = ERR_PTR(-EINVAL);
		goto err_dev_put;
	}

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err_dev_put;
	}

	shm->flags = flags | TEE_SHM_POOL;
	shm->ctx = ctx;
	if (flags & TEE_SHM_DMA_BUF)
		poolm = teedev->pool->dma_buf_mgr;
	else
		poolm = teedev->pool->private_mgr;

	rc = poolm->ops->alloc(poolm, shm, size);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_kfree;
	}

	if (flags & TEE_SHM_DMA_BUF) {
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

		mutex_lock(&teedev->mutex);
		shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
		mutex_unlock(&teedev->mutex);
		if (shm->id < 0) {
			ret = ERR_PTR(shm->id);
			goto err_pool_free;
		}

		exp_info.ops = &tee_shm_dma_buf_ops;
		exp_info.size = shm->size;
		exp_info.flags = O_RDWR;
		exp_info.priv = shm;

		shm->dmabuf = dma_buf_export(&exp_info);
		if (IS_ERR(shm->dmabuf)) {
			ret = ERR_CAST(shm->dmabuf);
			goto err_rem;
		}
	}

	teedev_ctx_get(ctx);

	return shm;
err_rem:
	if (flags & TEE_SHM_DMA_BUF) {
		mutex_lock(&teedev->mutex);
		idr_remove(&teedev->idr, shm->id);
		mutex_unlock(&teedev->mutex);
	}
err_pool_free:
	poolm->ops->free(poolm, shm);
err_kfree:
	kfree(shm);
err_dev_put:
	tee_device_put(teedev);
	return ret;
}
EXPORT_SYMBOL_GPL(tee_shm_alloc);

/**
 * tee_shm_alloc_kernel_buf() - Allocate shared memory for kernel buffer
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * The returned memory is registered in secure world and is suitable to be
 * passed as a memory buffer in parameter argument to
 * tee_client_invoke_func(). The memory allocated is later freed with a
 * call to tee_shm_free().
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
{
	return tee_shm_alloc(ctx, size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);

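/*
 * A minimal usage sketch for a kernel client (hypothetical caller; assumes
 * "ctx" is a valid tee_context, e.g. from tee_client_open_context(), and
 * "src"/"len" are caller-provided):
 *
 *	struct tee_shm *shm = tee_shm_alloc_kernel_buf(ctx, len);
 *	void *va;
 *
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	va = tee_shm_get_va(shm, 0);
 *	if (IS_ERR(va)) {
 *		tee_shm_free(shm);
 *		return PTR_ERR(va);
 *	}
 *	memcpy(va, src, len);
 *	// ... reference shm from a struct tee_param and call
 *	// tee_client_invoke_func() ...
 *	tee_shm_free(shm);
 */
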
struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
				 size_t length, u32 flags)
{
	struct tee_device *teedev = ctx->teedev;
	const u32 req_user_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED;
	const u32 req_kernel_flags = TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED;
	struct tee_shm *shm;
	void *ret;
	int rc;
	int num_pages;
	unsigned long start;

	if (flags != req_user_flags && flags != req_kernel_flags)
		return ERR_PTR(-ENOTSUPP);

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->desc->ops->shm_register ||
	    !teedev->desc->ops->shm_unregister) {
		tee_device_put(teedev);
		return ERR_PTR(-ENOTSUPP);
	}

	teedev_ctx_get(ctx);

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	shm->flags = flags | TEE_SHM_REGISTER;
	shm->ctx = ctx;
	shm->id = -1;
	addr = untagged_addr(addr);
	start = rounddown(addr, PAGE_SIZE);
	shm->offset = addr - start;
	shm->size = length;
	num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
	shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
	if (!shm->pages) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	if (flags & TEE_SHM_USER_MAPPED) {
		rc = pin_user_pages_fast(start, num_pages, FOLL_WRITE,
					 shm->pages);
	} else {
		struct kvec *kiov;
		int i;

		kiov = kcalloc(num_pages, sizeof(*kiov), GFP_KERNEL);
		if (!kiov) {
			ret = ERR_PTR(-ENOMEM);
			goto err;
		}

		for (i = 0; i < num_pages; i++) {
			kiov[i].iov_base = (void *)(start + i * PAGE_SIZE);
			kiov[i].iov_len = PAGE_SIZE;
		}

		rc = get_kernel_pages(kiov, num_pages, 0, shm->pages);
		kfree(kiov);
	}
	if (rc > 0)
		shm->num_pages = rc;
	if (rc != num_pages) {
		if (rc >= 0)
			rc = -ENOMEM;
		ret = ERR_PTR(rc);
		goto err;
	}

	mutex_lock(&teedev->mutex);
	shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);

	if (shm->id < 0) {
		ret = ERR_PTR(shm->id);
		goto err;
	}

	rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
					     shm->num_pages, start);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err;
	}

	if (flags & TEE_SHM_DMA_BUF) {
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

		exp_info.ops = &tee_shm_dma_buf_ops;
		exp_info.size = shm->size;
		exp_info.flags = O_RDWR;
		exp_info.priv = shm;

		shm->dmabuf = dma_buf_export(&exp_info);
		if (IS_ERR(shm->dmabuf)) {
			ret = ERR_CAST(shm->dmabuf);
			teedev->desc->ops->shm_unregister(ctx, shm);
			goto err;
		}
	}

	return shm;
err:
	if (shm) {
		if (shm->id >= 0) {
			mutex_lock(&teedev->mutex);
			idr_remove(&teedev->idr, shm->id);
			mutex_unlock(&teedev->mutex);
		}
		release_registered_pages(shm);
	}
	kfree(shm);
	teedev_ctx_put(ctx);
	tee_device_put(teedev);
	return ret;
}
EXPORT_SYMBOL_GPL(tee_shm_register);

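/*
 * A minimal sketch of registering an existing kernel buffer (hypothetical
 * caller; "kbuf" and "len" are assumptions, and the buffer must stay valid
 * until the returned shm is freed):
 *
 *	struct tee_shm *shm;
 *
 *	shm = tee_shm_register(ctx, (unsigned long)kbuf, len,
 *			       TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED);
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	// ... pass the registered buffer to secure world ...
 *	tee_shm_free(shm);
 */
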
/**
 * tee_shm_get_fd() - Increase reference count and return file descriptor
 * @shm:	Shared memory handle
 * @returns user space file descriptor to shared memory
 */
int tee_shm_get_fd(struct tee_shm *shm)
{
	int fd;

	if (!(shm->flags & TEE_SHM_DMA_BUF))
		return -EINVAL;

	get_dma_buf(shm->dmabuf);
	fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(shm->dmabuf);
	return fd;
}

/**
 * tee_shm_free() - Free shared memory
 * @shm:	Handle to shared memory to free
 */
void tee_shm_free(struct tee_shm *shm)
{
	/*
	 * dma_buf_put() decreases the dmabuf reference counter and will
	 * call tee_shm_release() when the last reference is gone.
	 *
	 * In the case of driver private memory we call tee_shm_release
	 * directly instead as it doesn't have a reference counter.
	 */
	if (shm->flags & TEE_SHM_DMA_BUF)
		dma_buf_put(shm->dmabuf);
	else
		tee_shm_release(shm);
}
EXPORT_SYMBOL_GPL(tee_shm_free);

/**
 * tee_shm_va2pa() - Get physical address of a virtual address
 * @shm:	Shared memory handle
 * @va:		Virtual address to translate
 * @pa:		Returned physical address
 * @returns 0 on success and < 0 on failure
 */
int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
{
	if (!(shm->flags & TEE_SHM_MAPPED))
		return -EINVAL;
	/* Check that we're in the range of the shm */
	if ((char *)va < (char *)shm->kaddr)
		return -EINVAL;
	if ((char *)va >= ((char *)shm->kaddr + shm->size))
		return -EINVAL;

	return tee_shm_get_pa(
			shm, (unsigned long)va - (unsigned long)shm->kaddr, pa);
}
EXPORT_SYMBOL_GPL(tee_shm_va2pa);

/**
 * tee_shm_pa2va() - Get virtual address of a physical address
 * @shm:	Shared memory handle
 * @pa:		Physical address to translate
 * @va:		Returned virtual address
 * @returns 0 on success and < 0 on failure
 */
int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
{
	if (!(shm->flags & TEE_SHM_MAPPED))
		return -EINVAL;
	/* Check that we're in the range of the shm */
	if (pa < shm->paddr)
		return -EINVAL;
	if (pa >= (shm->paddr + shm->size))
		return -EINVAL;

	if (va) {
		void *v = tee_shm_get_va(shm, pa - shm->paddr);

		if (IS_ERR(v))
			return PTR_ERR(v);
		*va = v;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_pa2va);

/**
 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @returns virtual address of the shared memory + offs if offs is within
 *	the bounds of this shared memory, else an ERR_PTR
 */
void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
{
	if (!(shm->flags & TEE_SHM_MAPPED))
		return ERR_PTR(-EINVAL);
	if (offs >= shm->size)
		return ERR_PTR(-EINVAL);
	return (char *)shm->kaddr + offs;
}
EXPORT_SYMBOL_GPL(tee_shm_get_va);

/**
 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @pa:		Physical address to return
 * @returns 0 if offs is within the bounds of this shared memory, else an
 *	error code.
 */
int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
{
	if (offs >= shm->size)
		return -EINVAL;
	if (pa)
		*pa = shm->paddr + offs;
	return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_get_pa);

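/*
 * Example sketch: translating an offset inside a mapped shm to the
 * physical address handed to secure world ("offs" is a caller-chosen
 * offset within the buffer and use_pa() is a hypothetical consumer):
 *
 *	phys_addr_t pa;
 *
 *	if (!tee_shm_get_pa(shm, offs, &pa))
 *		use_pa(pa);	// pa == shm->paddr + offs
 */
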
/**
 * tee_shm_get_from_id() - Find shared memory object and increase reference
 * count
 * @ctx:	Context owning the shared memory
 * @id:		Id of shared memory object
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
{
	struct tee_device *teedev;
	struct tee_shm *shm;

	if (!ctx)
		return ERR_PTR(-EINVAL);

	teedev = ctx->teedev;
	mutex_lock(&teedev->mutex);
	shm = idr_find(&teedev->idr, id);
	if (!shm || shm->ctx != ctx)
		shm = ERR_PTR(-EINVAL);
	else if (shm->flags & TEE_SHM_DMA_BUF)
		get_dma_buf(shm->dmabuf);
	mutex_unlock(&teedev->mutex);
	return shm;
}
EXPORT_SYMBOL_GPL(tee_shm_get_from_id);

/**
 * tee_shm_put() - Decrease reference count on a shared memory handle
 * @shm:	Shared memory handle
 */
void tee_shm_put(struct tee_shm *shm)
{
	if (shm->flags & TEE_SHM_DMA_BUF)
		dma_buf_put(shm->dmabuf);
}
EXPORT_SYMBOL_GPL(tee_shm_put);