// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2017, 2019-2021 Linaro Limited
 */
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/uio.h>
#include "tee_private.h"

static void release_registered_pages(struct tee_shm *shm)
{
	if (shm->pages) {
		if (shm->flags & TEE_SHM_USER_MAPPED) {
			unpin_user_pages(shm->pages, shm->num_pages);
		} else {
			size_t n;

			for (n = 0; n < shm->num_pages; n++)
				put_page(shm->pages[n]);
		}

		kfree(shm->pages);
	}
}

static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
{
	if (shm->flags & TEE_SHM_POOL) {
		struct tee_shm_pool_mgr *poolm;

		if (shm->flags & TEE_SHM_DMA_BUF)
			poolm = teedev->pool->dma_buf_mgr;
		else
			poolm = teedev->pool->private_mgr;

		poolm->ops->free(poolm, shm);
	} else if (shm->flags & TEE_SHM_REGISTER) {
		int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);

		if (rc)
			dev_err(teedev->dev.parent,
				"unregister shm %p failed: %d", shm, rc);

		release_registered_pages(shm);
	}

	teedev_ctx_put(shm->ctx);

	kfree(shm);

	tee_device_put(teedev);
}

struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
{
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm_pool_mgr *poolm = NULL;
	struct tee_shm *shm;
	void *ret;
	int rc;

	if (!(flags & TEE_SHM_MAPPED)) {
		dev_err(teedev->dev.parent,
			"only mapped allocations supported\n");
		return ERR_PTR(-EINVAL);
	}

	if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF | TEE_SHM_PRIV))) {
		dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->pool) {
		/* teedev has been detached from driver */
		ret = ERR_PTR(-EINVAL);
		goto err_dev_put;
	}

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err_dev_put;
	}

	refcount_set(&shm->refcount, 1);
	shm->flags = flags | TEE_SHM_POOL;
	shm->ctx = ctx;
	if (flags & TEE_SHM_DMA_BUF)
		poolm = teedev->pool->dma_buf_mgr;
	else
		poolm = teedev->pool->private_mgr;

	rc = poolm->ops->alloc(poolm, shm, size);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_kfree;
	}

	if (flags & TEE_SHM_DMA_BUF) {
		mutex_lock(&teedev->mutex);
		shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
		mutex_unlock(&teedev->mutex);
		if (shm->id < 0) {
			ret = ERR_PTR(shm->id);
			goto err_pool_free;
		}
	}

	teedev_ctx_get(ctx);

	return shm;
err_pool_free:
	poolm->ops->free(poolm, shm);
err_kfree:
	kfree(shm);
err_dev_put:
	tee_device_put(teedev);
	return ret;
}
EXPORT_SYMBOL_GPL(tee_shm_alloc);
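
/*
 * Example (illustrative sketch, not part of the driver): a client driver
 * holding a valid tee_context "ctx" could allocate a 4 KiB buffer that is
 * also shareable with user space and release it again with tee_shm_free():
 *
 *	struct tee_shm *shm;
 *
 *	shm = tee_shm_alloc(ctx, 4096, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	...
 *	tee_shm_free(shm);
 */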

/**
 * tee_shm_alloc_kernel_buf() - Allocate shared memory for kernel buffer
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * The returned memory is registered in secure world and is suitable to be
 * passed as a memory buffer in a parameter argument to
 * tee_client_invoke_func(). The memory allocated is later freed with a
 * call to tee_shm_free().
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
{
	return tee_shm_alloc(ctx, size, TEE_SHM_MAPPED);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);
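
/*
 * Example (illustrative sketch, not part of the driver): allocating a
 * kernel buffer and passing it as a memref parameter. The parameter layout
 * and buffer length "len" are hypothetical and depend on the trusted
 * application being invoked:
 *
 *	struct tee_shm *shm = tee_shm_alloc_kernel_buf(ctx, len);
 *	struct tee_param param = { };
 *
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
 *	param.u.memref.shm = shm;
 *	param.u.memref.size = len;
 *	...
 *	tee_shm_free(shm);
 */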

struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
				 size_t length, u32 flags)
{
	struct tee_device *teedev = ctx->teedev;
	const u32 req_user_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED;
	const u32 req_kernel_flags = TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED;
	struct tee_shm *shm;
	void *ret;
	int rc;
	int num_pages;
	unsigned long start;

	if (flags != req_user_flags && flags != req_kernel_flags)
		return ERR_PTR(-ENOTSUPP);

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->desc->ops->shm_register ||
	    !teedev->desc->ops->shm_unregister) {
		tee_device_put(teedev);
		return ERR_PTR(-ENOTSUPP);
	}

	teedev_ctx_get(ctx);

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	refcount_set(&shm->refcount, 1);
	shm->flags = flags | TEE_SHM_REGISTER;
	shm->ctx = ctx;
	shm->id = -1;
	addr = untagged_addr(addr);
	start = rounddown(addr, PAGE_SIZE);
	shm->offset = addr - start;
	shm->size = length;
	num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
	shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
	if (!shm->pages) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	if (flags & TEE_SHM_USER_MAPPED) {
		rc = pin_user_pages_fast(start, num_pages, FOLL_WRITE,
					 shm->pages);
	} else {
		struct kvec *kiov;
		int i;

		kiov = kcalloc(num_pages, sizeof(*kiov), GFP_KERNEL);
		if (!kiov) {
			ret = ERR_PTR(-ENOMEM);
			goto err;
		}

		for (i = 0; i < num_pages; i++) {
			kiov[i].iov_base = (void *)(start + i * PAGE_SIZE);
			kiov[i].iov_len = PAGE_SIZE;
		}

		rc = get_kernel_pages(kiov, num_pages, 0, shm->pages);
		kfree(kiov);
	}
	if (rc > 0)
		shm->num_pages = rc;
	if (rc != num_pages) {
		if (rc >= 0)
			rc = -ENOMEM;
		ret = ERR_PTR(rc);
		goto err;
	}

	mutex_lock(&teedev->mutex);
	shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);

	if (shm->id < 0) {
		ret = ERR_PTR(shm->id);
		goto err;
	}

	rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
					     shm->num_pages, start);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err;
	}

	return shm;
err:
	if (shm) {
		if (shm->id >= 0) {
			mutex_lock(&teedev->mutex);
			idr_remove(&teedev->idr, shm->id);
			mutex_unlock(&teedev->mutex);
		}
		release_registered_pages(shm);
	}
	kfree(shm);
	teedev_ctx_put(ctx);
	tee_device_put(teedev);
	return ret;
}
EXPORT_SYMBOL_GPL(tee_shm_register);
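
/*
 * Example (illustrative sketch, not part of the driver): registering an
 * existing kernel buffer "buf" of "len" bytes so the TEE can use it in
 * place. The flags must match one of req_user_flags/req_kernel_flags above:
 *
 *	struct tee_shm *shm;
 *
 *	shm = tee_shm_register(ctx, (unsigned long)buf, len,
 *			       TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED);
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 */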

static int tee_shm_fop_release(struct inode *inode, struct file *filp)
{
	tee_shm_put(filp->private_data);
	return 0;
}

static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct tee_shm *shm = filp->private_data;
	size_t size = vma->vm_end - vma->vm_start;

	/* Refuse sharing shared memory provided by application */
	if (shm->flags & TEE_SHM_USER_MAPPED)
		return -EINVAL;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)
		return -EINVAL;

	return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}

static const struct file_operations tee_shm_fops = {
	.owner = THIS_MODULE,
	.release = tee_shm_fop_release,
	.mmap = tee_shm_fop_mmap,
};

/**
 * tee_shm_get_fd() - Increase reference count and return file descriptor
 * @shm:	Shared memory handle
 * @returns user space file descriptor to shared memory
 */
int tee_shm_get_fd(struct tee_shm *shm)
{
	int fd;

	if (!(shm->flags & TEE_SHM_DMA_BUF))
		return -EINVAL;

	/* matched by tee_shm_put() in tee_shm_fop_release() */
	refcount_inc(&shm->refcount);
	fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR);
	if (fd < 0)
		tee_shm_put(shm);

	return fd;
}
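
/*
 * Note (illustrative, user space side): the file descriptor returned by
 * tee_shm_get_fd() can be mapped from user space; tee_shm_fop_mmap() above
 * backs the mapping:
 *
 *	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */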

/**
 * tee_shm_free() - Free shared memory
 * @shm:	Handle to shared memory to free
 */
void tee_shm_free(struct tee_shm *shm)
{
	tee_shm_put(shm);
}
EXPORT_SYMBOL_GPL(tee_shm_free);

/**
 * tee_shm_va2pa() - Get physical address of a virtual address
 * @shm:	Shared memory handle
 * @va:		Virtual address to translate
 * @pa:		Returned physical address
 * @returns 0 on success and < 0 on failure
 */
int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
{
	if (!(shm->flags & TEE_SHM_MAPPED))
		return -EINVAL;
	/* Check that we're in the range of the shm */
	if ((char *)va < (char *)shm->kaddr)
		return -EINVAL;
	if ((char *)va >= ((char *)shm->kaddr + shm->size))
		return -EINVAL;

	return tee_shm_get_pa(
			shm, (unsigned long)va - (unsigned long)shm->kaddr, pa);
}
EXPORT_SYMBOL_GPL(tee_shm_va2pa);

/**
 * tee_shm_pa2va() - Get virtual address of a physical address
 * @shm:	Shared memory handle
 * @pa:		Physical address to translate
 * @va:		Returned virtual address
 * @returns 0 on success and < 0 on failure
 */
int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
{
	if (!(shm->flags & TEE_SHM_MAPPED))
		return -EINVAL;
	/* Check that we're in the range of the shm */
	if (pa < shm->paddr)
		return -EINVAL;
	if (pa >= (shm->paddr + shm->size))
		return -EINVAL;

	if (va) {
		void *v = tee_shm_get_va(shm, pa - shm->paddr);

		if (IS_ERR(v))
			return PTR_ERR(v);
		*va = v;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_pa2va);

/**
 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @returns virtual address of the shared memory + offs if offs is within
 *	the bounds of this shared memory, else an ERR_PTR
 */
void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
{
	if (!(shm->flags & TEE_SHM_MAPPED))
		return ERR_PTR(-EINVAL);
	if (offs >= shm->size)
		return ERR_PTR(-EINVAL);
	return (char *)shm->kaddr + offs;
}
EXPORT_SYMBOL_GPL(tee_shm_get_va);
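
/*
 * Example (illustrative sketch, not part of the driver): copying data into
 * a mapped shared memory buffer before invoking a trusted application;
 * "src" and "len" are hypothetical:
 *
 *	void *va = tee_shm_get_va(shm, 0);
 *
 *	if (IS_ERR(va))
 *		return PTR_ERR(va);
 *	memcpy(va, src, len);
 */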

/**
 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @pa:		Physical address to return
 * @returns 0 if offs is within the bounds of this shared memory, else an
 *	error code.
 */
int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
{
	if (offs >= shm->size)
		return -EINVAL;
	if (pa)
		*pa = shm->paddr + offs;
	return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_get_pa);

/**
 * tee_shm_get_from_id() - Find shared memory object and increase reference
 * count
 * @ctx:	Context owning the shared memory
 * @id:		Id of shared memory object
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
{
	struct tee_device *teedev;
	struct tee_shm *shm;

	if (!ctx)
		return ERR_PTR(-EINVAL);

	teedev = ctx->teedev;
	mutex_lock(&teedev->mutex);
	shm = idr_find(&teedev->idr, id);
	/*
	 * If the tee_shm was found in the IDR it must have a refcount
	 * larger than 0 due to the guarantee in tee_shm_put() below. So
	 * it's safe to use refcount_inc().
	 */
	if (!shm || shm->ctx != ctx)
		shm = ERR_PTR(-EINVAL);
	else
		refcount_inc(&shm->refcount);
	mutex_unlock(&teedev->mutex);
	return shm;
}
EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
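
/*
 * Example (illustrative sketch, not part of the driver): resolving a shared
 * memory ID received from user space and dropping the reference again once
 * done with the buffer:
 *
 *	struct tee_shm *shm = tee_shm_get_from_id(ctx, id);
 *
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	...
 *	tee_shm_put(shm);
 */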

/**
 * tee_shm_put() - Decrease reference count on a shared memory handle
 * @shm:	Shared memory handle
 */
void tee_shm_put(struct tee_shm *shm)
{
	struct tee_device *teedev = shm->ctx->teedev;
	bool do_release = false;

	mutex_lock(&teedev->mutex);
	if (refcount_dec_and_test(&shm->refcount)) {
		/*
		 * refcount has reached 0, we must now remove it from the
		 * IDR before releasing the mutex. This will guarantee that
		 * the refcount_inc() in tee_shm_get_from_id() never starts
		 * from 0.
		 */
		if (shm->flags & TEE_SHM_DMA_BUF)
			idr_remove(&teedev->idr, shm->id);
		do_release = true;
	}
	mutex_unlock(&teedev->mutex);

	if (do_release)
		tee_shm_release(teedev, shm);
}
EXPORT_SYMBOL_GPL(tee_shm_put);