// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Linaro Limited
 */
# include <linux/device.h>
# include <linux/slab.h>
# include <linux/uaccess.h>
# include "optee_private.h"
/*
 * struct optee_supp_req - a request destined for the TEE supplicant
 * @link:	list element; on optee_supp::reqs while the request is queued
 * @in_queue:	true while @link is on the optee_supp::reqs list, cleared
 *		when the request is popped or aborted
 * @func:	supplicant function requested
 * @ret:	result of the request, filled in before @c is completed
 * @num_params:	number of elements in @param
 * @param:	parameters for @func, owned by the requesting thread
 * @c:		completed when the supplicant has answered (or the request
 *		was aborted); the requesting thread waits on this
 */
struct optee_supp_req {
	struct list_head link;

	bool in_queue;
	u32 func;
	u32 ret;
	size_t num_params;
	struct tee_param *param;

	struct completion c;
};
2015-04-14 15:33:20 +03:00
void optee_supp_init ( struct optee_supp * supp )
{
memset ( supp , 0 , sizeof ( * supp ) ) ;
2016-12-23 15:13:39 +03:00
mutex_init ( & supp - > mutex ) ;
init_completion ( & supp - > reqs_c ) ;
idr_init ( & supp - > idr ) ;
INIT_LIST_HEAD ( & supp - > reqs ) ;
supp - > req_id = - 1 ;
2015-04-14 15:33:20 +03:00
}
void optee_supp_uninit ( struct optee_supp * supp )
{
2016-12-23 15:13:39 +03:00
mutex_destroy ( & supp - > mutex ) ;
idr_destroy ( & supp - > idr ) ;
}
void optee_supp_release ( struct optee_supp * supp )
{
int id ;
struct optee_supp_req * req ;
struct optee_supp_req * req_tmp ;
mutex_lock ( & supp - > mutex ) ;
/* Abort all request retrieved by supplicant */
idr_for_each_entry ( & supp - > idr , req , id ) {
idr_remove ( & supp - > idr , id ) ;
req - > ret = TEEC_ERROR_COMMUNICATION ;
complete ( & req - > c ) ;
}
/* Abort all queued requests */
list_for_each_entry_safe ( req , req_tmp , & supp - > reqs , link ) {
list_del ( & req - > link ) ;
2018-11-21 06:01:43 +03:00
req - > in_queue = false ;
2016-12-23 15:13:39 +03:00
req - > ret = TEEC_ERROR_COMMUNICATION ;
complete ( & req - > c ) ;
}
supp - > ctx = NULL ;
supp - > req_id = - 1 ;
mutex_unlock ( & supp - > mutex ) ;
2015-04-14 15:33:20 +03:00
}
/**
 * optee_supp_thrd_req() - request service from supplicant
 * @ctx:	context doing the request
 * @func:	function requested
 * @num_params:	number of elements in @param array
 * @param:	parameters for function
 *
 * Queues the request, wakes a supplicant waiting in optee_supp_recv() and
 * blocks until the supplicant has answered via optee_supp_send() (or the
 * request was aborted).
 *
 * Returns result of operation to be passed to secure world
 */
u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
			struct tee_param *param)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_supp *supp = &optee->supp;
	struct optee_supp_req *req;
	bool interruptable;
	u32 ret;

	/*
	 * Return in case there is no supplicant available and
	 * non-blocking request.
	 */
	if (!supp->ctx && ctx->supp_nowait)
		return TEEC_ERROR_COMMUNICATION;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return TEEC_ERROR_OUT_OF_MEMORY;

	init_completion(&req->c);
	req->func = func;
	req->num_params = num_params;
	req->param = param;

	/* Insert the request in the request list */
	mutex_lock(&supp->mutex);
	list_add_tail(&req->link, &supp->reqs);
	req->in_queue = true;
	mutex_unlock(&supp->mutex);

	/* Tell an eventual waiter there's a new request */
	complete(&supp->reqs_c);

	/*
	 * Wait for supplicant to process and return result, once we've
	 * returned from wait_for_completion(&req->c) successfully we have
	 * exclusive access again.
	 */
	while (wait_for_completion_interruptible(&req->c)) {
		/* Interrupted by a signal before the supplicant answered */
		mutex_lock(&supp->mutex);
		interruptable = !supp->ctx;
		if (interruptable) {
			/*
			 * There's no supplicant available and since the
			 * supp->mutex currently is held none can
			 * become available until the mutex released
			 * again.
			 *
			 * Interrupting an RPC to supplicant is only
			 * allowed as a way of slightly improving the user
			 * experience in case the supplicant hasn't been
			 * started yet. During normal operation the supplicant
			 * will serve all requests in a timely manner and
			 * interrupting then wouldn't make sense.
			 */
			if (req->in_queue) {
				/*
				 * Still queued: safe to unlink since the
				 * supplicant never saw this request.
				 */
				list_del(&req->link);
				req->in_queue = false;
			}
		}
		mutex_unlock(&supp->mutex);

		if (interruptable) {
			req->ret = TEEC_ERROR_COMMUNICATION;
			break;
		}
	}

	ret = req->ret;
	kfree(req);

	return ret;
}
2016-12-23 15:13:39 +03:00
static struct optee_supp_req * supp_pop_entry ( struct optee_supp * supp ,
int num_params , int * id )
{
struct optee_supp_req * req ;
if ( supp - > req_id ! = - 1 ) {
/*
* Supplicant should not mix synchronous and asnynchronous
* requests .
*/
return ERR_PTR ( - EINVAL ) ;
}
if ( list_empty ( & supp - > reqs ) )
return NULL ;
req = list_first_entry ( & supp - > reqs , struct optee_supp_req , link ) ;
if ( num_params < req - > num_params ) {
/* Not enough room for parameters */
return ERR_PTR ( - EINVAL ) ;
}
* id = idr_alloc ( & supp - > idr , req , 1 , 0 , GFP_KERNEL ) ;
if ( * id < 0 )
return ERR_PTR ( - ENOMEM ) ;
list_del ( & req - > link ) ;
2018-11-21 06:01:43 +03:00
req - > in_queue = false ;
2016-12-23 15:13:39 +03:00
return req ;
}
/*
 * supp_check_recv_params() - validate the parameter array from tee-supplicant
 * @num_params:	number of elements in @params
 * @params:	parameter array supplied by the supplicant
 * @num_meta:	output, 1 when the first parameter is a meta parameter,
 *		0 otherwise
 *
 * Returns 0 on success or -EINVAL on malformed input.
 */
static int supp_check_recv_params(size_t num_params, struct tee_param *params,
				  size_t *num_meta)
{
	size_t i;

	if (!num_params)
		return -EINVAL;

	/*
	 * If there's memrefs we need to decrease those as they were
	 * increased earlier and we'll even refuse to accept any below.
	 */
	for (i = 0; i < num_params; i++)
		if (tee_param_is_memref(&params[i]) && params[i].u.memref.shm)
			tee_shm_put(params[i].u.memref.shm);

	/*
	 * We only expect parameters as TEE_IOCTL_PARAM_ATTR_TYPE_NONE with
	 * or without the TEE_IOCTL_PARAM_ATTR_META bit set.
	 */
	for (i = 0; i < num_params; i++) {
		if (params[i].attr &&
		    params[i].attr != TEE_IOCTL_PARAM_ATTR_META)
			return -EINVAL;
	}

	/* At most we'll need one meta parameter so no need to check for more */
	*num_meta = (params->attr == TEE_IOCTL_PARAM_ATTR_META) ? 1 : 0;

	return 0;
}
2015-04-14 15:33:20 +03:00
/**
* optee_supp_recv ( ) - receive request for supplicant
* @ ctx : context receiving the request
* @ func : requested function in supplicant
* @ num_params : number of elements allocated in @ param , updated with number
* used elements
* @ param : space for parameters for @ func
*
* Returns 0 on success or < 0 on failure
*/
int optee_supp_recv ( struct tee_context * ctx , u32 * func , u32 * num_params ,
struct tee_param * param )
{
struct tee_device * teedev = ctx - > teedev ;
struct optee * optee = tee_get_drvdata ( teedev ) ;
struct optee_supp * supp = & optee - > supp ;
2016-12-23 15:13:39 +03:00
struct optee_supp_req * req = NULL ;
int id ;
size_t num_meta ;
2015-04-14 15:33:20 +03:00
int rc ;
2016-12-23 15:13:39 +03:00
rc = supp_check_recv_params ( * num_params , param , & num_meta ) ;
2016-12-23 15:13:34 +03:00
if ( rc )
return rc ;
2016-12-23 15:13:39 +03:00
while ( true ) {
mutex_lock ( & supp - > mutex ) ;
req = supp_pop_entry ( supp , * num_params - num_meta , & id ) ;
mutex_unlock ( & supp - > mutex ) ;
if ( req ) {
if ( IS_ERR ( req ) )
return PTR_ERR ( req ) ;
break ;
}
2015-04-14 15:33:20 +03:00
/*
2016-12-23 15:13:39 +03:00
* If we didn ' t get a request we ' ll block in
* wait_for_completion ( ) to avoid needless spinning .
*
* This is where supplicant will be hanging most of
* the time , let ' s make this interruptable so we
* can easily restart supplicant if needed .
2015-04-14 15:33:20 +03:00
*/
2016-12-23 15:13:39 +03:00
if ( wait_for_completion_interruptible ( & supp - > reqs_c ) )
return - ERESTARTSYS ;
2015-04-14 15:33:20 +03:00
}
2016-12-23 15:13:39 +03:00
if ( num_meta ) {
/*
* tee - supplicant support meta parameters - > requsts can be
* processed asynchronously .
*/
param - > attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
TEE_IOCTL_PARAM_ATTR_META ;
param - > u . value . a = id ;
param - > u . value . b = 0 ;
param - > u . value . c = 0 ;
} else {
mutex_lock ( & supp - > mutex ) ;
supp - > req_id = id ;
mutex_unlock ( & supp - > mutex ) ;
2015-04-14 15:33:20 +03:00
}
2016-12-23 15:13:39 +03:00
* func = req - > func ;
* num_params = req - > num_params + num_meta ;
memcpy ( param + num_meta , req - > param ,
sizeof ( struct tee_param ) * req - > num_params ) ;
2015-04-14 15:33:20 +03:00
2016-12-23 15:13:39 +03:00
return 0 ;
}
/*
 * supp_pop_req() - look up and remove the request being answered
 * @supp:	supplicant state, caller holds supp->mutex
 * @num_params:	number of parameters supplied by the supplicant
 * @param:	parameter array, first element may carry the request id
 * @num_meta:	output, number of leading meta parameters (0 or 1)
 *
 * Returns the matching request or an ERR_PTR on malformed input.
 */
static struct optee_supp_req *supp_pop_req(struct optee_supp *supp,
					   size_t num_params,
					   struct tee_param *param,
					   size_t *num_meta)
{
	struct optee_supp_req *req;
	size_t nm;
	int id;
	const u32 meta_attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
			      TEE_IOCTL_PARAM_ATTR_META;

	if (!num_params)
		return ERR_PTR(-EINVAL);

	if (supp->req_id == -1) {
		/* Asynchronous mode: the id travels in a meta parameter */
		if (param->attr != meta_attr)
			return ERR_PTR(-EINVAL);
		id = param->u.value.a;
		nm = 1;
	} else {
		/* Synchronous mode: the id was saved by optee_supp_recv() */
		id = supp->req_id;
		nm = 0;
	}

	req = idr_find(&supp->idr, id);
	if (!req)
		return ERR_PTR(-ENOENT);

	if ((num_params - nm) != req->num_params)
		return ERR_PTR(-EINVAL);

	idr_remove(&supp->idr, id);
	supp->req_id = -1;
	*num_meta = nm;

	return req;
}
/**
 * optee_supp_send() - send result of request from supplicant
 * @ctx:	context sending result
 * @ret:	return value of request
 * @num_params:	number of parameters returned
 * @param:	returned parameters
 *
 * Returns 0 on success or < 0 on failure.
 */
int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
		    struct tee_param *param)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_supp *supp = &optee->supp;
	struct optee_supp_req *req;
	size_t num_meta;
	size_t i;

	mutex_lock(&supp->mutex);
	req = supp_pop_req(supp, num_params, param, &num_meta);
	mutex_unlock(&supp->mutex);

	if (IS_ERR(req)) {
		/* Something is wrong, let supplicant restart. */
		return PTR_ERR(req);
	}

	/* Copy out and in/out parameters back to the original request */
	for (i = 0; i < req->num_params; i++) {
		struct tee_param *p = req->param + i;
		const struct tee_param *q = param + i + num_meta;

		switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			p->u.value.a = q->u.value.a;
			p->u.value.b = q->u.value.b;
			p->u.value.c = q->u.value.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			p->u.memref.size = q->u.memref.size;
			break;
		default:
			break;
		}
	}
	req->ret = ret;

	/* Let the requesting thread continue */
	complete(&req->c);

	return 0;
}