/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
# include "internal.h"
# define CRYPTO_ENGINE_MAX_QLEN 10
/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
				    struct crypto_async_request *req, int err)
{
	unsigned long flags;
	bool finalize_cur_req = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);
	if (engine->cur_req == req)
		finalize_cur_req = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (finalize_cur_req) {
		enginectx = crypto_tfm_ctx(req->tfm);
		if (engine->cur_req_prepared &&
		    enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->cur_req = NULL;
		engine->cur_req_prepared = false;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	req->complete(req, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and if so calls out to the driver to initialize hardware
 * and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	bool was_busy = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	engine->cur_req = async_req;
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* By this point we have successfully dequeued a request to handle */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err;
		}
	}

	enginectx = crypto_tfm_ctx(async_req->tfm);

	if (enginectx->op.prepare_request) {
		ret = enginectx->op.prepare_request(engine, async_req);
		if (ret) {
			dev_err(engine->dev, "failed to prepare request: %d\n",
				ret);
			goto req_err;
		}
		engine->cur_req_prepared = true;
	}
	if (!enginectx->op.do_one_request) {
		dev_err(engine->dev, "failed to do request\n");
		ret = -EINVAL;
		goto req_err;
	}
	ret = enginectx->op.do_one_request(engine, async_req);
	if (ret) {
		dev_err(engine->dev, "Failed to do one request from queue: %d\n", ret);
		goto req_err;
	}
	return;

req_err:
	crypto_finalize_request(engine, async_req, ret);
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);
}

static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

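/*
 * A minimal sketch of the driver side of the pump contract above, for a
 * hypothetical "foo" driver; foo_ctx, foo_do_req and foo_init_tfm are
 * illustrative names, not part of this API. The driver must embed
 * struct crypto_engine_ctx as the *first* member of its tfm context so
 * that the crypto_tfm_ctx() cast in crypto_pump_requests() finds the ops:
 *
 *	struct foo_ctx {
 *		struct crypto_engine_ctx enginectx;	// must be first
 *		// ... driver-specific state ...
 *	};
 *
 *	static int foo_do_req(struct crypto_engine *engine, void *areq)
 *	{
 *		struct skcipher_request *req = container_of(areq,
 *					struct skcipher_request, base);
 *
 *		// program the hardware; completion is reported later,
 *		// e.g. from the IRQ handler, via crypto_finalize_*()
 *		return 0;
 *	}
 *
 *	static int foo_init_tfm(struct crypto_skcipher *tfm)
 *	{
 *		struct foo_ctx *ctx = crypto_skcipher_ctx(tfm);
 *
 *		ctx->enginectx.op.prepare_request = NULL;
 *		ctx->enginectx.op.unprepare_request = NULL;
 *		ctx->enginectx.op.do_one_request = foo_do_req;
 *		return 0;
 *	}
 */
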
/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: whether to queue the pump work after enqueueing the request
 */
static int crypto_transfer_request(struct crypto_engine *engine,
				   struct crypto_async_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = crypto_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
					     struct crypto_async_request *req)
{
	return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_ablkcipher_request_to_engine - transfer one
 * ablkcipher_request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * TODO: Remove this function when skcipher conversion is finished
 */
int crypto_transfer_ablkcipher_request_to_engine(struct crypto_engine *engine,
						 struct ablkcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_ablkcipher_request_to_engine);

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
					   struct aead_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
					       struct akcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
					       struct skcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);

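/*
 * A minimal usage sketch, assuming the hypothetical "foo" driver from the
 * sketch after crypto_pump_work() above: the algorithm's .encrypt entry
 * point simply hands the request over to the engine, and the transfer's
 * return value (usually -EINPROGRESS, or -EBUSY for a backlogged request)
 * is propagated back to the caller:
 *
 *	static int foo_skcipher_encrypt(struct skcipher_request *req)
 *	{
 *		struct foo_dev *fdev = foo_get_dev(req);  // hypothetical lookup
 *
 *		return crypto_transfer_skcipher_request_to_engine(fdev->engine,
 *								  req);
 *	}
 */
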
/**
 * crypto_finalize_ablkcipher_request - finalize one ablkcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 * TODO: Remove this function when skcipher conversion is finished
 */
void crypto_finalize_ablkcipher_request(struct crypto_engine *engine,
					struct ablkcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_ablkcipher_request);

/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
				  struct aead_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
				      struct akcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
				      struct skcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);

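/*
 * A minimal completion sketch for the hypothetical "foo" driver: once the
 * hardware signals that the current request is done (typically from the
 * IRQ handler or a tasklet), the driver finalizes it so that the engine
 * clears cur_req, runs the request's completion callback and pumps the
 * next queued request:
 *
 *	static void foo_irq_done(struct foo_dev *fdev, int err)
 *	{
 *		struct skcipher_request *req = fdev->cur_req;	// hypothetical
 *
 *		crypto_finalize_skcipher_request(fdev->engine, req, err);
 *	}
 */
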
/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, we need to
	 * wait for a while so that the queued requests can be drained.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->cur_req_prepared = false;
	engine->priv_data = dev;
	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

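/*
 * A minimal probe() sketch, assuming a hypothetical platform driver;
 * foo_probe() is illustrative. The engine structure is devm-allocated
 * against @dev, so on success only the kworker needs explicit teardown
 * later, via crypto_engine_exit():
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct crypto_engine *engine;
 *		int ret;
 *
 *		engine = crypto_engine_alloc_init(&pdev->dev, true);
 *		if (!engine)
 *			return -ENOMEM;
 *
 *		ret = crypto_engine_start(engine);
 *		if (ret) {
 *			crypto_engine_exit(engine);
 *			return ret;
 *		}
 *		// stash engine in driver data, then register algorithms
 *		return 0;
 *	}
 */
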
/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);

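/*
 * The matching remove() sketch for the hypothetical probe() above:
 * crypto_engine_exit() stops the engine, waiting up to ~10 seconds for
 * queued requests to drain, and destroys the kworker; the engine
 * structure itself is devm-managed and is freed with the device:
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		struct foo_dev *fdev = platform_get_drvdata(pdev);
 *
 *		// unregister the algorithms first so no new requests arrive
 *		return crypto_engine_exit(fdev->engine);
 *	}
 */
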
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");