/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <linux/err.h>
#include <linux/delay.h>
#include <crypto/engine.h>
#include <crypto/internal/hash.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10
/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	struct ahash_request *hreq;
	struct ablkcipher_request *breq;
	unsigned long flags;
	bool was_busy = false;
	int ret, rtype;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}
	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	engine->cur_req = async_req;
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	rtype = crypto_tfm_alg_type(engine->cur_req->tfm);
	/* By now we have successfully dequeued a request to process */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err;
		}
	}

	switch (rtype) {
	case CRYPTO_ALG_TYPE_AHASH:
		hreq = ahash_request_cast(engine->cur_req);
		if (engine->prepare_hash_request) {
			ret = engine->prepare_hash_request(engine, hreq);
			if (ret) {
				dev_err(engine->dev, "failed to prepare request: %d\n",
					ret);
				goto req_err;
			}
			engine->cur_req_prepared = true;
		}
		ret = engine->hash_one_request(engine, hreq);
		if (ret) {
			dev_err(engine->dev, "failed to hash one request from queue\n");
			goto req_err;
		}
		return;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		breq = ablkcipher_request_cast(engine->cur_req);
		if (engine->prepare_cipher_request) {
			ret = engine->prepare_cipher_request(engine, breq);
			if (ret) {
				dev_err(engine->dev, "failed to prepare request: %d\n",
					ret);
				goto req_err;
			}
			engine->cur_req_prepared = true;
		}
		ret = engine->cipher_one_request(engine, breq);
		if (ret) {
			dev_err(engine->dev, "failed to cipher one request from queue\n");
			goto req_err;
		}
		return;
	default:
		dev_err(engine->dev, "failed to prepare request of unknown type\n");
		return;
	}

req_err:
	switch (rtype) {
	case CRYPTO_ALG_TYPE_AHASH:
		hreq = ahash_request_cast(engine->cur_req);
		crypto_finalize_hash_request(engine, hreq, ret);
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		breq = ablkcipher_request_cast(engine->cur_req);
		crypto_finalize_cipher_request(engine, breq, ret);
		break;
	}
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);
}

static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}
/**
 * crypto_transfer_cipher_request - transfer the new request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: if true, queue the pump work to process the request
 */
int crypto_transfer_cipher_request(struct crypto_engine *engine,
				   struct ablkcipher_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = ablkcipher_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request);
/**
 * crypto_transfer_cipher_request_to_engine - transfer one request to the
 * engine queue and kick the request pump
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine,
					     struct ablkcipher_request *req)
{
	return crypto_transfer_cipher_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request_to_engine);
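/*
 * Example (illustrative sketch, not part of this file): a driver built on
 * this framework typically implements its ablkcipher .encrypt hook by
 * handing the request to the engine rather than driving the hardware
 * directly. "my_dev" and "my_dev_from_request" are hypothetical names.
 *
 *	static int my_cipher_encrypt(struct ablkcipher_request *req)
 *	{
 *		struct my_dev *dd = my_dev_from_request(req);
 *
 *		return crypto_transfer_cipher_request_to_engine(dd->engine,
 *								req);
 *	}
 *
 * The request is queued under the engine lock and the pump work is
 * scheduled; the driver's cipher_one_request() callback later runs from
 * the engine kworker.
 */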
/**
 * crypto_transfer_hash_request - transfer the new request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: if true, queue the pump work to process the request
 */
int crypto_transfer_hash_request(struct crypto_engine *engine,
				 struct ahash_request *req, bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = ahash_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request);
/**
 * crypto_transfer_hash_request_to_engine - transfer one request to the
 * engine queue and kick the request pump
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_hash_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);
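/*
 * Example (illustrative sketch): the hash side mirrors the cipher side.
 * An ahash hook simply queues the request; "my_dev" and
 * "my_dev_from_ahash" are hypothetical names.
 *
 *	static int my_ahash_digest(struct ahash_request *req)
 *	{
 *		struct my_dev *dd = my_dev_from_ahash(req);
 *
 *		return crypto_transfer_hash_request_to_engine(dd->engine, req);
 *	}
 */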
/**
 * crypto_finalize_cipher_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_cipher_request(struct crypto_engine *engine,
				    struct ablkcipher_request *req, int err)
{
	unsigned long flags;
	bool finalize_cur_req = false;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);
	if (engine->cur_req == &req->base)
		finalize_cur_req = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (finalize_cur_req) {
		if (engine->cur_req_prepared &&
		    engine->unprepare_cipher_request) {
			ret = engine->unprepare_cipher_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->cur_req = NULL;
		engine->cur_req_prepared = false;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	req->base.complete(&req->base, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
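/*
 * Example (illustrative sketch): once the hardware signals completion,
 * typically from the driver's interrupt handler or a tasklet, the driver
 * reports the result through the engine so the next queued request gets
 * pumped. "my_dev", its fields and "my_read_status" are hypothetical.
 *
 *	static irqreturn_t my_crypt_irq(int irq, void *data)
 *	{
 *		struct my_dev *dd = data;
 *		int err = my_read_status(dd);
 *
 *		crypto_finalize_cipher_request(dd->engine, dd->cur_req, err);
 *		return IRQ_HANDLED;
 *	}
 */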
/**
 * crypto_finalize_hash_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	unsigned long flags;
	bool finalize_cur_req = false;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);
	if (engine->cur_req == &req->base)
		finalize_cur_req = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (finalize_cur_req) {
		if (engine->cur_req_prepared &&
		    engine->unprepare_hash_request) {
			ret = engine->unprepare_hash_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->cur_req = NULL;
		engine->cur_req_prepared = false;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	req->base.complete(&req->base, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);
/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, we need to
	 * wait for a while to pump the requests of the engine queue.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);
/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached with one hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->cur_req_prepared = false;
	engine->priv_data = dev;
	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
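/*
 * Example (illustrative sketch): a typical probe() path allocates the
 * engine, fills in the per-request callbacks and starts the pump.
 * "my_dev" and "my_cipher_one_request" are hypothetical; error unwinding
 * is omitted for brevity.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct my_dev *dd;
 *
 *		dd = devm_kzalloc(&pdev->dev, sizeof(*dd), GFP_KERNEL);
 *		if (!dd)
 *			return -ENOMEM;
 *
 *		dd->engine = crypto_engine_alloc_init(&pdev->dev, true);
 *		if (!dd->engine)
 *			return -ENOMEM;
 *
 *		dd->engine->cipher_one_request = my_cipher_one_request;
 *		platform_set_drvdata(pdev, dd);
 *		return crypto_engine_start(dd->engine);
 *	}
 */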
/**
 * crypto_engine_exit - free the resources of hardware engine when exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
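/*
 * Example (illustrative sketch): the matching remove() path stops the
 * pump and destroys the kworker; the engine structure itself is
 * devm-allocated, so it is freed together with the device.
 *
 *	static int my_remove(struct platform_device *pdev)
 *	{
 *		struct my_dev *dd = platform_get_drvdata(pdev);
 *
 *		return crypto_engine_exit(dd->engine);
 *	}
 */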
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");