// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

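/* Reserve a contiguous run of KSB entries from the device bitmap,
 * sleeping until enough entries are freed.  Returns 0 if the wait is
 * interrupted (ccp_free_ksb() treats 0 as "nothing to free"), otherwise
 * the first reserved index biased by KSB_START.
 */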
static u32 ccp_alloc_ksb(struct ccp_cmd_queue *cmd_q, unsigned int count)
{
	int start;
	struct ccp_device *ccp = cmd_q->ccp;

	for (;;) {
		mutex_lock(&ccp->sb_mutex);

		start = (u32)bitmap_find_next_zero_area(ccp->sb,
							ccp->sb_count,
							ccp->sb_start,
							count, 0);
		if (start <= ccp->sb_count) {
			bitmap_set(ccp->sb, start, count);

			mutex_unlock(&ccp->sb_mutex);
			break;
		}

		ccp->sb_avail = 0;

		mutex_unlock(&ccp->sb_mutex);

		/* Wait for KSB entries to become available */
		if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
			return 0;
	}

	return KSB_START + start;
}

static void ccp_free_ksb(struct ccp_cmd_queue *cmd_q, unsigned int start,
			 unsigned int count)
{
	struct ccp_device *ccp = cmd_q->ccp;

	if (!start)
		return;

	mutex_lock(&ccp->sb_mutex);

	bitmap_clear(ccp->sb, start - KSB_START, count);

	ccp->sb_avail = 1;

	mutex_unlock(&ccp->sb_mutex);

	wake_up_interruptible_all(&ccp->sb_queue);
}

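/* Illustrative pairing of the two helpers above (a sketch, not a caller
 * that exists in this file; the error handling is hypothetical):
 *
 *	u32 start = ccp_alloc_ksb(cmd_q, 2);	// may sleep for space
 *
 *	if (!start)
 *		return -ERESTARTSYS;		// wait was interrupted
 *	// ... program an op using the reserved slots ...
 *	ccp_free_ksb(cmd_q, start, 2);		// release and wake waiters
 */
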
static unsigned int ccp_get_free_slots(struct ccp_cmd_queue *cmd_q)
{
	return CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
}

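/* Write the engine-specific CR words to CMD_REQ1..CMD_REQx, then start
 * the job by writing cr0 to CMD_REQ0.  If an interrupt on completion
 * was requested (explicitly, for stop-on-completion, or because the
 * queue just ran out of free slots), wait for it and turn any queue
 * error into -EIO after flushing the affected jobs.
 */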
static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
{
	struct ccp_cmd_queue *cmd_q = op->cmd_q;
	struct ccp_device *ccp = cmd_q->ccp;
	void __iomem *cr_addr;
	u32 cr0, cmd;
	unsigned int i;
	int ret = 0;

	/* We could read a status register to see how many free slots
	 * are actually available, but reading that register resets it
	 * and you could lose some error information.
	 */
	cmd_q->free_slots--;

	cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT)
	      | (op->jobid << REQ0_JOBID_SHIFT)
	      | REQ0_WAIT_FOR_WRITE;

	if (op->soc)
		cr0 |= REQ0_STOP_ON_COMPLETE
		       | REQ0_INT_ON_COMPLETE;

	if (op->ioc || !cmd_q->free_slots)
		cr0 |= REQ0_INT_ON_COMPLETE;

	/* Start at CMD_REQ1 */
	cr_addr = ccp->io_regs + CMD_REQ0 + CMD_REQ_INCR;

	mutex_lock(&ccp->req_mutex);

	/* Write CMD_REQ1 through CMD_REQx first */
	for (i = 0; i < cr_count; i++, cr_addr += CMD_REQ_INCR)
		iowrite32(*(cr + i), cr_addr);

	/* Tell the CCP to start */
	wmb();
	iowrite32(cr0, ccp->io_regs + CMD_REQ0);

	mutex_unlock(&ccp->req_mutex);

	if (cr0 & REQ0_INT_ON_COMPLETE) {
		/* Wait for the job to complete */
		ret = wait_event_interruptible(cmd_q->int_queue,
					       cmd_q->int_rcvd);
		if (ret || cmd_q->cmd_error) {
			/* On error delete all related jobs from the queue */
			cmd = (cmd_q->id << DEL_Q_ID_SHIFT)
			      | op->jobid;
			if (cmd_q->cmd_error)
				ccp_log_error(cmd_q->ccp,
					      cmd_q->cmd_error);

			iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);

			if (!ret)
				ret = -EIO;
		} else if (op->soc) {
			/* Delete just head job from the queue on SoC */
			cmd = DEL_Q_ACTIVE
			      | (cmd_q->id << DEL_Q_ID_SHIFT)
			      | op->jobid;

			iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
		}

		cmd_q->free_slots = CMD_Q_DEPTH(cmd_q->q_status);

		cmd_q->int_rcvd = 0;
	}

	return ret;
}

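/* The ccp_perform_*() helpers below each pack one operation into the
 * six 32-bit CR words that ccp_do_cmd() writes out: cr[0] (REQ1)
 * selects the engine and operation, cr[1] (REQ2) holds the source
 * length minus one, cr[2]/cr[3] (REQ3/REQ4) the source address and
 * memory type, and cr[4]/cr[5] (REQ5/REQ6) the destination (or, for
 * SHA, the total message bit count).
 */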
static int ccp_perform_aes(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_AES << REQ1_ENGINE_SHIFT)
		| (op->u.aes.type << REQ1_AES_TYPE_SHIFT)
		| (op->u.aes.mode << REQ1_AES_MODE_SHIFT)
		| (op->u.aes.action << REQ1_AES_ACTION_SHIFT)
		| (op->sb_key << REQ1_KEY_KSB_SHIFT);
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	if (op->u.aes.mode == CCP_AES_MODE_CFB)
		cr[0] |= ((0x7f) << REQ1_AES_CFB_SIZE_SHIFT);

	if (op->eom)
		cr[0] |= REQ1_EOM;

	if (op->init)
		cr[0] |= REQ1_INIT;

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

static int ccp_perform_xts_aes(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT)
		| (op->u.xts.action << REQ1_AES_ACTION_SHIFT)
		| (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT)
		| (op->sb_key << REQ1_KEY_KSB_SHIFT);
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	if (op->eom)
		cr[0] |= REQ1_EOM;

	if (op->init)
		cr[0] |= REQ1_INIT;

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

static int ccp_perform_sha(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_SHA << REQ1_ENGINE_SHIFT)
		| (op->u.sha.type << REQ1_SHA_TYPE_SHIFT)
		| REQ1_INIT;
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);

	if (op->eom) {
		cr[0] |= REQ1_EOM;
		cr[4] = lower_32_bits(op->u.sha.msg_bits);
		cr[5] = upper_32_bits(op->u.sha.msg_bits);
	} else {
		cr[4] = 0;
		cr[5] = 0;
	}

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

static int ccp_perform_rsa(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT)
		| (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT)
		| (op->sb_key << REQ1_KEY_KSB_SHIFT)
		| REQ1_EOM;
	cr[1] = op->u.rsa.input_len - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

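/* Pass-through is the one engine here that can address the storage
 * block directly: an SB endpoint is given as a slot index scaled by
 * CCP_SB_BYTES rather than a DMA address, and a bitwise operation other
 * than NOOP takes its mask from the reserved key slot.
 */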
static int ccp_perform_passthru(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_PASSTHRU << REQ1_ENGINE_SHIFT)
		| (op->u.passthru.bit_mod << REQ1_PT_BW_SHIFT)
		| (op->u.passthru.byte_swap << REQ1_PT_BS_SHIFT);

	if (op->src.type == CCP_MEMTYPE_SYSTEM)
		cr[1] = op->src.u.dma.length - 1;
	else
		cr[1] = op->dst.u.dma.length - 1;

	if (op->src.type == CCP_MEMTYPE_SYSTEM) {
		cr[2] = ccp_addr_lo(&op->src.u.dma);
		cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
			| ccp_addr_hi(&op->src.u.dma);

		if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
			cr[3] |= (op->sb_key << REQ4_KSB_SHIFT);
	} else {
		cr[2] = op->src.u.sb * CCP_SB_BYTES;
		cr[3] = (CCP_MEMTYPE_SB << REQ4_MEMTYPE_SHIFT);
	}

	if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
		cr[4] = ccp_addr_lo(&op->dst.u.dma);
		cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
			| ccp_addr_hi(&op->dst.u.dma);
	} else {
		cr[4] = op->dst.u.sb * CCP_SB_BYTES;
		cr[5] = (CCP_MEMTYPE_SB << REQ6_MEMTYPE_SHIFT);
	}

	if (op->eom)
		cr[0] |= REQ1_EOM;

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

static int ccp_perform_ecc(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = REQ1_ECC_AFFINE_CONVERT
		| (CCP_ENGINE_ECC << REQ1_ENGINE_SHIFT)
		| (op->u.ecc.function << REQ1_ECC_FUNCTION_SHIFT)
		| REQ1_EOM;
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

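/* Interrupt handling is split in two: ccp_irq_handler() masks further
 * queue interrupts and either runs ccp_irq_bh() directly or defers it
 * to a tasklet; the bottom half snapshots per-queue status, records the
 * first error seen, acknowledges the interrupt and wakes the queue's
 * kthread before re-enabling the interrupt mask.
 */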
static void ccp_disable_queue_interrupts(struct ccp_device *ccp)
{
	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
}

static void ccp_enable_queue_interrupts(struct ccp_device *ccp)
{
	iowrite32(ccp->qim, ccp->io_regs + IRQ_MASK_REG);
}

static void ccp_irq_bh(unsigned long data)
{
	struct ccp_device *ccp = (struct ccp_device *)data;
	struct ccp_cmd_queue *cmd_q;
	u32 q_int, status;
	unsigned int i;

	status = ioread32(ccp->io_regs + IRQ_STATUS_REG);

	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		q_int = status & (cmd_q->int_ok | cmd_q->int_err);
		if (q_int) {
			cmd_q->int_status = status;
			cmd_q->q_status = ioread32(cmd_q->reg_status);
			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);

			/* On error, only save the first error value */
			if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

			cmd_q->int_rcvd = 1;

			/* Acknowledge the interrupt and wake the kthread */
			iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
			wake_up_interruptible(&cmd_q->int_queue);
		}
	}
	ccp_enable_queue_interrupts(ccp);
}

static irqreturn_t ccp_irq_handler(int irq, void *data)
{
	struct ccp_device *ccp = (struct ccp_device *)data;

	ccp_disable_queue_interrupts(ccp);
	if (ccp->use_tasklet)
		tasklet_schedule(&ccp->irq_tasklet);
	else
		ccp_irq_bh((unsigned long)ccp);

	return IRQ_HANDLED;
}

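/* Bring a v3 CCP online: discover usable queues from Q_MASK_REG, give
 * each one a DMA pool, two reserved KSB regions and its pair of
 * interrupt bits, then request the IRQ, start one kthread per queue and
 * register the RNG and DMA engine interfaces.  The error labels unwind
 * in reverse order of setup.
 */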
static int ccp_init(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct ccp_cmd_queue *cmd_q;
	struct dma_pool *dma_pool;
	char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
	unsigned int qmr, i;
	int ret;

	/* Find available queues */
	ccp->qim = 0;
	qmr = ioread32(ccp->io_regs + Q_MASK_REG);
	for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) {
		if (!(qmr & (1 << i)))
			continue;

		/* Allocate a dma pool for this queue */
		snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d",
			 ccp->name, i);
		dma_pool = dma_pool_create(dma_pool_name, dev,
					   CCP_DMAPOOL_MAX_SIZE,
					   CCP_DMAPOOL_ALIGN, 0);
		if (!dma_pool) {
			dev_err(dev, "unable to allocate dma pool\n");
			ret = -ENOMEM;
			goto e_pool;
		}

		cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
		ccp->cmd_q_count++;

		cmd_q->ccp = ccp;
		cmd_q->id = i;
		cmd_q->dma_pool = dma_pool;

		/* Reserve 2 KSB regions for the queue */
		cmd_q->sb_key = KSB_START + ccp->sb_start++;
		cmd_q->sb_ctx = KSB_START + ccp->sb_start++;
		ccp->sb_count -= 2;

		/* Preset some register values and masks that are queue
		 * number dependent
		 */
		cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
				    (CMD_Q_STATUS_INCR * i);
		cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
					(CMD_Q_STATUS_INCR * i);
		cmd_q->int_ok = 1 << (i * 2);
		cmd_q->int_err = 1 << ((i * 2) + 1);

		cmd_q->free_slots = ccp_get_free_slots(cmd_q);

		init_waitqueue_head(&cmd_q->int_queue);

		/* Build queue interrupt mask (two interrupts per queue) */
		ccp->qim |= cmd_q->int_ok | cmd_q->int_err;

#ifdef CONFIG_ARM64
		/* For arm64 set the recommended queue cache settings */
		iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
			  (CMD_Q_CACHE_INC * i));
#endif

		dev_dbg(dev, "queue #%u available\n", i);
	}
	if (ccp->cmd_q_count == 0) {
		dev_notice(dev, "no command queues available\n");
		ret = -EIO;
		goto e_pool;
	}
	dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);

	/* Disable and clear interrupts until ready */
	ccp_disable_queue_interrupts(ccp);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		ioread32(cmd_q->reg_int_status);
		ioread32(cmd_q->reg_status);
	}
	iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);

	/* Request an irq */
	ret = sp_request_ccp_irq(ccp->sp, ccp_irq_handler, ccp->name, ccp);
	if (ret) {
		dev_err(dev, "unable to allocate an IRQ\n");
		goto e_pool;
	}

	/* Initialize the ISR tasklet? */
	if (ccp->use_tasklet)
		tasklet_init(&ccp->irq_tasklet, ccp_irq_bh,
			     (unsigned long)ccp);

	dev_dbg(dev, "Starting threads...\n");
	/* Create a kthread for each queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		struct task_struct *kthread;

		cmd_q = &ccp->cmd_q[i];

		kthread = kthread_run(ccp_cmd_queue_thread, cmd_q,
				      "%s-q%u", ccp->name, cmd_q->id);
		if (IS_ERR(kthread)) {
			dev_err(dev, "error creating queue thread (%ld)\n",
				PTR_ERR(kthread));
			ret = PTR_ERR(kthread);
			goto e_kthread;
		}

		cmd_q->kthread = kthread;
	}

	dev_dbg(dev, "Enabling interrupts...\n");
	/* Enable interrupts */
	ccp_enable_queue_interrupts(ccp);

	dev_dbg(dev, "Registering device...\n");
	ccp_add_device(ccp);

	ret = ccp_register_rng(ccp);
	if (ret)
		goto e_kthread;

	/* Register the DMA engine support */
	ret = ccp_dmaengine_register(ccp);
	if (ret)
		goto e_hwrng;

	return 0;

e_hwrng:
	ccp_unregister_rng(ccp);

e_kthread:
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].kthread)
			kthread_stop(ccp->cmd_q[i].kthread);

	sp_free_ccp_irq(ccp->sp, ccp);

e_pool:
	for (i = 0; i < ccp->cmd_q_count; i++)
		dma_pool_destroy(ccp->cmd_q[i].dma_pool);

	return ret;
}

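/* Tear down in roughly the reverse order of ccp_init(): unregister the
 * DMA engine and RNG, drop the device from the unit list, quiesce and
 * clear interrupts, stop the queue kthreads, free the IRQ and the DMA
 * pools, and fail any commands still queued or backlogged with -ENODEV.
 */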
static void ccp_destroy(struct ccp_device *ccp)
{
	struct ccp_cmd_queue *cmd_q;
	struct ccp_cmd *cmd;
	unsigned int i;

	/* Unregister the DMA engine */
	ccp_dmaengine_unregister(ccp);

	/* Unregister the RNG */
	ccp_unregister_rng(ccp);

	/* Remove this device from the list of available units */
	ccp_del_device(ccp);

	/* Disable and clear interrupts */
	ccp_disable_queue_interrupts(ccp);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		ioread32(cmd_q->reg_int_status);
		ioread32(cmd_q->reg_status);
	}
	iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);

	/* Stop the queue kthreads */
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].kthread)
			kthread_stop(ccp->cmd_q[i].kthread);

	sp_free_ccp_irq(ccp->sp, ccp);

	for (i = 0; i < ccp->cmd_q_count; i++)
		dma_pool_destroy(ccp->cmd_q[i].dma_pool);

	/* Flush the cmd and backlog queue */
	while (!list_empty(&ccp->cmd)) {
		/* Invoke the callback directly with an error code */
		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);
		cmd->callback(cmd->data, -ENODEV);
	}
	while (!list_empty(&ccp->backlog)) {
		/* Invoke the callback directly with an error code */
		cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
		list_del(&cmd->entry);
		cmd->callback(cmd->data, -ENODEV);
	}
}

static const struct ccp_actions ccp3_actions = {
	.aes = ccp_perform_aes,
	.xts_aes = ccp_perform_xts_aes,
	.des3 = NULL,
	.sha = ccp_perform_sha,
	.rsa = ccp_perform_rsa,
	.passthru = ccp_perform_passthru,
	.ecc = ccp_perform_ecc,
	.sballoc = ccp_alloc_ksb,
	.sbfree = ccp_free_ksb,
	.init = ccp_init,
	.destroy = ccp_destroy,
	.get_free_slots = ccp_get_free_slots,
	.irqhandler = ccp_irq_handler,
};

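/* Two flavors of version data for the v3 device, differing only in
 * where the CCP register block sits: platform devices map it at offset
 * 0, while the second variant (presumably the PCI-attached parts) finds
 * it at 0x20000 into the mapped region.
 */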
const struct ccp_vdata ccpv3_platform = {
	.version = CCP_VERSION(3, 0),
	.setup = NULL,
	.perform = &ccp3_actions,
	.offset = 0,
	.rsamax = CCP_RSA_MAX_WIDTH,
};

const struct ccp_vdata ccpv3 = {
	.version = CCP_VERSION(3, 0),
	.setup = NULL,
	.perform = &ccp3_actions,
	.offset = 0x20000,
	.rsamax = CCP_RSA_MAX_WIDTH,
};