// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013, 2019 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/cpu.h>
#include <linux/atomic.h>
#ifdef CONFIG_X86
#include <asm/cpu_device_id.h>
#endif
#include <linux/ccp.h>

#include "ccp-dev.h"

#define MAX_CCPS 32

/* Limit CCP use to a specified number of queues per device */
static unsigned int nqueues;
module_param(nqueues, uint, 0444);
MODULE_PARM_DESC(nqueues, "Number of queues per CCP (minimum 1; default: all available)");

/* Limit the maximum number of configured CCPs */
static atomic_t dev_count = ATOMIC_INIT(0);
static unsigned int max_devs = MAX_CCPS;
module_param(max_devs, uint, 0444);
MODULE_PARM_DESC(max_devs, "Maximum number of CCPs to enable (default: all; 0 disables all CCPs)");

struct ccp_tasklet_data {
        struct completion completion;
        struct ccp_cmd *cmd;
};

/* Human-readable error strings */
#define CCP_MAX_ERROR_CODE 64
static char *ccp_error_codes[] = {
        "",
        "ILLEGAL_ENGINE",
        "ILLEGAL_KEY_ID",
        "ILLEGAL_FUNCTION_TYPE",
        "ILLEGAL_FUNCTION_MODE",
        "ILLEGAL_FUNCTION_ENCRYPT",
        "ILLEGAL_FUNCTION_SIZE",
        "Zlib_MISSING_INIT_EOM",
        "ILLEGAL_FUNCTION_RSVD",
        "ILLEGAL_BUFFER_LENGTH",
        "VLSB_FAULT",
        "ILLEGAL_MEM_ADDR",
        "ILLEGAL_MEM_SEL",
        "ILLEGAL_CONTEXT_ID",
        "ILLEGAL_KEY_ADDR",
        "0xF Reserved",
        "Zlib_ILLEGAL_MULTI_QUEUE",
        "Zlib_ILLEGAL_JOBID_CHANGE",
        "CMD_TIMEOUT",
        "IDMA0_AXI_SLVERR",
        "IDMA0_AXI_DECERR",
        "0x15 Reserved",
        "IDMA1_AXI_SLAVE_FAULT",
        "IDMA1_AXI_DECERR",
        "0x18 Reserved",
        "ZLIBVHB_AXI_SLVERR",
        "ZLIBVHB_AXI_DECERR",
        "0x1B Reserved",
        "ZLIB_UNEXPECTED_EOM",
        "ZLIB_EXTRA_DATA",
        "ZLIB_BTYPE",
        "ZLIB_UNDEFINED_SYMBOL",
        "ZLIB_UNDEFINED_DISTANCE_S",
        "ZLIB_CODE_LENGTH_SYMBOL",
        "ZLIB_VHB_ILLEGAL_FETCH",
        "ZLIB_UNCOMPRESSED_LEN",
        "ZLIB_LIMIT_REACHED",
        "ZLIB_CHECKSUM_MISMATCH0",
        "ODMA0_AXI_SLVERR",
        "ODMA0_AXI_DECERR",
        "0x28 Reserved",
        "ODMA1_AXI_SLVERR",
        "ODMA1_AXI_DECERR",
};
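
/**
 * ccp_log_error - log a human-readable CCP error string
 *
 * @d: ccp_device struct pointer
 * @e: error code reported by the device
 *
 * Look up @e in ccp_error_codes[] and report it via dev_err(); codes
 * beyond the table are reported as unknown.  Codes at or above
 * CCP_MAX_ERROR_CODE trigger a WARN and are not logged.
 */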
void ccp_log_error(struct ccp_device *d, unsigned int e)
{
        if (WARN_ON(e >= CCP_MAX_ERROR_CODE))
                return;

        if (e < ARRAY_SIZE(ccp_error_codes))
                dev_err(d->dev, "CCP error %d: %s\n", e, ccp_error_codes[e]);
        else
                dev_err(d->dev, "CCP error %d: Unknown Error\n", e);
}

/* List of CCPs, CCP count, read-write access lock, and access functions
 *
 * Lock structure: get ccp_unit_lock for reading whenever we need to
 * examine the CCP list. While holding it for reading we can acquire
 * the RR lock to update the round-robin next-CCP pointer. The unit lock
 * must be acquired before the RR lock.
 *
 * If the unit-lock is acquired for writing, we have total control over
 * the list, so there's no value in getting the RR lock.
 */
static DEFINE_RWLOCK(ccp_unit_lock);
static LIST_HEAD(ccp_units);

/* Round-robin counter */
static DEFINE_SPINLOCK(ccp_rr_lock);
static struct ccp_device *ccp_rr;

/**
 * ccp_add_device - add a CCP device to the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Put this CCP on the unit list, which makes it available
 * for use.
 */
void ccp_add_device(struct ccp_device *ccp)
{
        unsigned long flags;

        write_lock_irqsave(&ccp_unit_lock, flags);
        list_add_tail(&ccp->entry, &ccp_units);
        if (!ccp_rr)
                /* We already have the list lock (we're first) so this
                 * pointer can't change on us. Set its initial value.
                 */
                ccp_rr = ccp;
        write_unlock_irqrestore(&ccp_unit_lock, flags);
}

/**
 * ccp_del_device - remove a CCP device from the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Remove this unit from the list of devices. If the next device
 * up for use is this one, adjust the pointer. If this is the last
 * device, NULL the pointer.
 */
void ccp_del_device(struct ccp_device *ccp)
{
        unsigned long flags;

        write_lock_irqsave(&ccp_unit_lock, flags);
        if (ccp_rr == ccp) {
                /* ccp_unit_lock is read/write; any read access
                 * will be suspended while we make changes to the
                 * list and RR pointer.
                 */
                if (list_is_last(&ccp_rr->entry, &ccp_units))
                        ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
                                                  entry);
                else
                        ccp_rr = list_next_entry(ccp_rr, entry);
        }
        list_del(&ccp->entry);
        if (list_empty(&ccp_units))
                ccp_rr = NULL;
        write_unlock_irqrestore(&ccp_unit_lock, flags);
}
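
/**
 * ccp_register_rng - register the CCP TRNG with the hw_random framework
 *
 * @ccp: ccp_device struct pointer
 *
 * Returns zero on success, or the error returned by hwrng_register().
 */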
int ccp_register_rng(struct ccp_device *ccp)
{
        int ret = 0;

        dev_dbg(ccp->dev, "Registering RNG...\n");
        /* Register an RNG */
        ccp->hwrng.name = ccp->rngname;
        ccp->hwrng.read = ccp_trng_read;
        ret = hwrng_register(&ccp->hwrng);
        if (ret)
                dev_err(ccp->dev, "error registering hwrng (%d)\n", ret);

        return ret;
}
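
/**
 * ccp_unregister_rng - unregister the CCP TRNG, if it was registered
 *
 * @ccp: ccp_device struct pointer
 */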
void ccp_unregister_rng(struct ccp_device *ccp)
{
        if (ccp->hwrng.name)
                hwrng_unregister(&ccp->hwrng);
}
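
/* Select the next CCP to use in round-robin order; returns NULL if no
 * CCPs are currently registered.
 */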
static struct ccp_device *ccp_get_device(void)
{
        unsigned long flags;
        struct ccp_device *dp = NULL;

        /* We round-robin through the unit list.
         * The (ccp_rr) pointer refers to the next unit to use.
         */
        read_lock_irqsave(&ccp_unit_lock, flags);
        if (!list_empty(&ccp_units)) {
                spin_lock(&ccp_rr_lock);
                dp = ccp_rr;
                if (list_is_last(&ccp_rr->entry, &ccp_units))
                        ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
                                                  entry);
                else
                        ccp_rr = list_next_entry(ccp_rr, entry);
                spin_unlock(&ccp_rr_lock);
        }
        read_unlock_irqrestore(&ccp_unit_lock, flags);

        return dp;
}

/**
 * ccp_present - check if a CCP device is present
 *
 * Returns zero if a CCP device is present, -ENODEV otherwise.
 */
int ccp_present(void)
{
        unsigned long flags;
        int ret;

        read_lock_irqsave(&ccp_unit_lock, flags);
        ret = list_empty(&ccp_units);
        read_unlock_irqrestore(&ccp_unit_lock, flags);

        return ret ? -ENODEV : 0;
}
EXPORT_SYMBOL_GPL(ccp_present);

/**
 * ccp_version - get the version of the CCP device
 *
 * Returns the version from the first unit on the list;
 * otherwise zero if no CCP device is present
 */
unsigned int ccp_version(void)
{
        struct ccp_device *dp;
        unsigned long flags;
        int ret = 0;

        read_lock_irqsave(&ccp_unit_lock, flags);
        if (!list_empty(&ccp_units)) {
                dp = list_first_entry(&ccp_units, struct ccp_device, entry);
                ret = dp->vdata->version;
        }
        read_unlock_irqrestore(&ccp_unit_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(ccp_version);

/**
 * ccp_enqueue_cmd - queue an operation for processing by the CCP
 *
 * @cmd: ccp_cmd struct to be processed
 *
 * Queue a cmd to be processed by the CCP. If queueing the cmd
 * would exceed the defined length of the cmd queue, the cmd is
 * only backlogged if the CCP_CMD_MAY_BACKLOG flag is set, in which
 * case -EBUSY is returned; otherwise -ENOSPC is returned.
 *
 * The callback routine specified in the ccp_cmd struct will be
 * called to notify the caller of completion (if the cmd was not
 * backlogged) or advancement out of the backlog. If the cmd has
 * advanced out of the backlog the "err" value of the callback
 * will be -EINPROGRESS. Any other "err" value during callback is
 * the result of the operation.
 *
 * The cmd has been successfully queued if:
 *   the return code is -EINPROGRESS or
 *   the return code is -EBUSY and the CCP_CMD_MAY_BACKLOG flag is set
 */
int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
        struct ccp_device *ccp;
        unsigned long flags;
        unsigned int i;
        int ret;

        /* Some commands might need to be sent to a specific device */
        ccp = cmd->ccp ? cmd->ccp : ccp_get_device();

        if (!ccp)
                return -ENODEV;

        /* Caller must supply a callback routine */
        if (!cmd->callback)
                return -EINVAL;

        cmd->ccp = ccp;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        i = ccp->cmd_q_count;

        if (ccp->cmd_count >= MAX_CMD_QLEN) {
                if (cmd->flags & CCP_CMD_MAY_BACKLOG) {
                        ret = -EBUSY;
                        list_add_tail(&cmd->entry, &ccp->backlog);
                } else {
                        ret = -ENOSPC;
                }
        } else {
                ret = -EINPROGRESS;
                ccp->cmd_count++;
                list_add_tail(&cmd->entry, &ccp->cmd);

                /* Find an idle queue */
                if (!ccp->suspending) {
                        for (i = 0; i < ccp->cmd_q_count; i++) {
                                if (ccp->cmd_q[i].active)
                                        continue;

                                break;
                        }
                }
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        /* If we found an idle queue, wake it up */
        if (i < ccp->cmd_q_count)
                wake_up_process(ccp->cmd_q[i].kthread);

        return ret;
}
EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);
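
/* Advance a backlogged cmd: notify the caller with -EINPROGRESS, move the
 * cmd onto the device's cmd list, and wake an idle queue kthread if one
 * is available.
 */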
static void ccp_do_cmd_backlog(struct work_struct *work)
{
        struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
        struct ccp_device *ccp = cmd->ccp;
        unsigned long flags;
        unsigned int i;

        cmd->callback(cmd->data, -EINPROGRESS);

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        ccp->cmd_count++;
        list_add_tail(&cmd->entry, &ccp->cmd);

        /* Find an idle queue */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                if (ccp->cmd_q[i].active)
                        continue;

                break;
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        /* If we found an idle queue, wake it up */
        if (i < ccp->cmd_q_count)
                wake_up_process(ccp->cmd_q[i].kthread);
}
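
/* Pull the next cmd off the device's cmd list for @cmd_q to run.  Returns
 * NULL if the device is suspending or no work is pending; also schedules
 * any backlogged cmd for re-submission via ccp_do_cmd_backlog().
 */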
static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
{
        struct ccp_device *ccp = cmd_q->ccp;
        struct ccp_cmd *cmd = NULL;
        struct ccp_cmd *backlog = NULL;
        unsigned long flags;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        cmd_q->active = 0;

        if (ccp->suspending) {
                cmd_q->suspended = 1;

                spin_unlock_irqrestore(&ccp->cmd_lock, flags);
                wake_up_interruptible(&ccp->suspend_queue);

                return NULL;
        }

        if (ccp->cmd_count) {
                cmd_q->active = 1;

                cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
                list_del(&cmd->entry);

                ccp->cmd_count--;
        }

        if (!list_empty(&ccp->backlog)) {
                backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
                                           entry);
                list_del(&backlog->entry);
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        if (backlog) {
                INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
                schedule_work(&backlog->work);
        }

        return cmd;
}
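
/* Tasklet handler: invoke the cmd's completion callback, then signal the
 * queue kthread that the callback has run.
 */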
static void ccp_do_cmd_complete(unsigned long data)
{
        struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data;
        struct ccp_cmd *cmd = tdata->cmd;

        cmd->callback(cmd->data, cmd->ret);

        complete(&tdata->completion);
}

/**
 * ccp_cmd_queue_thread - create a kernel thread to manage a CCP queue
 *
 * @data: thread-specific data
 */
int ccp_cmd_queue_thread(void *data)
{
        struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
        struct ccp_cmd *cmd;
        struct ccp_tasklet_data tdata;
        struct tasklet_struct tasklet;

        tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata);

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();

                set_current_state(TASK_INTERRUPTIBLE);

                cmd = ccp_dequeue_cmd(cmd_q);
                if (!cmd)
                        continue;

                __set_current_state(TASK_RUNNING);

                /* Execute the command */
                cmd->ret = ccp_run_cmd(cmd_q, cmd);

                /* Schedule the completion callback */
                tdata.cmd = cmd;
                init_completion(&tdata.completion);
                tasklet_schedule(&tasklet);
                wait_for_completion(&tdata.completion);
        }

        __set_current_state(TASK_RUNNING);

        return 0;
}

/**
 * ccp_alloc_struct - allocate and initialize the ccp_device struct
 *
 * @sp: sp_device struct of the CCP
 */
struct ccp_device *ccp_alloc_struct(struct sp_device *sp)
{
        struct device *dev = sp->dev;
        struct ccp_device *ccp;

        ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL);
        if (!ccp)
                return NULL;
        ccp->dev = dev;
        ccp->sp = sp;
        ccp->axcache = sp->axcache;

        INIT_LIST_HEAD(&ccp->cmd);
        INIT_LIST_HEAD(&ccp->backlog);

        spin_lock_init(&ccp->cmd_lock);
        mutex_init(&ccp->req_mutex);
        mutex_init(&ccp->sb_mutex);
        ccp->sb_count = KSB_COUNT;
        ccp->sb_start = 0;

        /* Initialize the wait queues */
        init_waitqueue_head(&ccp->sb_queue);
        init_waitqueue_head(&ccp->suspend_queue);

        snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", sp->ord);
        snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", sp->ord);

        return ccp;
}
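
/* hw_random read callback: read a 32-bit sample from the TRNG output
 * register, treating repeated zero reads (beyond TRNG_RETRIES) as an error.
 */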
int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
        struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
        u32 trng_value;
        int len = min_t(int, sizeof(trng_value), max);

        /* Locking is provided by the caller so we can update device
         * hwrng-related fields safely
         */
        trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
        if (!trng_value) {
                /* Zero is returned if no data is available or if a
                 * bad-entropy error is present. Assume an error if
                 * we exceed TRNG_RETRIES reads of zero.
                 */
                if (ccp->hwrng_retries++ > TRNG_RETRIES)
                        return -EIO;

                return 0;
        }

        /* Reset the counter and save the rng value */
        ccp->hwrng_retries = 0;
        memcpy(data, &trng_value, len);

        return len;
}

#ifdef CONFIG_PM
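/* Return true once every cmd queue kthread has marked itself suspended */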
bool ccp_queues_suspended(struct ccp_device *ccp)
{
        unsigned int suspended = 0;
        unsigned long flags;
        unsigned int i;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].suspended)
                        suspended++;

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        return ccp->cmd_q_count == suspended;
}
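
/* Flag the device as suspending and wait until every queue kthread has
 * quiesced before allowing the suspend to proceed.
 */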
int ccp_dev_suspend(struct sp_device *sp, pm_message_t state)
{
        struct ccp_device *ccp = sp->ccp_data;
        unsigned long flags;
        unsigned int i;

        /* If there's no device there's nothing to do */
        if (!ccp)
                return 0;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        ccp->suspending = 1;

        /* Wake all the queue kthreads to prepare for suspend */
        for (i = 0; i < ccp->cmd_q_count; i++)
                wake_up_process(ccp->cmd_q[i].kthread);

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        /* Wait for all queue kthreads to say they're done */
        while (!ccp_queues_suspended(ccp))
                wait_event_interruptible(ccp->suspend_queue,
                                         ccp_queues_suspended(ccp));

        return 0;
}
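
/* Clear the suspend state and wake every queue kthread back up */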
int ccp_dev_resume(struct sp_device *sp)
{
        struct ccp_device *ccp = sp->ccp_data;
        unsigned long flags;
        unsigned int i;

        /* If there's no device there's nothing to do */
        if (!ccp)
                return 0;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        ccp->suspending = 0;

        /* Wake up all the kthreads */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                ccp->cmd_q[i].suspended = 0;
                wake_up_process(ccp->cmd_q[i].kthread);
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        return 0;
}

#endif
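
/* Initialize the CCP portion of an sp_device: honor the max_devs and
 * nqueues module parameters, locate the version data, set the I/O
 * register base, and call the version-specific init routine.
 */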
int ccp_dev_init(struct sp_device *sp)
{
        struct device *dev = sp->dev;
        struct ccp_device *ccp;
        int ret;

        /*
         * Check how many we have so far, and stop after reaching
         * that number
         */
        if (atomic_inc_return(&dev_count) > max_devs)
                return 0; /* don't fail the load */

        ret = -ENOMEM;
        ccp = ccp_alloc_struct(sp);
        if (!ccp)
                goto e_err;
        sp->ccp_data = ccp;

        if (!nqueues || (nqueues > MAX_HW_QUEUES))
                ccp->max_q_count = MAX_HW_QUEUES;
        else
                ccp->max_q_count = nqueues;

        ccp->vdata = (struct ccp_vdata *)sp->dev_vdata->ccp_vdata;
        if (!ccp->vdata || !ccp->vdata->version) {
                ret = -ENODEV;
                dev_err(dev, "missing driver data\n");
                goto e_err;
        }

        ccp->use_tasklet = sp->use_tasklet;

        ccp->io_regs = sp->io_map + ccp->vdata->offset;
        if (ccp->vdata->setup)
                ccp->vdata->setup(ccp);

        ret = ccp->vdata->perform->init(ccp);
        if (ret) {
                /* A positive number means that the device cannot be
                 * initialized, but no additional message is required.
                 */
                if (ret > 0)
                        goto e_quiet;

                /* An unexpected problem occurred, and should be reported in the log */
                goto e_err;
        }

        dev_notice(dev, "ccp enabled\n");

        return 0;

e_err:
        dev_notice(dev, "ccp initialization failed\n");

e_quiet:
        sp->ccp_data = NULL;

        return ret;
}
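
/* Tear down the CCP portion of an sp_device via the version-specific
 * destroy routine, if a CCP was initialized.
 */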
void ccp_dev_destroy(struct sp_device *sp)
{
        struct ccp_device *ccp = sp->ccp_data;

        if (!ccp)
                return;

        ccp->vdata->perform->destroy(ccp);
}