// SPDX-License-Identifier: GPL-2.0-only
/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
 *
 * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/sched.h>

#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>

#include <asm/hypervisor.h>
#include <asm/mdesc.h>

#include "n2_core.h"

#define DRV_MODULE_NAME		"n2_crypto"
#define DRV_MODULE_VERSION	"0.2"
#define DRV_MODULE_RELDATE	"July 28, 2011"

static const char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Niagara2 Crypto driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define N2_CRA_PRIORITY		200

static DEFINE_MUTEX(spu_lock);

struct spu_queue {
	cpumask_t		sharing;
	unsigned long		qhandle;

	spinlock_t		lock;
	u8			q_type;
	void			*q;
	unsigned long		head;
	unsigned long		tail;
	struct list_head	jobs;

	unsigned long		devino;
	char			irq_name[32];
	unsigned int		irq;

	struct list_head	list;
};

struct spu_qreg {
	struct spu_queue	*queue;
	unsigned long		type;
};

static struct spu_queue **cpu_to_cwq;
static struct spu_queue **cpu_to_mau;

static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
{
	if (q->q_type == HV_NCS_QTYPE_MAU) {
		off += MAU_ENTRY_SIZE;
		if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
			off = 0;
	} else {
		off += CWQ_ENTRY_SIZE;
		if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
			off = 0;
	}
	return off;
}

struct n2_request_common {
	struct list_head	entry;
	unsigned int		offset;
};
#define OFFSET_NOT_RUNNING	(~(unsigned int)0)
/* An async job request records the final tail value it used in
 * n2_request_common->offset; job_finished() tests whether that offset
 * lies in the half-open range (old_head, new_head] of entries the
 * hardware has completed.
 */
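/* Worked example (illustrative values only): if the queue wrapped so
 * that old_head == 0x380 and new_head == 0x040, an offset of 0x008
 * satisfies "offset <= new_head" and the job is finished, while an
 * offset of 0x200 falls in neither half of the wrapped range and is
 * still pending.
 */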
static inline bool job_finished(struct spu_queue *q, unsigned int offset,
				unsigned long old_head, unsigned long new_head)
{
	if (old_head <= new_head) {
		if (offset > old_head && offset <= new_head)
			return true;
	} else {
		if (offset > old_head || offset <= new_head)
			return true;
	}
	return false;
}
/* When the HEAD marker is unequal to the actual HEAD, we get
 * a virtual device INO interrupt.  We should process the
 * completed CWQ entries and adjust the HEAD marker to clear
 * the IRQ.
 */
static irqreturn_t cwq_intr(int irq, void *dev_id)
{
	unsigned long off, new_head, hv_ret;
	struct spu_queue *q = dev_id;

	pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
	       smp_processor_id(), q->qhandle);

	spin_lock(&q->lock);

	hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);

	pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
	       smp_processor_id(), new_head, hv_ret);

	for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
		/* XXX ... XXX */
	}

	hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
	if (hv_ret == HV_EOK)
		q->head = new_head;

	spin_unlock(&q->lock);

	return IRQ_HANDLED;
}

static irqreturn_t mau_intr(int irq, void *dev_id)
{
	struct spu_queue *q = dev_id;
	unsigned long head, hv_ret;

	spin_lock(&q->lock);

	pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
	       smp_processor_id(), q->qhandle);

	hv_ret = sun4v_ncs_gethead(q->qhandle, &head);

	pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
	       smp_processor_id(), head, hv_ret);

	sun4v_ncs_sethead_marker(q->qhandle, head);

	spin_unlock(&q->lock);

	return IRQ_HANDLED;
}

static void *spu_queue_next(struct spu_queue *q, void *cur)
{
	return q->q + spu_next_offset(q, cur - q->q);
}
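/* Number of free entries in the ring, reserving one slot to tell a
 * full queue from an empty one.  Worked example (illustrative numbers,
 * 64-byte entries, 0x1000-byte ring): head == 0x40 and tail == 0x380
 * means the ring has wrapped, so diff = (0x1000 - 0x380) + 0x40 =
 * 0xcc0, i.e. 51 entries, of which 50 are usable.
 */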
static int spu_queue_num_free(struct spu_queue *q)
{
	unsigned long head = q->head;
	unsigned long tail = q->tail;
	unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
	unsigned long diff;

	if (head > tail)
		diff = head - tail;
	else
		diff = (end - tail) + head;

	return (diff / CWQ_ENTRY_SIZE) - 1;
}

static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
{
	int avail = spu_queue_num_free(q);

	if (avail >= num_entries)
		return q->q + q->tail;

	return NULL;
}

static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
{
	unsigned long hv_ret, new_tail;

	new_tail = spu_next_offset(q, last - q->q);

	hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
	if (hv_ret == HV_EOK)
		q->tail = new_tail;
	return hv_ret;
}

static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
			     int enc_type, int auth_type,
			     unsigned int hash_len,
			     bool sfas, bool sob, bool eob, bool encrypt,
			     int opcode)
{
	u64 word = (len - 1) & CONTROL_LEN;

	word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
	word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
	word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
	if (sfas)
		word |= CONTROL_STORE_FINAL_AUTH_STATE;
	if (sob)
		word |= CONTROL_START_OF_BLOCK;
	if (eob)
		word |= CONTROL_END_OF_BLOCK;
	if (encrypt)
		word |= CONTROL_ENCRYPT;
	if (hmac_key_len)
		word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
	if (hash_len)
		word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;

	return word;
}
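/* Illustrative call (hypothetical values, not driver code): a
 * one-descriptor SHA-256 digest of a 64-byte buffer would build its
 * control word as
 *
 *	control_word_base(64, 0, 0, AUTH_TYPE_SHA256, SHA256_DIGEST_SIZE,
 *			  false, true, true, false,
 *			  OPCODE_INPLACE_BIT | OPCODE_AUTH_MAC);
 *
 * i.e. length-1 in the low bits, the opcode/enc/auth fields shifted
 * into place, and both start-of-block and end-of-block set because
 * the whole operation fits in a single entry.
 */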
#if 0
static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
{
	if (this_len >= 64 ||
	    qp->head != qp->tail)
		return true;
	return false;
}
#endif

struct n2_ahash_alg {
	struct list_head	entry;
	const u8		*hash_zero;
	const u8		*hash_init;
	u8			hw_op_hashsz;
	u8			digest_size;
	u8			auth_type;
	u8			hmac_type;
	struct ahash_alg	alg;
};

static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct ahash_alg *ahash_alg;

	ahash_alg = container_of(alg, struct ahash_alg, halg.base);

	return container_of(ahash_alg, struct n2_ahash_alg, alg);
}

struct n2_hmac_alg {
	const char		*child_alg;
	struct n2_ahash_alg	derived;
};

static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct ahash_alg *ahash_alg;

	ahash_alg = container_of(alg, struct ahash_alg, halg.base);

	return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
}

struct n2_hash_ctx {
	struct crypto_ahash		*fallback_tfm;
};

#define N2_HASH_KEY_MAX			32 /* HW limit for all HMAC requests */
struct n2_hmac_ctx {
	struct n2_hash_ctx		base;

	struct crypto_shash		*child_shash;

	int				hash_key_len;
	unsigned char			hash_key[N2_HASH_KEY_MAX];
};

struct n2_hash_req_ctx {
	union {
		struct md5_state	md5;
		struct sha1_state	sha1;
		struct sha256_state	sha256;
	} u;

	struct ahash_request		fallback_req;
};

static int n2_hash_async_init(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

static int n2_hash_async_update(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

static int n2_hash_async_final(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}

static int n2_hash_async_finup(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int n2_hash_async_noimport(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

static int n2_hash_async_noexport(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int n2_hash_cra_init(struct crypto_tfm *tfm)
{
	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct crypto_ahash *fallback_tfm;
	int err;

	fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		pr_warn("Fallback driver '%s' could not be loaded!\n",
			fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}

	crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
					 crypto_ahash_reqsize(fallback_tfm)));

	ctx->fallback_tfm = fallback_tfm;
	return 0;

out:
	return err;
}

static void n2_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);

	crypto_free_ahash(ctx->fallback_tfm);
}

static int n2_hmac_cra_init(struct crypto_tfm *tfm)
{
	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
	struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
	struct crypto_ahash *fallback_tfm;
	struct crypto_shash *child_shash;
	int err;

	fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		pr_warn("Fallback driver '%s' could not be loaded!\n",
			fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}

	child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
	if (IS_ERR(child_shash)) {
		pr_warn("Child shash '%s' could not be loaded!\n",
			n2alg->child_alg);
		err = PTR_ERR(child_shash);
		goto out_free_fallback;
	}

	crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
					 crypto_ahash_reqsize(fallback_tfm)));

	ctx->child_shash = child_shash;
	ctx->base.fallback_tfm = fallback_tfm;
	return 0;

out_free_fallback:
	crypto_free_ahash(fallback_tfm);

out:
	return err;
}

static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);

	crypto_free_ahash(ctx->base.fallback_tfm);
	crypto_free_shash(ctx->child_shash);
}

static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
				unsigned int keylen)
{
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct crypto_shash *child_shash = ctx->child_shash;
	struct crypto_ahash *fallback_tfm;
	int err, bs, ds;

	fallback_tfm = ctx->base.fallback_tfm;
	err = crypto_ahash_setkey(fallback_tfm, key, keylen);
	if (err)
		return err;

	bs = crypto_shash_blocksize(child_shash);
	ds = crypto_shash_digestsize(child_shash);

	BUG_ON(ds > N2_HASH_KEY_MAX);
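	/* Standard HMAC key preparation: a key longer than the hash
	 * block size is first digested down to ds bytes, while a
	 * shorter key is stored as-is for the hardware to use.
	 */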
	if (keylen > bs) {
		err = crypto_shash_tfm_digest(child_shash, key, keylen,
					      ctx->hash_key);
		if (err)
			return err;
		keylen = ds;
	} else if (keylen <= N2_HASH_KEY_MAX)
		memcpy(ctx->hash_key, key, keylen);

	ctx->hash_key_len = keylen;

	return err;
}

static unsigned long wait_for_tail(struct spu_queue *qp)
{
	unsigned long head, hv_ret;

	do {
		hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
		if (hv_ret != HV_EOK) {
			pr_err("Hypervisor error on gethead\n");
			break;
		}
		if (head == qp->tail) {
			qp->head = head;
			break;
		}
	} while (1);
	return hv_ret;
}

static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
					      struct cwq_initial_entry *ent)
{
	unsigned long hv_ret = spu_queue_submit(qp, ent);

	if (hv_ret == HV_EOK)
		hv_ret = wait_for_tail(qp);

	return hv_ret;
}

static int n2_do_async_digest(struct ahash_request *req,
			      unsigned int auth_type, unsigned int digest_size,
			      unsigned int result_size, void *hash_loc,
			      unsigned long auth_key, unsigned int auth_key_len)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cwq_initial_entry *ent;
	struct crypto_hash_walk walk;
	struct spu_queue *qp;
	unsigned long flags;
	int err = -ENODEV;
	int nbytes, cpu;

	/* The total effective length of the operation may not
	 * exceed 2^16.
	 */
	if (unlikely(req->nbytes > (1 << 16))) {
		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
		struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
		rctx->fallback_req.base.flags =
			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
		rctx->fallback_req.nbytes = req->nbytes;
		rctx->fallback_req.src = req->src;
		rctx->fallback_req.result = req->result;

		return crypto_ahash_digest(&rctx->fallback_req);
	}

	nbytes = crypto_hash_walk_first(req, &walk);

	cpu = get_cpu();
	qp = cpu_to_cwq[cpu];
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	/* XXX can do better, improve this later by doing a by-hand scatterlist
	 * XXX walk, etc.
	 */
	ent = qp->q + qp->tail;

	ent->control = control_word_base(nbytes, auth_key_len, 0,
					 auth_type, digest_size,
					 false, true, false, false,
					 OPCODE_INPLACE_BIT |
					 OPCODE_AUTH_MAC);
	ent->src_addr = __pa(walk.data);
	ent->auth_key_addr = auth_key;
	ent->auth_iv_addr = __pa(hash_loc);
	ent->final_auth_state_addr = 0UL;
	ent->enc_key_addr = 0UL;
	ent->enc_iv_addr = 0UL;
	ent->dest_addr = __pa(hash_loc);

	nbytes = crypto_hash_walk_done(&walk, 0);
	while (nbytes > 0) {
		ent = spu_queue_next(qp, ent);

		ent->control = (nbytes - 1);
		ent->src_addr = __pa(walk.data);
		ent->auth_key_addr = 0UL;
		ent->auth_iv_addr = 0UL;
		ent->final_auth_state_addr = 0UL;
		ent->enc_key_addr = 0UL;
		ent->enc_iv_addr = 0UL;
		ent->dest_addr = 0UL;

		nbytes = crypto_hash_walk_done(&walk, 0);
	}
	ent->control |= CONTROL_END_OF_BLOCK;

	if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
		err = -EINVAL;
	else
		err = 0;

	spin_unlock_irqrestore(&qp->lock, flags);

	if (!err)
		memcpy(req->result, hash_loc, result_size);
out:
	put_cpu();

	return err;
}

static int n2_hash_async_digest(struct ahash_request *req)
{
	struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	int ds;

	ds = n2alg->digest_size;
	if (unlikely(req->nbytes == 0)) {
		memcpy(req->result, n2alg->hash_zero, ds);
		return 0;
	}
	memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);

	return n2_do_async_digest(req, n2alg->auth_type,
				  n2alg->hw_op_hashsz, ds,
				  &rctx->u, 0UL, 0);
}

static int n2_hmac_async_digest(struct ahash_request *req)
{
	struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
	int ds;

	ds = n2alg->derived.digest_size;
	if (unlikely(req->nbytes == 0) ||
	    unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
		struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
		rctx->fallback_req.base.flags =
			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
		rctx->fallback_req.nbytes = req->nbytes;
		rctx->fallback_req.src = req->src;
		rctx->fallback_req.result = req->result;

		return crypto_ahash_digest(&rctx->fallback_req);
	}
	memcpy(&rctx->u, n2alg->derived.hash_init,
	       n2alg->derived.hw_op_hashsz);

	return n2_do_async_digest(req, n2alg->derived.hmac_type,
				  n2alg->derived.hw_op_hashsz, ds,
				  &rctx->u,
				  __pa(&ctx->hash_key),
				  ctx->hash_key_len);
}

struct n2_skcipher_context {
	int			key_len;
	int			enc_type;
	union {
		u8		aes[AES_MAX_KEY_SIZE];
		u8		des[DES_KEY_SIZE];
		u8		des3[3 * DES_KEY_SIZE];
	} key;
};

#define N2_CHUNK_ARR_LEN	16

struct n2_crypto_chunk {
	struct list_head	entry;
	unsigned long		iv_paddr : 44;
	unsigned long		arr_len : 20;
	unsigned long		dest_paddr;
	unsigned long		dest_final;
	struct {
		unsigned long	src_paddr : 44;
		unsigned long	src_len : 20;
	} arr[N2_CHUNK_ARR_LEN];
};

struct n2_request_context {
	struct skcipher_walk	walk;
	struct list_head	chunk_list;
	struct n2_crypto_chunk	chunk;
	u8			temp_iv[16];
};
/* The SPU allows some level of flexibility for partial cipher blocks
 * being specified in a descriptor.
 *
 * It merely requires that every descriptor's length field is at least
 * as large as the cipher block size.  This means that a cipher block
 * can span at most 2 descriptors.  However, this does not allow a
 * partial block to span into the final descriptor as that would
 * violate the rule (since every descriptor's length must be at least
 * the block size).  So, for example, assuming an 8 byte block size:
 *
 *	0xe --> 0xa --> 0x8
 *
 * is a valid length sequence, whereas:
 *
 *	0xe --> 0xb --> 0x7
 *
 * is not a valid sequence.
 */
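#if 0
/* Minimal sketch of the rule above (illustrative only, not used by
 * the driver).  It assumes the total length is already a multiple of
 * the cipher block size, so the final descriptor must itself be block
 * aligned for no partial block to spill into it.
 */
static bool n2_len_seq_valid(const int *lens, int n, int block_size)
{
	int i;

	for (i = 0; i < n; i++) {
		/* Every descriptor must cover at least one block. */
		if (lens[i] < block_size)
			return false;
		/* No partial block may span into the final descriptor. */
		if (i == n - 1 && (lens[i] & (block_size - 1)))
			return false;
	}
	return true;
}
#endif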
struct n2_skcipher_alg {
	struct list_head	entry;
	u8			enc_type;
	struct skcipher_alg	skcipher;
};

static inline struct n2_skcipher_alg *n2_skcipher_alg(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	return container_of(alg, struct n2_skcipher_alg, skcipher);
}

struct n2_skcipher_request_context {
	struct skcipher_walk	walk;
};

static int n2_aes_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			 unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);

	ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->enc_type |= ENC_TYPE_ALG_AES128;
		break;
	case AES_KEYSIZE_192:
		ctx->enc_type |= ENC_TYPE_ALG_AES192;
		break;
	case AES_KEYSIZE_256:
		ctx->enc_type |= ENC_TYPE_ALG_AES256;
		break;
	default:
		return -EINVAL;
	}

	ctx->key_len = keylen;
	memcpy(ctx->key.aes, key, keylen);
	return 0;
}

static int n2_des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			 unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
	int err;

	err = verify_skcipher_des_key(skcipher, key);
	if (err)
		return err;

	ctx->enc_type = n2alg->enc_type;

	ctx->key_len = keylen;
	memcpy(ctx->key.des, key, keylen);
	return 0;
}

static int n2_3des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			  unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
	int err;

	err = verify_skcipher_des3_key(skcipher, key);
	if (err)
		return err;

	ctx->enc_type = n2alg->enc_type;

	ctx->key_len = keylen;
	memcpy(ctx->key.des3, key, keylen);
	return 0;
}
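/* Trim a walk step to a whole number of cipher blocks, capped at the
 * 2^16 hardware limit.  Example: nbytes == 70007 with a 16-byte block
 * size rounds down to 70000 and is then capped to 65536; the chunking
 * loop below hands the remainder back to the walk.
 */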
static inline int skcipher_descriptor_len(int nbytes, unsigned int block_size)
{
	int this_len = nbytes;

	this_len -= (nbytes & (block_size - 1));
	return this_len > (1 << 16) ? (1 << 16) : this_len;
}

static int __n2_crypt_chunk(struct crypto_skcipher *skcipher,
			    struct n2_crypto_chunk *cp,
			    struct spu_queue *qp, bool encrypt)
{
	struct n2_skcipher_context *ctx = crypto_skcipher_ctx(skcipher);
	struct cwq_initial_entry *ent;
	bool in_place;
	int i;

	ent = spu_queue_alloc(qp, cp->arr_len);
	if (!ent) {
		pr_info("queue_alloc() of %d fails\n",
			cp->arr_len);
		return -EBUSY;
	}

	in_place = (cp->dest_paddr == cp->arr[0].src_paddr);

	ent->control = control_word_base(cp->arr[0].src_len,
					 0, ctx->enc_type, 0, 0,
					 false, true, false, encrypt,
					 OPCODE_ENCRYPT |
					 (in_place ? OPCODE_INPLACE_BIT : 0));
	ent->src_addr = cp->arr[0].src_paddr;
	ent->auth_key_addr = 0UL;
	ent->auth_iv_addr = 0UL;
	ent->final_auth_state_addr = 0UL;
	ent->enc_key_addr = __pa(&ctx->key);
	ent->enc_iv_addr = cp->iv_paddr;
	ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);

	for (i = 1; i < cp->arr_len; i++) {
		ent = spu_queue_next(qp, ent);

		ent->control = cp->arr[i].src_len - 1;
		ent->src_addr = cp->arr[i].src_paddr;
		ent->auth_key_addr = 0UL;
		ent->auth_iv_addr = 0UL;
		ent->final_auth_state_addr = 0UL;
		ent->enc_key_addr = 0UL;
		ent->enc_iv_addr = 0UL;
		ent->dest_addr = 0UL;
	}
	ent->control |= CONTROL_END_OF_BLOCK;

	return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
}

static int n2_compute_chunks(struct skcipher_request *req)
{
	struct n2_request_context *rctx = skcipher_request_ctx(req);
	struct skcipher_walk *walk = &rctx->walk;
	struct n2_crypto_chunk *chunk;
	unsigned long dest_prev;
	unsigned int tot_len;
	bool prev_in_place;
	int err, nbytes;

	err = skcipher_walk_async(walk, req);
	if (err)
		return err;

	INIT_LIST_HEAD(&rctx->chunk_list);

	chunk = &rctx->chunk;
	INIT_LIST_HEAD(&chunk->entry);

	chunk->iv_paddr = 0UL;
	chunk->arr_len = 0;
	chunk->dest_paddr = 0UL;

	prev_in_place = false;
	dest_prev = ~0UL;
	tot_len = 0;

	while ((nbytes = walk->nbytes) != 0) {
		unsigned long dest_paddr, src_paddr;
		bool in_place;
		int this_len;

		src_paddr = (page_to_phys(walk->src.phys.page) +
			     walk->src.phys.offset);
		dest_paddr = (page_to_phys(walk->dst.phys.page) +
			      walk->dst.phys.offset);
		in_place = (src_paddr == dest_paddr);
		this_len = skcipher_descriptor_len(nbytes, walk->blocksize);

		if (chunk->arr_len != 0) {
			if (in_place != prev_in_place ||
			    (!prev_in_place &&
			     dest_paddr != dest_prev) ||
			    chunk->arr_len == N2_CHUNK_ARR_LEN ||
			    tot_len + this_len > (1 << 16)) {
				chunk->dest_final = dest_prev;
				list_add_tail(&chunk->entry,
					      &rctx->chunk_list);
				chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
				if (!chunk) {
					err = -ENOMEM;
					break;
				}
				INIT_LIST_HEAD(&chunk->entry);
			}
		}
		if (chunk->arr_len == 0) {
			chunk->dest_paddr = dest_paddr;
			tot_len = 0;
		}
		chunk->arr[chunk->arr_len].src_paddr = src_paddr;
		chunk->arr[chunk->arr_len].src_len = this_len;
		chunk->arr_len++;

		dest_prev = dest_paddr + this_len;
		prev_in_place = in_place;
		tot_len += this_len;

		err = skcipher_walk_done(walk, nbytes - this_len);
		if (err)
			break;
	}
	if (!err && chunk->arr_len != 0) {
		chunk->dest_final = dest_prev;
		list_add_tail(&chunk->entry, &rctx->chunk_list);
	}

	return err;
}

static void n2_chunk_complete(struct skcipher_request *req, void *final_iv)
{
	struct n2_request_context *rctx = skcipher_request_ctx(req);
	struct n2_crypto_chunk *c, *tmp;

	if (final_iv)
		memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);

	list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
		list_del(&c->entry);
		if (unlikely(c != &rctx->chunk))
			kfree(c);
	}
}

static int n2_do_ecb(struct skcipher_request *req, bool encrypt)
{
	struct n2_request_context *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	int err = n2_compute_chunks(req);
	struct n2_crypto_chunk *c, *tmp;
	unsigned long flags, hv_ret;
	struct spu_queue *qp;

	if (err)
		return err;

	qp = cpu_to_cwq[get_cpu()];
	err = -ENODEV;
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
		err = __n2_crypt_chunk(tfm, c, qp, encrypt);
		if (err)
			break;
		list_del(&c->entry);
		if (unlikely(c != &rctx->chunk))
			kfree(c);
	}
	if (!err) {
		hv_ret = wait_for_tail(qp);
		if (hv_ret != HV_EOK)
			err = -EINVAL;
	}

	spin_unlock_irqrestore(&qp->lock, flags);

out:
	put_cpu();

	n2_chunk_complete(req, NULL);
	return err;
}

static int n2_encrypt_ecb(struct skcipher_request *req)
{
	return n2_do_ecb(req, true);
}

static int n2_decrypt_ecb(struct skcipher_request *req)
{
	return n2_do_ecb(req, false);
}
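/* For the chaining modes the IV of each chunk must be wired up by
 * hand: on encryption, chunks are walked in order and each chunk's IV
 * is the last ciphertext block produced by the previous one
 * (dest_final - blocksize); on decryption, chunks are walked in
 * reverse and each chunk's IV is the last ciphertext block of the
 * chunk preceding it in the original order, with the caller's IV
 * feeding the very first chunk in both directions.
 */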
static int n2_do_chaining(struct skcipher_request *req, bool encrypt)
{
	struct n2_request_context *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned long flags, hv_ret, iv_paddr;
	int err = n2_compute_chunks(req);
	struct n2_crypto_chunk *c, *tmp;
	struct spu_queue *qp;
	void *final_iv_addr;

	final_iv_addr = NULL;

	if (err)
		return err;

	qp = cpu_to_cwq[get_cpu()];
	err = -ENODEV;
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	if (encrypt) {
		iv_paddr = __pa(rctx->walk.iv);
		list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
					 entry) {
			c->iv_paddr = iv_paddr;
			err = __n2_crypt_chunk(tfm, c, qp, true);
			if (err)
				break;
			iv_paddr = c->dest_final - rctx->walk.blocksize;
			list_del(&c->entry);
			if (unlikely(c != &rctx->chunk))
				kfree(c);
		}
		final_iv_addr = __va(iv_paddr);
	} else {
		list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
						 entry) {
			if (c == &rctx->chunk) {
				iv_paddr = __pa(rctx->walk.iv);
			} else {
				iv_paddr = (tmp->arr[tmp->arr_len - 1].src_paddr +
					    tmp->arr[tmp->arr_len - 1].src_len -
					    rctx->walk.blocksize);
			}
			if (!final_iv_addr) {
				unsigned long pa;

				pa = (c->arr[c->arr_len - 1].src_paddr +
				      c->arr[c->arr_len - 1].src_len -
				      rctx->walk.blocksize);
				final_iv_addr = rctx->temp_iv;
				memcpy(rctx->temp_iv, __va(pa),
				       rctx->walk.blocksize);
			}
			c->iv_paddr = iv_paddr;
			err = __n2_crypt_chunk(tfm, c, qp, false);
			if (err)
				break;
			list_del(&c->entry);
			if (unlikely(c != &rctx->chunk))
				kfree(c);
		}
	}
	if (!err) {
		hv_ret = wait_for_tail(qp);
		if (hv_ret != HV_EOK)
			err = -EINVAL;
	}

	spin_unlock_irqrestore(&qp->lock, flags);

out:
	put_cpu();

	n2_chunk_complete(req, err ? NULL : final_iv_addr);
	return err;
}

static int n2_encrypt_chaining(struct skcipher_request *req)
{
	return n2_do_chaining(req, true);
}

static int n2_decrypt_chaining(struct skcipher_request *req)
{
	return n2_do_chaining(req, false);
}

struct n2_skcipher_tmpl {
	const char		*name;
	const char		*drv_name;
	u8			block_size;
	u8			enc_type;
	struct skcipher_alg	skcipher;
};

static const struct n2_skcipher_tmpl skcipher_tmpls[] = {
	/* DES: ECB and CBC are supported */
	{	.name		= "ecb(des)",
		.drv_name	= "ecb-des",
		.block_size	= DES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_DES |
				   ENC_TYPE_CHAINING_ECB),
		.skcipher	= {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			.setkey		= n2_des_setkey,
			.encrypt	= n2_encrypt_ecb,
			.decrypt	= n2_decrypt_ecb,
		},
	},
	{	.name		= "cbc(des)",
		.drv_name	= "cbc-des",
		.block_size	= DES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_DES |
				   ENC_TYPE_CHAINING_CBC),
		.skcipher	= {
			.ivsize		= DES_BLOCK_SIZE,
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			.setkey		= n2_des_setkey,
			.encrypt	= n2_encrypt_chaining,
			.decrypt	= n2_decrypt_chaining,
		},
	},
	/* 3DES: ECB and CBC are supported */
	{	.name		= "ecb(des3_ede)",
		.drv_name	= "ecb-3des",
		.block_size	= DES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_3DES |
				   ENC_TYPE_CHAINING_ECB),
		.skcipher	= {
			.min_keysize	= 3 * DES_KEY_SIZE,
			.max_keysize	= 3 * DES_KEY_SIZE,
			.setkey		= n2_3des_setkey,
			.encrypt	= n2_encrypt_ecb,
			.decrypt	= n2_decrypt_ecb,
		},
	},
	{	.name		= "cbc(des3_ede)",
		.drv_name	= "cbc-3des",
		.block_size	= DES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_3DES |
				   ENC_TYPE_CHAINING_CBC),
		.skcipher	= {
			.ivsize		= DES_BLOCK_SIZE,
			.min_keysize	= 3 * DES_KEY_SIZE,
			.max_keysize	= 3 * DES_KEY_SIZE,
			.setkey		= n2_3des_setkey,
			.encrypt	= n2_encrypt_chaining,
			.decrypt	= n2_decrypt_chaining,
		},
	},

	/* AES: ECB CBC and CTR are supported */
	{	.name		= "ecb(aes)",
		.drv_name	= "ecb-aes",
		.block_size	= AES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_AES128 |
				   ENC_TYPE_CHAINING_ECB),
		.skcipher	= {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= n2_aes_setkey,
			.encrypt	= n2_encrypt_ecb,
			.decrypt	= n2_decrypt_ecb,
		},
	},
	{	.name		= "cbc(aes)",
		.drv_name	= "cbc-aes",
		.block_size	= AES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_AES128 |
				   ENC_TYPE_CHAINING_CBC),
		.skcipher	= {
			.ivsize		= AES_BLOCK_SIZE,
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= n2_aes_setkey,
			.encrypt	= n2_encrypt_chaining,
			.decrypt	= n2_decrypt_chaining,
		},
	},
	{	.name		= "ctr(aes)",
		.drv_name	= "ctr-aes",
		.block_size	= AES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_AES128 |
				   ENC_TYPE_CHAINING_COUNTER),
		.skcipher	= {
			.ivsize		= AES_BLOCK_SIZE,
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= n2_aes_setkey,
			.encrypt	= n2_encrypt_chaining,
			.decrypt	= n2_encrypt_chaining,
		},
	},
};
#define NUM_CIPHER_TMPLS ARRAY_SIZE(skcipher_tmpls)

static LIST_HEAD(skcipher_algs);

struct n2_hash_tmpl {
	const char	*name;
	const u8	*hash_zero;
	const u8	*hash_init;
	u8		hw_op_hashsz;
	u8		digest_size;
	u8		statesize;
	u8		block_size;
	u8		auth_type;
	u8		hmac_type;
};

static const __le32 n2_md5_init[MD5_HASH_WORDS] = {
	cpu_to_le32(MD5_H0),
	cpu_to_le32(MD5_H1),
	cpu_to_le32(MD5_H2),
	cpu_to_le32(MD5_H3),
};
static const u32 n2_sha1_init[SHA1_DIGEST_SIZE / 4] = {
	SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
};
static const u32 n2_sha256_init[SHA256_DIGEST_SIZE / 4] = {
	SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
	SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
};
static const u32 n2_sha224_init[SHA256_DIGEST_SIZE / 4] = {
	SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
	SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
};

static const struct n2_hash_tmpl hash_tmpls[] = {
	{ .name		= "md5",
	  .hash_zero	= md5_zero_message_hash,
	  .hash_init	= (u8 *)n2_md5_init,
	  .auth_type	= AUTH_TYPE_MD5,
	  .hmac_type	= AUTH_TYPE_HMAC_MD5,
	  .hw_op_hashsz	= MD5_DIGEST_SIZE,
	  .digest_size	= MD5_DIGEST_SIZE,
	  .statesize	= sizeof(struct md5_state),
	  .block_size	= MD5_HMAC_BLOCK_SIZE },
	{ .name		= "sha1",
	  .hash_zero	= sha1_zero_message_hash,
	  .hash_init	= (u8 *)n2_sha1_init,
	  .auth_type	= AUTH_TYPE_SHA1,
	  .hmac_type	= AUTH_TYPE_HMAC_SHA1,
	  .hw_op_hashsz	= SHA1_DIGEST_SIZE,
	  .digest_size	= SHA1_DIGEST_SIZE,
	  .statesize	= sizeof(struct sha1_state),
	  .block_size	= SHA1_BLOCK_SIZE },
	{ .name		= "sha256",
	  .hash_zero	= sha256_zero_message_hash,
	  .hash_init	= (u8 *)n2_sha256_init,
	  .auth_type	= AUTH_TYPE_SHA256,
	  .hmac_type	= AUTH_TYPE_HMAC_SHA256,
	  .hw_op_hashsz	= SHA256_DIGEST_SIZE,
	  .digest_size	= SHA256_DIGEST_SIZE,
	  .statesize	= sizeof(struct sha256_state),
	  .block_size	= SHA256_BLOCK_SIZE },
	{ .name		= "sha224",
	  .hash_zero	= sha224_zero_message_hash,
	  .hash_init	= (u8 *)n2_sha224_init,
	  .auth_type	= AUTH_TYPE_SHA256,
	  .hmac_type	= AUTH_TYPE_RESERVED,
	  .hw_op_hashsz	= SHA256_DIGEST_SIZE,
	  .digest_size	= SHA224_DIGEST_SIZE,
	  .statesize	= sizeof(struct sha256_state),
	  .block_size	= SHA224_BLOCK_SIZE },
};
#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)

static LIST_HEAD(ahash_algs);
static LIST_HEAD(hmac_algs);

static int algs_registered;

static void __n2_unregister_algs(void)
{
	struct n2_skcipher_alg *skcipher, *skcipher_tmp;
	struct n2_ahash_alg *alg, *alg_tmp;
	struct n2_hmac_alg *hmac, *hmac_tmp;

	list_for_each_entry_safe(skcipher, skcipher_tmp, &skcipher_algs, entry) {
		crypto_unregister_skcipher(&skcipher->skcipher);
		list_del(&skcipher->entry);
		kfree(skcipher);
	}
	list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
		crypto_unregister_ahash(&hmac->derived.alg);
		list_del(&hmac->derived.entry);
		kfree(hmac);
	}
	list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
		crypto_unregister_ahash(&alg->alg);
		list_del(&alg->entry);
		kfree(alg);
	}
}

static int n2_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	crypto_skcipher_set_reqsize(tfm, sizeof(struct n2_request_context));
	return 0;
}

static int __n2_register_one_skcipher(const struct n2_skcipher_tmpl *tmpl)
{
	struct n2_skcipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
	struct skcipher_alg *alg;
	int err;

	if (!p)
		return -ENOMEM;

	alg = &p->skcipher;
	*alg = tmpl->skcipher;

	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
	alg->base.cra_priority = N2_CRA_PRIORITY;
	alg->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
			      CRYPTO_ALG_ALLOCATES_MEMORY;
	alg->base.cra_blocksize = tmpl->block_size;
	p->enc_type = tmpl->enc_type;
	alg->base.cra_ctxsize = sizeof(struct n2_skcipher_context);
	alg->base.cra_module = THIS_MODULE;
	alg->init = n2_skcipher_init_tfm;

	list_add(&p->entry, &skcipher_algs);
	err = crypto_register_skcipher(alg);
	if (err) {
		pr_err("%s alg registration failed\n", alg->base.cra_name);
		list_del(&p->entry);
		kfree(p);
	} else {
		pr_info("%s alg registered\n", alg->base.cra_name);
	}
	return err;
}
static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
{
	struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
	struct ahash_alg *ahash;
	struct crypto_alg *base;
	int err;

	if (!p)
		return -ENOMEM;

	p->child_alg = n2ahash->alg.halg.base.cra_name;
	memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
	INIT_LIST_HEAD(&p->derived.entry);

	ahash = &p->derived.alg;
	ahash->digest = n2_hmac_async_digest;
	ahash->setkey = n2_hmac_async_setkey;

	base = &ahash->halg.base;
	err = -ENAMETOOLONG;
	if (snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
		     p->child_alg) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_p;
	if (snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2",
		     p->child_alg) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_p;

	base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
	base->cra_init = n2_hmac_cra_init;
	base->cra_exit = n2_hmac_cra_exit;

	list_add(&p->derived.entry, &hmac_algs);
	err = crypto_register_ahash(ahash);
	if (err) {
		pr_err("%s alg registration failed\n", base->cra_name);
		list_del(&p->derived.entry);
out_free_p:
		kfree(p);
	} else {
		pr_info("%s alg registered\n", base->cra_name);
	}
	return err;
}
static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
{
	struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	struct ahash_alg *ahash;
	int err;

	if (!p)
		return -ENOMEM;

	p->hash_zero = tmpl->hash_zero;
	p->hash_init = tmpl->hash_init;
	p->auth_type = tmpl->auth_type;
	p->hmac_type = tmpl->hmac_type;
	p->hw_op_hashsz = tmpl->hw_op_hashsz;
	p->digest_size = tmpl->digest_size;

	ahash = &p->alg;
	ahash->init = n2_hash_async_init;
	ahash->update = n2_hash_async_update;
	ahash->final = n2_hash_async_final;
	ahash->finup = n2_hash_async_finup;
	ahash->digest = n2_hash_async_digest;
	ahash->export = n2_hash_async_noexport;
	ahash->import = n2_hash_async_noimport;

	halg = &ahash->halg;
	halg->digestsize = tmpl->digest_size;
	halg->statesize = tmpl->statesize;

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
	base->cra_priority = N2_CRA_PRIORITY;
	base->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
			  CRYPTO_ALG_NEED_FALLBACK;
	base->cra_blocksize = tmpl->block_size;
	base->cra_ctxsize = sizeof(struct n2_hash_ctx);
	base->cra_module = THIS_MODULE;
	base->cra_init = n2_hash_cra_init;
	base->cra_exit = n2_hash_cra_exit;

	list_add(&p->entry, &ahash_algs);
	err = crypto_register_ahash(ahash);
	if (err) {
		pr_err("%s alg registration failed\n", base->cra_name);
		list_del(&p->entry);
		kfree(p);
	} else {
		pr_info("%s alg registered\n", base->cra_name);
	}
	if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
		err = __n2_register_one_hmac(p);
	return err;
}

static int n2_register_algs(void)
{
	int i, err = 0;

	mutex_lock(&spu_lock);
	if (algs_registered++)
		goto out;

	for (i = 0; i < NUM_HASH_TMPLS; i++) {
		err = __n2_register_one_ahash(&hash_tmpls[i]);
		if (err) {
			__n2_unregister_algs();
			goto out;
		}
	}
	for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
		err = __n2_register_one_skcipher(&skcipher_tmpls[i]);
		if (err) {
			__n2_unregister_algs();
			goto out;
		}
	}

out:
	mutex_unlock(&spu_lock);
	return err;
}

static void n2_unregister_algs(void)
{
	mutex_lock(&spu_lock);
	if (!--algs_registered)
		__n2_unregister_algs();
	mutex_unlock(&spu_lock);
}

/* To map CWQ queues to interrupt sources, the hypervisor API provides
 * a devino.  This isn't very useful to us because all of the
 * interrupts listed in the device_node have been translated to
 * Linux virtual IRQ cookie numbers.
 *
 * So we have to back-translate, going through the 'intr' and 'ino'
 * property tables of the n2cp MDESC node, matching it with the OF
 * 'interrupts' property entries, in order to figure out which
 * devino goes to which already-translated IRQ.
 */
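/* Illustrative example (made-up values): if the MDESC node carries
 * ino = { 0x25, 0x26 } and get_irq_props() numbered them intr = { 1, 2 },
 * a devino of 0x26 resolves to intr 2; scanning the OF "interrupts"
 * property for the entry equal to 2 then yields the index into
 * dev->archdata.irqs[] holding the Linux virtual IRQ.
 */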
static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
			     unsigned long dev_ino)
{
	const unsigned int *dev_intrs;
	unsigned int intr;
	int i;

	for (i = 0; i < ip->num_intrs; i++) {
		if (ip->ino_table[i].ino == dev_ino)
			break;
	}
	if (i == ip->num_intrs)
		return -ENODEV;

	intr = ip->ino_table[i].intr;

	dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
	if (!dev_intrs)
		return -ENODEV;

	for (i = 0; i < dev->archdata.num_irqs; i++) {
		if (dev_intrs[i] == intr)
			return i;
	}

	return -ENODEV;
}

static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
		       const char *irq_name, struct spu_queue *p,
		       irq_handler_t handler)
{
	unsigned long herr;
	int index;

	herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
	if (herr)
		return -EINVAL;

	index = find_devino_index(dev, ip, p->devino);
	if (index < 0)
		return index;

	p->irq = dev->archdata.irqs[index];

	sprintf(p->irq_name, "%s-%d", irq_name, index);

	return request_irq(p->irq, handler, 0, p->irq_name, p);
}

static struct kmem_cache *queue_cache[2];

static void *new_queue(unsigned long q_type)
{
	return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
}

static void free_queue(void *p, unsigned long q_type)
{
	kmem_cache_free(queue_cache[q_type - 1], p);
}

static int queue_cache_init(void)
{
	if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
		queue_cache[HV_NCS_QTYPE_MAU - 1] =
			kmem_cache_create("mau_queue",
					  (MAU_NUM_ENTRIES *
					   MAU_ENTRY_SIZE),
					  MAU_ENTRY_SIZE, 0, NULL);
	if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
		return -ENOMEM;

	if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
		queue_cache[HV_NCS_QTYPE_CWQ - 1] =
			kmem_cache_create("cwq_queue",
					  (CWQ_NUM_ENTRIES *
					   CWQ_ENTRY_SIZE),
					  CWQ_ENTRY_SIZE, 0, NULL);
	if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
		kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
		queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
		return -ENOMEM;
	}
	return 0;
}

static void queue_cache_destroy(void)
{
	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
	queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
	queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
}

static long spu_queue_register_workfn(void *arg)
{
	struct spu_qreg *qr = arg;
	struct spu_queue *p = qr->queue;
	unsigned long q_type = qr->type;
	unsigned long hv_ret;

	hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
				 CWQ_NUM_ENTRIES, &p->qhandle);
	if (!hv_ret)
		sun4v_ncs_sethead_marker(p->qhandle, 0);

	return hv_ret ? -EINVAL : 0;
}
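/* The qconf hypervisor call is expected to run on a CPU that can
 * reach the queue, so registration is funneled through
 * work_on_cpu_safe() onto one of the CPUs in the queue's sharing
 * mask.
 */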
static int spu_queue_register ( struct spu_queue * p , unsigned long q_type )
{
int cpu = cpumask_any_and ( & p - > sharing , cpu_online_mask ) ;
struct spu_qreg qr = { . queue = p , . type = q_type } ;
2010-05-19 08:14:04 +04:00
2017-04-13 11:20:23 +03:00
return work_on_cpu_safe ( cpu , spu_queue_register_workfn , & qr ) ;
2010-05-19 08:14:04 +04:00
}
static int spu_queue_setup ( struct spu_queue * p )
{
int err ;
p - > q = new_queue ( p - > q_type ) ;
if ( ! p - > q )
return - ENOMEM ;
err = spu_queue_register ( p , p - > q_type ) ;
if ( err ) {
free_queue ( p - > q , p - > q_type ) ;
p - > q = NULL ;
}
return err ;
}
static void spu_queue_destroy ( struct spu_queue * p )
{
unsigned long hv_ret ;
if ( ! p - > q )
return ;
hv_ret = sun4v_ncs_qconf ( p - > q_type , p - > qhandle , 0 , & p - > qhandle ) ;
if ( ! hv_ret )
free_queue ( p - > q , p - > q_type ) ;
}
static void spu_list_destroy ( struct list_head * list )
{
struct spu_queue * p , * n ;
list_for_each_entry_safe ( p , n , list , list ) {
int i ;
for ( i = 0 ; i < NR_CPUS ; i + + ) {
if ( cpu_to_cwq [ i ] = = p )
cpu_to_cwq [ i ] = NULL ;
}
if ( p - > irq ) {
free_irq ( p - > irq , p ) ;
p - > irq = 0 ;
}
spu_queue_destroy ( p ) ;
list_del ( & p - > list ) ;
kfree ( p ) ;
}
}
/* Walk the backward arcs of a CWQ 'exec-unit' node,
* gathering cpu membership information .
*/
static int spu_mdesc_walk_arcs ( struct mdesc_handle * mdesc ,
2010-08-06 19:25:50 +04:00
struct platform_device * dev ,
2010-05-19 08:14:04 +04:00
u64 node , struct spu_queue * p ,
struct spu_queue * * table )
{
u64 arc ;
mdesc_for_each_arc ( arc , mdesc , node , MDESC_ARC_TYPE_BACK ) {
u64 tgt = mdesc_arc_target ( mdesc , arc ) ;
const char * name = mdesc_node_name ( mdesc , tgt ) ;
const u64 * id ;
if ( strcmp ( name , " cpu " ) )
continue ;
id = mdesc_get_property ( mdesc , tgt , " id " , NULL ) ;
if ( table [ * id ] ! = NULL ) {
2017-07-19 00:42:56 +03:00
dev_err ( & dev - > dev , " %pOF: SPU cpu slot already set. \n " ,
dev - > dev . of_node ) ;
2010-05-19 08:14:04 +04:00
return - EINVAL ;
}
2015-03-05 03:19:16 +03:00
cpumask_set_cpu ( * id , & p - > sharing ) ;
2010-05-19 08:14:04 +04:00
table [ * id ] = p ;
}
return 0 ;
}
/* Process an 'exec-unit' MDESC node of type 'cwq'. */
static int handle_exec_unit ( struct spu_mdesc_info * ip , struct list_head * list ,
2010-08-06 19:25:50 +04:00
struct platform_device * dev , struct mdesc_handle * mdesc ,
2010-05-19 08:14:04 +04:00
u64 node , const char * iname , unsigned long q_type ,
irq_handler_t handler , struct spu_queue * * table )
{
struct spu_queue * p ;
int err ;
p = kzalloc ( sizeof ( struct spu_queue ) , GFP_KERNEL ) ;
if ( ! p ) {
2017-07-19 00:42:56 +03:00
dev_err ( & dev - > dev , " %pOF: Could not allocate SPU queue. \n " ,
dev - > dev . of_node ) ;
2010-05-19 08:14:04 +04:00
return - ENOMEM ;
}
2015-03-05 03:19:16 +03:00
cpumask_clear ( & p - > sharing ) ;
2010-05-19 08:14:04 +04:00
spin_lock_init ( & p - > lock ) ;
p - > q_type = q_type ;
INIT_LIST_HEAD ( & p - > jobs ) ;
list_add ( & p - > list , list ) ;
err = spu_mdesc_walk_arcs ( mdesc , dev , node , p , table ) ;
if ( err )
return err ;
err = spu_queue_setup ( p ) ;
if ( err )
return err ;
return spu_map_ino ( dev , ip , iname , p , handler ) ;
}
2010-08-06 19:25:50 +04:00
static int spu_mdesc_scan ( struct mdesc_handle * mdesc , struct platform_device * dev ,
2010-05-19 08:14:04 +04:00
struct spu_mdesc_info * ip , struct list_head * list ,
const char * exec_name , unsigned long q_type ,
irq_handler_t handler , struct spu_queue * * table )
{
int err = 0 ;
u64 node ;
mdesc_for_each_node_by_name ( mdesc , node , " exec-unit " ) {
const char * type ;
type = mdesc_get_property ( mdesc , node , " type " , NULL ) ;
if ( ! type | | strcmp ( type , exec_name ) )
continue ;
err = handle_exec_unit ( ip , list , dev , mdesc , node ,
exec_name , q_type , handler , table ) ;
if ( err ) {
spu_list_destroy ( list ) ;
break ;
}
}
return err ;
}
static int get_irq_props(struct mdesc_handle *mdesc, u64 node,
			 struct spu_mdesc_info *ip)
{
	const u64 *ino;
	int ino_len;
	int i;

	ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
	if (!ino) {
		pr_err("NO 'ino'\n");
		return -ENODEV;
	}

	ip->num_intrs = ino_len / sizeof(u64);
	ip->ino_table = kzalloc((sizeof(struct ino_blob) *
				 ip->num_intrs),
				GFP_KERNEL);
	if (!ip->ino_table)
		return -ENOMEM;

	for (i = 0; i < ip->num_intrs; i++) {
		struct ino_blob *b = &ip->ino_table[i];

		b->intr = i + 1;
		b->ino = ino[i];
	}

	return 0;
}

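/* Find the "virtual-device" MDESC node whose name matches @node_name
 * and whose "cfg-handle" equals the device's first "reg" value, then
 * collect that node's interrupt numbers.
 */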
static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
				struct platform_device *dev,
				struct spu_mdesc_info *ip,
				const char *node_name)
{
	u64 node, reg;

	if (of_property_read_reg(dev->dev.of_node, 0, &reg, NULL) < 0)
		return -ENODEV;

	mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
		const char *name;
		const u64 *chdl;

		name = mdesc_get_property(mdesc, node, "name", NULL);
		if (!name || strcmp(name, node_name))
			continue;
		chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
		if (!chdl || (*chdl != reg))
			continue;
		ip->cfg_handle = *chdl;
		return get_irq_props(mdesc, node, ip);
	}

	return -ENODEV;
}

static unsigned long n2_spu_hvapi_major;
static unsigned long n2_spu_hvapi_minor;

static int n2_spu_hvapi_register(void)
{
	int err;

	n2_spu_hvapi_major = 2;
	n2_spu_hvapi_minor = 0;
	err = sun4v_hvapi_register(HV_GRP_NCS,
				   n2_spu_hvapi_major,
				   &n2_spu_hvapi_minor);
	if (!err)
		pr_info("Registered NCS HVAPI version %lu.%lu\n",
			n2_spu_hvapi_major,
			n2_spu_hvapi_minor);

	return err;
}

static void n2_spu_hvapi_unregister(void)
{
	sun4v_hvapi_unregister(HV_GRP_NCS);
}

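/* The NCS HVAPI registration, the queue cache, and the per-cpu queue
 * tables are shared by the n2cp and ncp drivers; global_ref counts
 * how many probed devices currently hold them.
 */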
static int global_ref;

static int grab_global_resources(void)
{
	int err = 0;

	mutex_lock(&spu_lock);

	if (global_ref++)
		goto out;

	err = n2_spu_hvapi_register();
	if (err)
		goto out;

	err = queue_cache_init();
	if (err)
		goto out_hvapi_release;

	err = -ENOMEM;
	cpu_to_cwq = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
			     GFP_KERNEL);
	if (!cpu_to_cwq)
		goto out_queue_cache_destroy;

	cpu_to_mau = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
			     GFP_KERNEL);
	if (!cpu_to_mau)
		goto out_free_cwq_table;

	err = 0;

out:
	if (err)
		global_ref--;
	mutex_unlock(&spu_lock);

	return err;

out_free_cwq_table:
	kfree(cpu_to_cwq);
	cpu_to_cwq = NULL;

out_queue_cache_destroy:
	queue_cache_destroy();

out_hvapi_release:
	n2_spu_hvapi_unregister();
	goto out;
}

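/* Drop one reference to the shared state; the last device out tears
 * everything down.
 */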
static void release_global_resources(void)
{
	mutex_lock(&spu_lock);
	if (!--global_ref) {
		kfree(cpu_to_cwq);
		cpu_to_cwq = NULL;

		kfree(cpu_to_mau);
		cpu_to_mau = NULL;

		queue_cache_destroy();
		n2_spu_hvapi_unregister();
	}
	mutex_unlock(&spu_lock);
}

static struct n2_crypto *alloc_n2cp(void)
{
	struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);

	if (np)
		INIT_LIST_HEAD(&np->cwq_list);

	return np;
}

static void free_n2cp(struct n2_crypto *np)
{
	kfree(np->cwq_info.ino_table);
	np->cwq_info.ino_table = NULL;

	kfree(np);
}

static void n2_spu_driver_version(void)
{
	static int n2_spu_version_printed;

	if (n2_spu_version_printed++ == 0)
		pr_info("%s", version);
}

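/* Bring up one CWQ device: grab the shared resources, locate this
 * device's 'cwq' exec-units in the MDESC, map their interrupts, and
 * register the crypto algorithms backed by the queues.
 */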
static int n2_crypto_probe(struct platform_device *dev)
{
	struct mdesc_handle *mdesc;
	struct n2_crypto *np;
	int err;

	n2_spu_driver_version();

	pr_info("Found N2CP at %pOF\n", dev->dev.of_node);

	np = alloc_n2cp();
	if (!np) {
		dev_err(&dev->dev, "%pOF: Unable to allocate n2cp.\n",
			dev->dev.of_node);
		return -ENOMEM;
	}

	err = grab_global_resources();
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
			dev->dev.of_node);
		goto out_free_n2cp;
	}

	mdesc = mdesc_grab();
	if (!mdesc) {
		dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
			dev->dev.of_node);
		err = -ENODEV;
		goto out_free_global;
	}

	err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
			dev->dev.of_node);
		mdesc_release(mdesc);
		goto out_free_global;
	}

	err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
			     "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
			     cpu_to_cwq);
	mdesc_release(mdesc);

	if (err) {
		dev_err(&dev->dev, "%pOF: CWQ MDESC scan failed.\n",
			dev->dev.of_node);
		goto out_free_global;
	}

	err = n2_register_algs();
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to register algorithms.\n",
			dev->dev.of_node);
		goto out_free_spu_list;
	}

	dev_set_drvdata(&dev->dev, np);

	return 0;

out_free_spu_list:
	spu_list_destroy(&np->cwq_list);

out_free_global:
	release_global_resources();

out_free_n2cp:
	free_n2cp(np);

	return err;
}

static void n2_crypto_remove(struct platform_device *dev)
{
	struct n2_crypto *np = dev_get_drvdata(&dev->dev);

	n2_unregister_algs();

	spu_list_destroy(&np->cwq_list);

	release_global_resources();

	free_n2cp(np);
}

static struct n2_mau *alloc_ncp(void)
{
	struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);

	if (mp)
		INIT_LIST_HEAD(&mp->mau_list);

	return mp;
}

static void free_ncp(struct n2_mau *mp)
{
	kfree(mp->mau_info.ino_table);
	mp->mau_info.ino_table = NULL;

	kfree(mp);
}

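/* Bring up one MAU device; this mirrors n2_crypto_probe() but scans
 * for 'mau' exec-units and registers no algorithms of its own.
 */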
static int n2_mau_probe(struct platform_device *dev)
{
	struct mdesc_handle *mdesc;
	struct n2_mau *mp;
	int err;

	n2_spu_driver_version();

	pr_info("Found NCP at %pOF\n", dev->dev.of_node);

	mp = alloc_ncp();
	if (!mp) {
		dev_err(&dev->dev, "%pOF: Unable to allocate ncp.\n",
			dev->dev.of_node);
		return -ENOMEM;
	}

	err = grab_global_resources();
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
			dev->dev.of_node);
		goto out_free_ncp;
	}

	mdesc = mdesc_grab();
	if (!mdesc) {
		dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
			dev->dev.of_node);
		err = -ENODEV;
		goto out_free_global;
	}

	err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
			dev->dev.of_node);
		mdesc_release(mdesc);
		goto out_free_global;
	}

	err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
			     "mau", HV_NCS_QTYPE_MAU, mau_intr,
			     cpu_to_mau);
	mdesc_release(mdesc);

	if (err) {
		dev_err(&dev->dev, "%pOF: MAU MDESC scan failed.\n",
			dev->dev.of_node);
		goto out_free_global;
	}

	dev_set_drvdata(&dev->dev, mp);

	return 0;

out_free_global:
	release_global_resources();

out_free_ncp:
	free_ncp(mp);

	return err;
}

static void n2_mau_remove(struct platform_device *dev)
{
	struct n2_mau *mp = dev_get_drvdata(&dev->dev);

	spu_list_destroy(&mp->mau_list);

	release_global_resources();

	free_ncp(mp);
}

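/* The n2/vf/kt compatible strings cover the UltraSPARC T2, T2 Plus
 * (Victoria Falls), and T3 (KT) incarnations of these units.
 */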
static const struct of_device_id n2_crypto_match[] = {
	{
		.name = "n2cp",
		.compatible = "SUNW,n2-cwq",
	},
	{
		.name = "n2cp",
		.compatible = "SUNW,vf-cwq",
	},
	{
		.name = "n2cp",
		.compatible = "SUNW,kt-cwq",
	},
	{},
};

MODULE_DEVICE_TABLE(of, n2_crypto_match);

static struct platform_driver n2_crypto_driver = {
	.driver = {
		.name		= "n2cp",
		.of_match_table	= n2_crypto_match,
	},
	.probe		= n2_crypto_probe,
	.remove_new	= n2_crypto_remove,
};

static const struct of_device_id n2_mau_match[] = {
	{
		.name = "ncp",
		.compatible = "SUNW,n2-mau",
	},
	{
		.name = "ncp",
		.compatible = "SUNW,vf-mau",
	},
	{
		.name = "ncp",
		.compatible = "SUNW,kt-mau",
	},
	{},
};

MODULE_DEVICE_TABLE(of, n2_mau_match);

static struct platform_driver n2_mau_driver = {
	.driver = {
		.name		= "ncp",
		.of_match_table	= n2_mau_match,
	},
	.probe		= n2_mau_probe,
	.remove_new	= n2_mau_remove,
};

static struct platform_driver * const drivers[] = {
	&n2_crypto_driver,
	&n2_mau_driver,
};

static int __init n2_init(void)
{
	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}

static void __exit n2_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}

module_init(n2_init);
module_exit(n2_exit);