// SPDX-License-Identifier: GPL-2.0-only
/**
 * Routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/of.h>
#include <asm/hvcall.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

/**
 * nx_hcall_sync - make an H_COP_OP hcall for the passed in op structure
 *
 * @nx_ctx: the crypto context handle
 * @op: PFO operation struct to pass in
 * @may_sleep: flag indicating the request can sleep
 *
 * Make the hcall, retrying while the hardware is busy. If we cannot yield
 * the thread, limit the number of retries to 10 here.
 */
int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx,
		  struct vio_pfo_op    *op,
		  u32                   may_sleep)
{
	int rc, retries = 10;
	struct vio_dev *viodev = nx_driver.viodev;

	atomic_inc(&(nx_ctx->stats->sync_ops));

	do {
		rc = vio_h_cop_sync(viodev, op);
	} while (rc == -EBUSY && !may_sleep && retries--);

	if (rc) {
		dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d "
			"hcall rc: %ld\n", rc, op->hcall_err);
		atomic_inc(&(nx_ctx->stats->errors));
		atomic_set(&(nx_ctx->stats->last_error), op->hcall_err);
		atomic_set(&(nx_ctx->stats->last_error_pid), current->pid);
	}

	return rc;
}
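
/*
 * A minimal sketch of a typical call site (illustrative only, not copied
 * from the per-mode files): the caller hands over its prebuilt PFO op and
 * whether the originating crypto request is allowed to sleep.
 *
 *	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
 *			   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
 *	if (rc)
 *		goto out;
 */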

/**
 * nx_build_sg_list - build an NX scatter list describing a single buffer
 *
 * @sg_head: pointer to the first scatter list element to build
 * @start_addr: pointer to the linear buffer
 * @len: length of the data at @start_addr
 * @sgmax: the largest number of scatter list elements we're allowed to create
 *
 * This function will start writing nx_sg elements at @sg_head and keep
 * writing them until all of the data from @start_addr is described or
 * until sgmax elements have been written. Scatter list elements will be
 * created such that none of the elements describes a buffer that crosses a 4K
 * boundary.
 */
struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
			       u8           *start_addr,
			       unsigned int *len,
			       u32           sgmax)
{
	unsigned int sg_len = 0;
	struct nx_sg *sg;
	u64 sg_addr = (u64)start_addr;
	u64 end_addr;

	/* determine the start and end for this address range - slightly
	 * different if this is in VMALLOC_REGION */
	if (is_vmalloc_addr(start_addr))
		sg_addr = page_to_phys(vmalloc_to_page(start_addr))
			  + offset_in_page(sg_addr);
	else
		sg_addr = __pa(sg_addr);

	end_addr = sg_addr + *len;

	/* each iteration will write one struct nx_sg element and add the
	 * length of data described by that element to sg_len. Once @len bytes
	 * have been described (or @sgmax elements have been written), the
	 * loop ends. min_t is used to ensure @end_addr falls on the same page
	 * as sg_addr, if not, we need to create another nx_sg element for the
	 * data on the next page.
	 *
	 * Also when using vmalloc'ed data, every time that a system page
	 * boundary is crossed the physical address needs to be re-calculated.
	 */
	for (sg = sg_head; sg_len < *len; sg++) {
		u64 next_page;

		sg->addr = sg_addr;
		sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE),
				end_addr);

		next_page = (sg->addr & PAGE_MASK) + PAGE_SIZE;
		sg->len = min_t(u64, sg_addr, next_page) - sg->addr;
		sg_len += sg->len;

		if (sg_addr >= next_page &&
		    is_vmalloc_addr(start_addr + sg_len)) {
			sg_addr = page_to_phys(vmalloc_to_page(
						start_addr + sg_len));
			end_addr = sg_addr + *len - sg_len;
		}

		if ((sg - sg_head) == sgmax) {
			pr_err("nx: scatter/gather list overflow, pid: %d\n",
			       current->pid);
			sg++;
			break;
		}
	}
	*len = sg_len;

	/* return the moved sg_head pointer */
	return sg;
}
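
/*
 * A worked example of the 4K-boundary rule above, with made-up numbers:
 * describing 10000 bytes whose physical address begins 0x800 bytes into a
 * 4K page produces three nx_sg elements, none of which crosses a page
 * boundary:
 *
 *	element 0: 2048 bytes (to the end of the first page)
 *	element 1: 4096 bytes (one full page)
 *	element 2: 3856 bytes (the 10000 - 2048 - 4096 remainder)
 */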

/**
 * nx_walk_and_build - walk a linux scatterlist and build an nx scatterlist
 *
 * @nx_dst: pointer to the first nx_sg element to write
 * @sglen: max number of nx_sg entries we're allowed to write
 * @sg_src: pointer to the source linux scatterlist to walk
 * @start: number of bytes to fast-forward past at the beginning of @sg_src
 * @src_len: number of bytes to walk in @sg_src
 */
struct nx_sg *nx_walk_and_build(struct nx_sg       *nx_dst,
				unsigned int        sglen,
				struct scatterlist *sg_src,
				unsigned int        start,
				unsigned int       *src_len)
{
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_dst;
	unsigned int n, offset = 0, len = *src_len;
	char *dst;

	/* we need to fast forward through @start bytes first */
	for (;;) {
		scatterwalk_start(&walk, sg_src);

		if (start < offset + sg_src->length)
			break;

		offset += sg_src->length;
		sg_src = sg_next(sg_src);
	}

	/* start - offset is the number of bytes to advance in the scatterlist
	 * element we're currently looking at */
	scatterwalk_advance(&walk, start - offset);

	while (len && (nx_sg - nx_dst) < sglen) {
		n = scatterwalk_clamp(&walk, len);
		if (!n) {
			/* if the scatterlist is chained, sg_next() follows
			 * the chain link to the next list */
			scatterwalk_start(&walk, sg_next(walk.sg));
			n = scatterwalk_clamp(&walk, len);
		}
		dst = scatterwalk_map(&walk);

		nx_sg = nx_build_sg_list(nx_sg, dst, &n, sglen - (nx_sg - nx_dst));
		len -= n;

		scatterwalk_unmap(dst);
		scatterwalk_advance(&walk, n);
		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len);
	}
	/* update to_process */
	*src_len -= len;

	/* return the moved destination pointer */
	return nx_sg;
}
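
/*
 * Usage note (a sketch, not verbatim driver code): @src_len is in/out. On
 * entry it is the number of bytes the caller wants described; on return it
 * holds the number of bytes that actually fit in @sglen entries, which the
 * caller should treat as its to_process value:
 *
 *	unsigned int to_process = nbytes;
 *
 *	in_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len, req->src,
 *				  processed, &to_process);
 */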

/**
 * trim_sg_list - ensure an sg list does not exceed a given bound
 * @sg: sg list head
 * @end: sg list end
 * @delta: the number of bytes we need to crop in order to bound the list
 * @nbytes: in/out byte count; reduced by the amount of data put back
 */
static long int trim_sg_list(struct nx_sg *sg,
			     struct nx_sg *end,
			     unsigned int delta,
			     unsigned int *nbytes)
{
	long int oplen;
	long int data_back;
	unsigned int is_delta = delta;

	while (delta && end > sg) {
		struct nx_sg *last = end - 1;

		if (last->len > delta) {
			last->len -= delta;
			delta = 0;
		} else {
			end--;
			delta -= last->len;
		}
	}

	/* There are cases where we need to crop the list in order to make it
	 * a block size multiple, but we also need to align data. In order to
	 * do that we need to calculate how much data we need to put back to
	 * be processed
	 */
	oplen = (sg - end) * sizeof(struct nx_sg);
	if (is_delta) {
		data_back = (abs(oplen) / AES_BLOCK_SIZE) * sg->len;
		data_back = *nbytes - (data_back & ~(AES_BLOCK_SIZE - 1));
		*nbytes -= data_back;
	}

	return oplen;
}
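
/*
 * Note on the sign of the return value: oplen is computed as (sg - end), so
 * it comes back negative for any non-empty list. That is deliberate; as the
 * comment in nx_build_sg_lists() below explains, phyp takes a negative
 * in/out length to mean "this parameter is a scatterlist, not a linear
 * buffer".
 */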

/**
 * nx_build_sg_lists - walk the input scatterlists and build arrays of NX
 *                     scatterlists based on them.
 *
 * @nx_ctx: NX crypto context for the lists we're building
 * @iv: iv data, if the algorithm requires it
 * @dst: destination scatterlist
 * @src: source scatterlist
 * @nbytes: length of data described in the scatterlists
 * @offset: number of bytes to fast-forward past at the beginning of
 *          scatterlists.
 * @oiv: destination for the iv data, if the algorithm requires it
 *
 * This is common code shared by all the AES algorithms. It uses the crypto
 * scatterlist walk routines to traverse input and output scatterlists, building
 * corresponding NX scatterlists
 */
int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
		      const u8             *iv,
		      struct scatterlist   *dst,
		      struct scatterlist   *src,
		      unsigned int         *nbytes,
		      unsigned int          offset,
		      u8                   *oiv)
{
	unsigned int delta = 0;
	unsigned int total = *nbytes;
	struct nx_sg *nx_insg = nx_ctx->in_sg;
	struct nx_sg *nx_outsg = nx_ctx->out_sg;
	unsigned int max_sg_len;

	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			nx_driver.of.max_sg_len / sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			nx_ctx->ap->databytelen / NX_PAGE_SIZE);

	if (oiv)
		memcpy(oiv, iv, AES_BLOCK_SIZE);

	*nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);

	nx_outsg = nx_walk_and_build(nx_outsg, max_sg_len, dst, offset, nbytes);
	nx_insg = nx_walk_and_build(nx_insg, max_sg_len, src, offset, nbytes);

	if (*nbytes < total)
		delta = *nbytes - (*nbytes & ~(AES_BLOCK_SIZE - 1));

	/* these lengths should be negative, which will indicate to phyp that
	 * the input and output parameters are scatterlists, not linear
	 * buffers */
	nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta, nbytes);
	nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta, nbytes);

	return 0;
}
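
/*
 * An arithmetic sketch of the @delta computation above: with
 * AES_BLOCK_SIZE == 16, if the walk only managed to describe 1000 of the
 * requested bytes, delta = 1000 - (1000 & ~15) = 8, and trim_sg_list()
 * crops those 8 trailing bytes so the operation stays a whole number of
 * AES blocks.
 */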

/**
 * nx_ctx_init - initialize an nx_ctx's vio_pfo_op struct
 *
 * @nx_ctx: the nx context to initialize
 * @function: the function code for the op
 */
void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function)
{
	spin_lock_init(&nx_ctx->lock);
	memset(nx_ctx->kmem, 0, nx_ctx->kmem_len);
	nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT;

	nx_ctx->op.flags = function;
	nx_ctx->op.csbcpb = __pa(nx_ctx->csbcpb);
	nx_ctx->op.in = __pa(nx_ctx->in_sg);
	nx_ctx->op.out = __pa(nx_ctx->out_sg);

	if (nx_ctx->csbcpb_aead) {
		nx_ctx->csbcpb_aead->csb.valid |= NX_CSB_VALID_BIT;

		nx_ctx->op_aead.flags = function;
		nx_ctx->op_aead.csbcpb = __pa(nx_ctx->csbcpb_aead);
		nx_ctx->op_aead.in = __pa(nx_ctx->in_sg);
		nx_ctx->op_aead.out = __pa(nx_ctx->out_sg);
	}
}
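
/*
 * A hedged sketch of a caller (the real calls live in the per-mode setkey
 * paths; the function-code constant named here is an assumption, not taken
 * from this file):
 *
 *	nx_ctx_init(nx_ctx, HCOP_FC_AES);
 */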

static void nx_of_update_status(struct device   *dev,
				struct property *p,
				struct nx_of    *props)
{
	if (!strncmp(p->value, "okay", p->length)) {
		props->status = NX_WAITING;
		props->flags |= NX_OF_FLAG_STATUS_SET;
	} else {
		dev_info(dev, "%s: status '%s' is not 'okay'\n", __func__,
			 (char *)p->value);
	}
}

static void nx_of_update_sglen(struct device   *dev,
			       struct property *p,
			       struct nx_of    *props)
{
	if (p->length != sizeof(props->max_sg_len)) {
		dev_err(dev, "%s: unexpected format for "
			"ibm,max-sg-len property\n", __func__);
		dev_dbg(dev, "%s: ibm,max-sg-len is %d bytes "
			"long, expected %zd bytes\n", __func__,
			p->length, sizeof(props->max_sg_len));
		return;
	}

	props->max_sg_len = *(u32 *)p->value;
	props->flags |= NX_OF_FLAG_MAXSGLEN_SET;
}

static void nx_of_update_msc(struct device   *dev,
			     struct property *p,
			     struct nx_of    *props)
{
	struct msc_triplet *trip;
	struct max_sync_cop *msc;
	unsigned int bytes_so_far, i, lenp;

	msc = (struct max_sync_cop *)p->value;
	lenp = p->length;

	/* You can't tell if the data read in for this property is sane by its
	 * size alone. This is because there are sizes embedded in the data
	 * structure. The best we can do is check lengths as we parse and bail
	 * as soon as a length error is detected. */
	bytes_so_far = 0;

	while ((bytes_so_far + sizeof(struct max_sync_cop)) <= lenp) {
		bytes_so_far += sizeof(struct max_sync_cop);

		trip = msc->trip;
		for (i = 0;
		     ((bytes_so_far + sizeof(struct msc_triplet)) <= lenp) &&
		     i < msc->triplets;
		     i++) {
			if (msc->fc >= NX_MAX_FC || msc->mode >= NX_MAX_MODE) {
				dev_err(dev, "unknown function code/mode "
					"combo: %d/%d (ignored)\n", msc->fc,
					msc->mode);
				goto next_loop;
			}

			if (!trip->sglen || trip->databytelen < NX_PAGE_SIZE) {
				dev_warn(dev, "bogus sglen/databytelen: "
					 "%u/%u (ignored)\n", trip->sglen,
					 trip->databytelen);
				goto next_loop;
			}

			switch (trip->keybitlen) {
			case 128:
			case 160:
				props->ap[msc->fc][msc->mode][0].databytelen =
					trip->databytelen;
				props->ap[msc->fc][msc->mode][0].sglen =
					trip->sglen;
				break;
			case 192:
				props->ap[msc->fc][msc->mode][1].databytelen =
					trip->databytelen;
				props->ap[msc->fc][msc->mode][1].sglen =
					trip->sglen;
				break;
			case 256:
				if (msc->fc == NX_FC_AES) {
					props->ap[msc->fc][msc->mode][2].
						databytelen = trip->databytelen;
					props->ap[msc->fc][msc->mode][2].sglen =
						trip->sglen;
				} else if (msc->fc == NX_FC_AES_HMAC ||
					   msc->fc == NX_FC_SHA) {
					props->ap[msc->fc][msc->mode][1].
						databytelen = trip->databytelen;
					props->ap[msc->fc][msc->mode][1].sglen =
						trip->sglen;
				} else {
					dev_warn(dev, "unknown function "
						 "code/key bit len combo"
						 ": (%u/256)\n", msc->fc);
				}
				break;
			case 512:
				props->ap[msc->fc][msc->mode][2].databytelen =
					trip->databytelen;
				props->ap[msc->fc][msc->mode][2].sglen =
					trip->sglen;
				break;
			default:
				dev_warn(dev, "unknown function code/key bit "
					 "len combo: (%u/%u)\n", msc->fc,
					 trip->keybitlen);
				break;
			}
next_loop:
			bytes_so_far += sizeof(struct msc_triplet);
			trip++;
		}
		msc = (struct max_sync_cop *)trip;
	}

	props->flags |= NX_OF_FLAG_MAXSYNCCOP_SET;
}
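
/*
 * For orientation, the ibm,max-sync-cop property walked above is a sequence
 * of variable-length records; a sketch of the framing (see the driver
 * headers included above for the real struct definitions):
 *
 *	struct max_sync_cop header	(fc, mode, number of triplets)
 *	struct msc_triplet  trip[0]	(keybitlen, databytelen, sglen)
 *	  ...
 *	struct msc_triplet  trip[n-1]
 *	struct max_sync_cop header	(next record)
 *	  ...
 */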

/**
 * nx_of_init - read openFirmware values from the device tree
 *
 * @dev: device handle
 * @props: pointer to struct to hold the properties values
 *
 * Called once at driver probe time, this function will read out the
 * openFirmware properties we use at runtime. If all the OF properties are
 * acceptable, when we exit this function props->flags will indicate that
 * we're ready to register our crypto algorithms.
 */
static void nx_of_init(struct device *dev, struct nx_of *props)
{
	struct device_node *base_node = dev->of_node;
	struct property *p;

	p = of_find_property(base_node, "status", NULL);
	if (!p)
		dev_info(dev, "%s: property 'status' not found\n", __func__);
	else
		nx_of_update_status(dev, p, props);

	p = of_find_property(base_node, "ibm,max-sg-len", NULL);
	if (!p)
		dev_info(dev, "%s: property 'ibm,max-sg-len' not found\n",
			 __func__);
	else
		nx_of_update_sglen(dev, p, props);

	p = of_find_property(base_node, "ibm,max-sync-cop", NULL);
	if (!p)
		dev_info(dev, "%s: property 'ibm,max-sync-cop' not found\n",
			 __func__);
	else
		nx_of_update_msc(dev, p, props);
}
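
/*
 * An illustrative sketch of the device-tree node these routines parse; the
 * property names come from the code above, the values are made up:
 *
 *	ibm,sym-encryption-v1 {
 *		status = "okay";
 *		ibm,max-sg-len = <4096>;
 *		ibm,max-sync-cop = <...>;  (variable-length; see nx_of_update_msc())
 *	};
 */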

static bool nx_check_prop(struct device *dev, u32 fc, u32 mode, int slot)
{
	struct alg_props *props = &nx_driver.of.ap[fc][mode][slot];

	if (!props->sglen || props->databytelen < NX_PAGE_SIZE) {
		if (dev)
			dev_warn(dev, "bogus sglen/databytelen for %u/%u/%u: "
				 "%u/%u (ignored)\n", fc, mode, slot,
				 props->sglen, props->databytelen);
		return false;
	}

	return true;
}

static bool nx_check_props(struct device *dev, u32 fc, u32 mode)
{
	int i;

	for (i = 0; i < 3; i++)
		if (!nx_check_prop(dev, fc, mode, i))
			return false;

	return true;
}

static int nx_register_skcipher(struct skcipher_alg *alg, u32 fc, u32 mode)
{
	return nx_check_props(&nx_driver.viodev->dev, fc, mode) ?
	       crypto_register_skcipher(alg) : 0;
}

static int nx_register_aead(struct aead_alg *alg, u32 fc, u32 mode)
{
	return nx_check_props(&nx_driver.viodev->dev, fc, mode) ?
	       crypto_register_aead(alg) : 0;
}

static int nx_register_shash(struct shash_alg *alg, u32 fc, u32 mode, int slot)
{
	return (slot >= 0 ? nx_check_prop(&nx_driver.viodev->dev,
					  fc, mode, slot) :
			    nx_check_props(&nx_driver.viodev->dev, fc, mode)) ?
	       crypto_register_shash(alg) : 0;
}

static void nx_unregister_skcipher(struct skcipher_alg *alg, u32 fc, u32 mode)
{
	if (nx_check_props(NULL, fc, mode))
		crypto_unregister_skcipher(alg);
}

static void nx_unregister_aead(struct aead_alg *alg, u32 fc, u32 mode)
{
	if (nx_check_props(NULL, fc, mode))
		crypto_unregister_aead(alg);
}

static void nx_unregister_shash(struct shash_alg *alg, u32 fc, u32 mode,
				int slot)
{
	if (slot >= 0 ? nx_check_prop(NULL, fc, mode, slot) :
			nx_check_props(NULL, fc, mode))
		crypto_unregister_shash(alg);
}

/**
 * nx_register_algs - register algorithms with the crypto API
 *
 * Called from nx_probe()
 *
 * If all OF properties are in an acceptable state, the driver flags will
 * indicate that we're ready and we'll create our debugfs files and register
 * our crypto algorithms.
 */
static int nx_register_algs(void)
{
	int rc = -1;

	if (nx_driver.of.flags != NX_OF_FLAG_MASK_READY)
		goto out;

	memset(&nx_driver.stats, 0, sizeof(struct nx_stats));

	NX_DEBUGFS_INIT(&nx_driver);

	nx_driver.of.status = NX_OKAY;

	rc = nx_register_skcipher(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
	if (rc)
		goto out;

	rc = nx_register_skcipher(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
	if (rc)
		goto out_unreg_ecb;

	rc = nx_register_skcipher(&nx_ctr3686_aes_alg, NX_FC_AES,
				  NX_MODE_AES_CTR);
	if (rc)
		goto out_unreg_cbc;

	rc = nx_register_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
	if (rc)
		goto out_unreg_ctr3686;

	rc = nx_register_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
	if (rc)
		goto out_unreg_gcm;

	rc = nx_register_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
	if (rc)
		goto out_unreg_gcm4106;

	rc = nx_register_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
	if (rc)
		goto out_unreg_ccm;

	rc = nx_register_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA,
			       NX_PROPS_SHA256);
	if (rc)
		goto out_unreg_ccm4309;

	rc = nx_register_shash(&nx_shash_sha512_alg, NX_FC_SHA, NX_MODE_SHA,
			       NX_PROPS_SHA512);
	if (rc)
		goto out_unreg_s256;

	rc = nx_register_shash(&nx_shash_aes_xcbc_alg,
			       NX_FC_AES, NX_MODE_AES_XCBC_MAC, -1);
	if (rc)
		goto out_unreg_s512;

	goto out;

out_unreg_s512:
	nx_unregister_shash(&nx_shash_sha512_alg, NX_FC_SHA, NX_MODE_SHA,
			    NX_PROPS_SHA512);
out_unreg_s256:
	nx_unregister_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA,
			    NX_PROPS_SHA256);
out_unreg_ccm4309:
	nx_unregister_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
out_unreg_ccm:
	nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
out_unreg_gcm4106:
	nx_unregister_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
out_unreg_gcm:
	nx_unregister_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
out_unreg_ctr3686:
	nx_unregister_skcipher(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
out_unreg_cbc:
	nx_unregister_skcipher(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
out_unreg_ecb:
	nx_unregister_skcipher(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
out:
	return rc;
}
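
/*
 * The unwind labels above run in reverse registration order: a failure at
 * any step jumps to the label that unregisters everything registered so
 * far, so a partial registration never leaves stale algorithms behind.
 */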

/**
 * nx_crypto_ctx_init - create and initialize a crypto api context
 *
 * @nx_ctx: the crypto api context
 * @fc: function code for the context
 * @mode: the function code specific mode for this context
 */
static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
{
	if (nx_driver.of.status != NX_OKAY) {
		pr_err("Attempt to initialize NX crypto context while device "
		       "is not available!\n");
		return -ENODEV;
	}

	/* we need an extra page for csbcpb_aead for these modes */
	if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
		nx_ctx->kmem_len = (5 * NX_PAGE_SIZE) +
				   sizeof(struct nx_csbcpb);
	else
		nx_ctx->kmem_len = (4 * NX_PAGE_SIZE) +
				   sizeof(struct nx_csbcpb);

	nx_ctx->kmem = kmalloc(nx_ctx->kmem_len, GFP_KERNEL);
	if (!nx_ctx->kmem)
		return -ENOMEM;

	/* the csbcpb and scatterlists must be 4K aligned pages */
	nx_ctx->csbcpb = (struct nx_csbcpb *)(round_up((u64)nx_ctx->kmem,
						       (u64)NX_PAGE_SIZE));
	nx_ctx->in_sg = (struct nx_sg *)((u8 *)nx_ctx->csbcpb + NX_PAGE_SIZE);
	nx_ctx->out_sg = (struct nx_sg *)((u8 *)nx_ctx->in_sg + NX_PAGE_SIZE);

	if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
		nx_ctx->csbcpb_aead =
			(struct nx_csbcpb *)((u8 *)nx_ctx->out_sg +
					     NX_PAGE_SIZE);

	/* give each context a pointer to global stats and their OF
	 * properties */
	nx_ctx->stats = &nx_driver.stats;
	memcpy(nx_ctx->props, nx_driver.of.ap[fc][mode],
	       sizeof(struct alg_props) * 3);

	return 0;
}
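
/*
 * The layout that the pointer arithmetic above carves out of kmem, sketched
 * for orientation (offsets are from the first NX_PAGE_SIZE-aligned address
 * inside the allocation; the extra page in kmem_len absorbs the round_up):
 *
 *	page 0: csbcpb
 *	page 1: in_sg array
 *	page 2: out_sg array
 *	page 3: csbcpb_aead (GCM and CCM modes only)
 */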

/* entry points from the crypto tfm initializers */
int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm)
{
	crypto_aead_set_reqsize(tfm, sizeof(struct nx_ccm_rctx));
	return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_CCM);
}

int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm)
{
	crypto_aead_set_reqsize(tfm, sizeof(struct nx_gcm_rctx));
	return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_GCM);
}

int nx_crypto_ctx_aes_ctr_init(struct crypto_skcipher *tfm)
{
	return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_CTR);
}

int nx_crypto_ctx_aes_cbc_init(struct crypto_skcipher *tfm)
{
	return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_CBC);
}

int nx_crypto_ctx_aes_ecb_init(struct crypto_skcipher *tfm)
{
	return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_ECB);
}

int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm)
{
	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_SHA, NX_MODE_SHA);
}

int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm)
{
	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_XCBC_MAC);
}

/**
 * nx_crypto_ctx_exit - destroy a crypto api context
 *
 * @tfm: the crypto transform pointer for the context
 *
 * As crypto API contexts are destroyed, this exit hook is called to free the
 * memory associated with it.
 */
void nx_crypto_ctx_exit(struct crypto_tfm *tfm)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);

	kzfree(nx_ctx->kmem);
	nx_ctx->csbcpb = NULL;
	nx_ctx->csbcpb_aead = NULL;
	nx_ctx->in_sg = NULL;
	nx_ctx->out_sg = NULL;
}

void nx_crypto_ctx_skcipher_exit(struct crypto_skcipher *tfm)
{
	/* nx_crypto_ctx_exit() expects the underlying crypto_tfm, not the
	 * context pointer, so convert with crypto_skcipher_tfm() */
	nx_crypto_ctx_exit(crypto_skcipher_tfm(tfm));
}

void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm)
{
	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);

	kzfree(nx_ctx->kmem);
}

static int nx_probe(struct vio_dev *viodev, const struct vio_device_id *id)
{
	dev_dbg(&viodev->dev, "driver probed: %s resource id: 0x%x\n",
		viodev->name, viodev->resource_id);

	if (nx_driver.viodev) {
		dev_err(&viodev->dev, "%s: Attempt to register more than one "
			"instance of the hardware\n", __func__);
		return -EINVAL;
	}

	nx_driver.viodev = viodev;

	nx_of_init(&viodev->dev, &nx_driver.of);

	return nx_register_algs();
}

static int nx_remove(struct vio_dev *viodev)
{
	dev_dbg(&viodev->dev, "entering nx_remove for UA 0x%x\n",
		viodev->unit_address);

	if (nx_driver.of.status == NX_OKAY) {
		NX_DEBUGFS_FINI(&nx_driver);

		nx_unregister_shash(&nx_shash_aes_xcbc_alg,
				    NX_FC_AES, NX_MODE_AES_XCBC_MAC, -1);
		nx_unregister_shash(&nx_shash_sha512_alg,
				    NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA512);
		nx_unregister_shash(&nx_shash_sha256_alg,
				    NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA256);
		nx_unregister_aead(&nx_ccm4309_aes_alg,
				   NX_FC_AES, NX_MODE_AES_CCM);
		nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
		nx_unregister_aead(&nx_gcm4106_aes_alg,
				   NX_FC_AES, NX_MODE_AES_GCM);
		nx_unregister_aead(&nx_gcm_aes_alg,
				   NX_FC_AES, NX_MODE_AES_GCM);
		nx_unregister_skcipher(&nx_ctr3686_aes_alg,
				       NX_FC_AES, NX_MODE_AES_CTR);
		nx_unregister_skcipher(&nx_cbc_aes_alg, NX_FC_AES,
				       NX_MODE_AES_CBC);
		nx_unregister_skcipher(&nx_ecb_aes_alg, NX_FC_AES,
				       NX_MODE_AES_ECB);
	}

	return 0;
}

/* module wide initialization/cleanup */
static int __init nx_init(void)
{
	return vio_register_driver(&nx_driver.viodriver);
}

static void __exit nx_fini(void)
{
	vio_unregister_driver(&nx_driver.viodriver);
}

static const struct vio_device_id nx_crypto_driver_ids[] = {
	{ "ibm,sym-encryption-v1", "ibm,sym-encryption" },
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, nx_crypto_driver_ids);

/* driver state structure */
struct nx_crypto_driver nx_driver = {
	.viodriver = {
		.id_table = nx_crypto_driver_ids,
		.probe = nx_probe,
		.remove = nx_remove,
		.name  = NX_NAME,
	},
};

module_init(nx_init);
module_exit(nx_fini);

MODULE_AUTHOR("Kent Yoder <yoder1@us.ibm.com>");
MODULE_DESCRIPTION(NX_STRING);
MODULE_LICENSE("GPL");
MODULE_VERSION(NX_VERSION);