// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 */
# include <linux/dma-mapping.h>
# include <linux/spinlock.h>
# include "safexcel.h"
/*
 * Allocate the DMA-coherent command (CDR) and result (RDR) descriptor
 * rings for one ring pair, plus the CDR "shadow" ring that holds the
 * additional token data referenced by each command descriptor.
 *
 * All allocations use the managed (devres) dmam_alloc_coherent(), so no
 * explicit cleanup is needed on the partial-failure paths below — the
 * driver core frees them when the device is released.
 *
 * Note ->base_end points AT the last descriptor slot, not one past it
 * (hence the "RING_SIZE - 1" factor); the ring walkers below compare
 * against it for wraparound.
 *
 * Returns 0 on success or -ENOMEM if any ring allocation fails.
 */
int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
				   struct safexcel_desc_ring *cdr,
				   struct safexcel_desc_ring *rdr)
{
	int i;
	struct safexcel_command_desc *cdesc;
	dma_addr_t atok;

	/* Actual command descriptor ring */
	cdr->offset = priv->config.cd_offset;
	cdr->base = dmam_alloc_coherent(priv->dev,
					cdr->offset * EIP197_DEFAULT_RING_SIZE,
					&cdr->base_dma, GFP_KERNEL);
	if (!cdr->base)
		return -ENOMEM;
	cdr->write = cdr->base;
	cdr->base_end = cdr->base + cdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
	cdr->read = cdr->base;

	/* Command descriptor shadow ring for storing additional token data */
	cdr->shoffset = priv->config.cdsh_offset;
	cdr->shbase = dmam_alloc_coherent(priv->dev,
					  cdr->shoffset *
					  EIP197_DEFAULT_RING_SIZE,
					  &cdr->shbase_dma, GFP_KERNEL);
	if (!cdr->shbase)
		return -ENOMEM;
	cdr->shwrite = cdr->shbase;
	cdr->shbase_end = cdr->shbase + cdr->shoffset *
			  (EIP197_DEFAULT_RING_SIZE - 1);

	/*
	 * Populate command descriptors with physical pointers to shadow descs.
	 * Note that we only need to do this once if we don't overwrite them.
	 */
	cdesc = cdr->base;
	atok = cdr->shbase_dma;
	for (i = 0; i < EIP197_DEFAULT_RING_SIZE; i++) {
		cdesc->atok_lo = lower_32_bits(atok);
		cdesc->atok_hi = upper_32_bits(atok);
		/* Descriptor stride is cd_offset bytes, so step via void *. */
		cdesc = (void *)cdesc + cdr->offset;
		atok += cdr->shoffset;
	}

	rdr->offset = priv->config.rd_offset;
	/* Use shoffset for result token offset here */
	rdr->shoffset = priv->config.res_offset;
	rdr->base = dmam_alloc_coherent(priv->dev,
					rdr->offset * EIP197_DEFAULT_RING_SIZE,
					&rdr->base_dma, GFP_KERNEL);
	if (!rdr->base)
		return -ENOMEM;
	rdr->write = rdr->base;
	rdr->base_end = rdr->base + rdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
	rdr->read = rdr->base;

	return 0;
}
inline int safexcel_select_ring ( struct safexcel_crypto_priv * priv )
{
return ( atomic_inc_return ( & priv - > ring_used ) % priv - > config . rings ) ;
}
2019-12-11 17:32:35 +01:00
/*
 * Reserve the next command descriptor slot in @ring and advance the
 * write pointer, keeping the shadow-ring (token data) write pointer in
 * lockstep.
 *
 * For the first descriptor of a request (@first), *@atoken is pointed
 * at the matching shadow slot so the caller can emit token
 * instructions. NOTE(review): *@atoken is assigned before the
 * ring-full check, so it is presumably ignored by callers on an
 * ERR_PTR return — verify call sites.
 *
 * Returns the slot the write pointer was at, or ERR_PTR(-ENOMEM) when
 * the ring is full.
 */
static void *safexcel_ring_next_cwptr(struct safexcel_crypto_priv *priv,
				      struct safexcel_desc_ring *ring,
				      bool first,
				      struct safexcel_token **atoken)
{
	void *ptr = ring->write;

	if (first)
		*atoken = ring->shwrite;

	/*
	 * Ring full: either write sits one slot behind read, or read is
	 * at the base while write is already on the last slot.
	 */
	if ((ring->write == ring->read - ring->offset) ||
	    (ring->read == ring->base && ring->write == ring->base_end))
		return ERR_PTR(-ENOMEM);

	/* Advance both write pointers, wrapping at the last slot. */
	if (ring->write == ring->base_end) {
		ring->write = ring->base;
		ring->shwrite = ring->shbase;
	} else {
		ring->write += ring->offset;
		ring->shwrite += ring->shoffset;
	}

	return ptr;
}
/*
 * Reserve the next result descriptor slot in @ring and advance the
 * write pointer.
 *
 * *@rtoken is pointed at the result token, which lives inside the same
 * slot at relative offset ->shoffset (reused as the result token
 * offset for the RDR — see safexcel_init_ring_descriptors()).
 * NOTE(review): *@rtoken is assigned before the ring-full check, so it
 * is presumably ignored by callers on an ERR_PTR return — verify call
 * sites.
 *
 * Returns the reserved slot, or ERR_PTR(-ENOMEM) when the ring is full.
 */
static void *safexcel_ring_next_rwptr(struct safexcel_crypto_priv *priv,
				      struct safexcel_desc_ring *ring,
				      struct result_data_desc **rtoken)
{
	void *ptr = ring->write;

	/* Result token at relative offset shoffset */
	*rtoken = ring->write + ring->shoffset;

	/*
	 * Ring full: either write sits one slot behind read, or read is
	 * at the base while write is already on the last slot.
	 */
	if ((ring->write == ring->read - ring->offset) ||
	    (ring->read == ring->base && ring->write == ring->base_end))
		return ERR_PTR(-ENOMEM);

	/* Advance the write pointer, wrapping at the last slot. */
	if (ring->write == ring->base_end)
		ring->write = ring->base;
	else
		ring->write += ring->offset;

	return ptr;
}
void * safexcel_ring_next_rptr ( struct safexcel_crypto_priv * priv ,
2018-06-28 17:15:36 +02:00
struct safexcel_desc_ring * ring )
2017-05-24 16:10:34 +02:00
{
void * ptr = ring - > read ;
2018-06-28 17:21:57 +02:00
if ( ring - > write = = ring - > read )
2017-05-24 16:10:34 +02:00
return ERR_PTR ( - ENOENT ) ;
if ( ring - > read = = ring - > base_end )
ring - > read = ring - > base ;
2018-06-28 17:21:57 +02:00
else
ring - > read + = ring - > offset ;
2017-05-24 16:10:34 +02:00
return ptr ;
}
2018-06-28 17:21:57 +02:00
inline void * safexcel_ring_curr_rptr ( struct safexcel_crypto_priv * priv ,
int ring )
{
struct safexcel_desc_ring * rdr = & priv - > ring [ ring ] . rdr ;
return rdr - > read ;
}
inline int safexcel_ring_first_rdr_index ( struct safexcel_crypto_priv * priv ,
int ring )
{
struct safexcel_desc_ring * rdr = & priv - > ring [ ring ] . rdr ;
return ( rdr - > read - rdr - > base ) / rdr - > offset ;
}
inline int safexcel_ring_rdr_rdesc_index ( struct safexcel_crypto_priv * priv ,
int ring ,
struct safexcel_result_desc * rdesc )
{
struct safexcel_desc_ring * rdr = & priv - > ring [ ring ] . rdr ;
return ( ( void * ) rdesc - rdr - > base ) / rdr - > offset ;
}
2017-05-24 16:10:34 +02:00
void safexcel_ring_rollback_wptr ( struct safexcel_crypto_priv * priv ,
2018-06-28 17:15:36 +02:00
struct safexcel_desc_ring * ring )
2017-05-24 16:10:34 +02:00
{
2018-06-28 17:21:57 +02:00
if ( ring - > write = = ring - > read )
2017-05-24 16:10:34 +02:00
return ;
2019-12-11 17:32:35 +01:00
if ( ring - > write = = ring - > base ) {
2018-06-28 17:21:57 +02:00
ring - > write = ring - > base_end ;
2019-12-11 17:32:35 +01:00
ring - > shwrite = ring - > shbase_end ;
} else {
2017-05-24 16:10:34 +02:00
ring - > write - = ring - > offset ;
2019-12-11 17:32:35 +01:00
ring - > shwrite - = ring - > shoffset ;
}
2017-05-24 16:10:34 +02:00
}
/*
 * Fill in the next command descriptor on ring @ring_id for one segment
 * of a request.
 *
 * @first/@last: flags for the first/last segment of the request.
 * @data/@data_len: DMA address and length of this segment's input.
 * @full_data_len: total packet length; only written into the control
 *	data of the first segment.
 * @context: DMA address of the per-request context record.
 * @atoken: on the first segment, receives a pointer to the token area
 *	(shadow descriptor) for this slot.
 *
 * Returns the descriptor, or an ERR_PTR() when the ring is full.
 */
struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
						 int ring_id,
						 bool first, bool last,
						 dma_addr_t data, u32 data_len,
						 u32 full_data_len,
						 dma_addr_t context,
						 struct safexcel_token **atoken)
{
	struct safexcel_command_desc *cdesc;

	cdesc = safexcel_ring_next_cwptr(priv, &priv->ring[ring_id].cdr,
					 first, atoken);
	if (IS_ERR(cdesc))
		return cdesc;

	cdesc->particle_size = data_len;
	cdesc->rsvd0 = 0;
	cdesc->last_seg = last;
	cdesc->first_seg = first;
	cdesc->additional_cdata_size = 0;
	cdesc->rsvd1 = 0;
	cdesc->data_lo = lower_32_bits(data);
	cdesc->data_hi = upper_32_bits(data);

	if (first) {
		/*
		 * Note that the length here MUST be > 0 or else the EIP(1)97
		 * may hang. Newer EIP197 firmware actually incorporates this
		 * fix already, but that doesn't help the EIP97 and we may
		 * also be running older firmware.
		 */
		cdesc->control_data.packet_length = full_data_len ?: 1;
		cdesc->control_data.options = EIP197_OPTION_MAGIC_VALUE |
					      EIP197_OPTION_64BIT_CTX |
					      EIP197_OPTION_CTX_CTRL_IN_CMD |
					      EIP197_OPTION_RC_AUTO;
		cdesc->control_data.type = EIP197_TYPE_BCLA;
		cdesc->control_data.context_lo = lower_32_bits(context) |
						 EIP197_CONTEXT_SMALL;
		cdesc->control_data.context_hi = upper_32_bits(context);
	}

	return cdesc;
}
/*
 * Fill in the next result descriptor on ring @ring_id for one segment
 * of a request.
 *
 * @first/@last: flags for the first/last segment of the request.
 * @data/@len: DMA address and length of this segment's output buffer.
 *
 * The overflow bits and the result token's error_code are pre-set to
 * "error" values; per the comments below the hardware clears them on
 * success, so a descriptor that was never processed still reads back
 * as failed.
 *
 * Returns the descriptor, or an ERR_PTR() when the ring is full.
 */
struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *priv,
						int ring_id,
						bool first, bool last,
						dma_addr_t data, u32 len)
{
	struct safexcel_result_desc *rdesc;
	struct result_data_desc *rtoken;

	rdesc = safexcel_ring_next_rwptr(priv, &priv->ring[ring_id].rdr,
					 &rtoken);
	if (IS_ERR(rdesc))
		return rdesc;

	rdesc->particle_size = len;
	rdesc->rsvd0 = 0;
	rdesc->descriptor_overflow = 1; /* assume error */
	rdesc->buffer_overflow = 1; /* assume error */
	rdesc->last_seg = last;
	rdesc->first_seg = first;
	rdesc->result_size = EIP197_RD64_RESULT_SIZE;
	rdesc->rsvd1 = 0;
	rdesc->data_lo = lower_32_bits(data);
	rdesc->data_hi = upper_32_bits(data);

	/* Clear length in result token */
	rtoken->packet_length = 0;
	/* Assume errors - HW will clear if not the case */
	rtoken->error_code = 0x7fff;

	return rdesc;
}