/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 **********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"
#include "cn23xx_vf_device.h"
struct iq_post_status {
	int status;
	int index;
};

static void check_db_timeout(struct work_struct *work);
static void __check_db_timeout(struct octeon_device *oct, u64 iq_no);

static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);
static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
{
	struct octeon_instr_queue *iq =
	    (struct octeon_instr_queue *)oct->instr_queue[iq_no];
	return iq->iqcmd_64B;
}

#define IQ_INSTR_MODE_32B(oct, iq_no)  (!IQ_INSTR_MODE_64B(oct, iq_no))
/* Define this to return the request status compatible with old code */
/*#define OCTEON_USE_OLD_REQ_STATUS*/
/* Return 0 on success, 1 on failure */
int octeon_init_instr_queue(struct octeon_device *oct,
			    union oct_txpciq txpciq,
			    u32 num_descs)
{
	struct octeon_instr_queue *iq;
	struct octeon_iq_config *conf = NULL;
	u32 iq_no = (u32)txpciq.s.q_no;
	u32 q_size;
	struct cavium_wq *db_wq;
	int numa_node = dev_to_node(&oct->pci_dev->dev);

	if (OCTEON_CN6XXX(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn6xxx)));
	else if (OCTEON_CN23XX_PF(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn23xx_pf)));
	else if (OCTEON_CN23XX_VF(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn23xx_vf)));

	if (!conf) {
		dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
			oct->chip_id);
		return 1;
	}

	q_size = (u32)conf->instr_type * num_descs;

	iq = oct->instr_queue[iq_no];

	iq->oct_dev = oct;

	iq->base_addr = lio_dma_alloc(oct, q_size, &iq->base_addr_dma);
	if (!iq->base_addr) {
		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
			iq_no);
		return 1;
	}

	iq->max_count = num_descs;
	/* Initialize a list to hold requests that have been posted to
	 * Octeon but have yet to be fetched by Octeon.
	 */
	iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs),
					numa_node);
	if (!iq->request_list)
		iq->request_list =
			vmalloc(array_size(num_descs,
					   sizeof(*iq->request_list)));
	if (!iq->request_list) {
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
			iq_no);
		return 1;
	}

	memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);
	dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %pad count: %d\n",
		iq_no, iq->base_addr, &iq->base_addr_dma, iq->max_count);
	iq->txpciq.u64 = txpciq.u64;
	iq->fill_threshold = (u32)conf->db_min;
	iq->fill_cnt = 0;
	iq->host_write_index = 0;
	iq->octeon_read_index = 0;
	iq->flush_index = 0;
	iq->last_db_time = 0;
	iq->do_auto_flush = 1;
	iq->db_timeout = (u32)conf->db_timeout;
	atomic_set(&iq->instr_pending, 0);
	iq->pkts_processed = 0;

	/* Initialize the spinlock for this instruction queue */
	spin_lock_init(&iq->lock);
	if (iq_no == 0) {
		iq->allow_soft_cmds = true;
		spin_lock_init(&iq->post_lock);
	} else {
		iq->allow_soft_cmds = false;
	}

	spin_lock_init(&iq->iq_flush_running_lock);

	oct->io_qmask.iq |= BIT_ULL(iq_no);

	/* Set the 32B/64B mode for each input queue */
	oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
	iq->iqcmd_64B = (conf->instr_type == 64);

	oct->fn_list.setup_iq_regs(oct, iq_no);

	oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db",
						     WQ_MEM_RECLAIM,
						     0);
	if (!oct->check_db_wq[iq_no].wq) {
		vfree(iq->request_list);
		iq->request_list = NULL;
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
			iq_no);
		return 1;
	}

	db_wq = &oct->check_db_wq[iq_no];

	INIT_DELAYED_WORK(&db_wq->wk.work, check_db_timeout);
	db_wq->wk.ctxptr = oct;
	db_wq->wk.ctxul = iq_no;
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));

	return 0;
}
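
/* Tear down IQ iq_no: cancel its doorbell-check work, free the request
 * list and the DMA descriptor ring, and release the queue structure.
 * Returns 0 on success, 1 if there was no ring to free.
 */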
int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
{
	u64 desc_size = 0, q_size;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
	destroy_workqueue(oct->check_db_wq[iq_no].wq);

	if (OCTEON_CN6XXX(oct))
		desc_size =
		    CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn6xxx));
	else if (OCTEON_CN23XX_PF(oct))
		desc_size =
		    CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn23xx_pf));
	else if (OCTEON_CN23XX_VF(oct))
		desc_size =
		    CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn23xx_vf));

	vfree(iq->request_list);

	if (iq->base_addr) {
		q_size = iq->max_count * desc_size;
		lio_dma_free(oct, (u32)q_size, iq->base_addr,
			     iq->base_addr_dma);
		oct->io_qmask.iq &= ~(1ULL << iq_no);
		vfree(oct->instr_queue[iq_no]);
		oct->instr_queue[iq_no] = NULL;
		oct->num_iqs--;
		return 0;
	}
	return 1;
}
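
/* Create and initialize IQ iq_no, or just refresh its txpciq/app_ctx if
 * it already exists, then enable the device's I/O queues.
 */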
/* Return 0 on success, 1 on failure */
int octeon_setup_iq(struct octeon_device *oct,
		    int ifidx,
		    int q_index,
		    union oct_txpciq txpciq,
		    u32 num_descs,
		    void *app_ctx)
{
	u32 iq_no = (u32)txpciq.s.q_no;
	int numa_node = dev_to_node(&oct->pci_dev->dev);

	if (oct->instr_queue[iq_no]) {
		dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
			iq_no);
		oct->instr_queue[iq_no]->txpciq.u64 = txpciq.u64;
		oct->instr_queue[iq_no]->app_ctx = app_ctx;
		return 0;
	}
	oct->instr_queue[iq_no] =
	    vzalloc_node(sizeof(struct octeon_instr_queue), numa_node);
	if (!oct->instr_queue[iq_no])
		oct->instr_queue[iq_no] =
		    vzalloc(sizeof(struct octeon_instr_queue));
	if (!oct->instr_queue[iq_no])
		return 1;

	oct->instr_queue[iq_no]->q_index = q_index;
	oct->instr_queue[iq_no]->app_ctx = app_ctx;
	oct->instr_queue[iq_no]->ifidx = ifidx;

	if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
		vfree(oct->instr_queue[iq_no]);
		oct->instr_queue[iq_no] = NULL;
		return 1;
	}

	oct->num_iqs++;
	if (oct->fn_list.enable_io_queues(oct)) {
		octeon_delete_instr_queue(oct, iq_no);
		return 1;
	}

	return 0;
}
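
/* Poll every active IQ until Octeon has fetched all pending
 * instructions, retrying up to ~1000 times with a one-tick sleep in
 * between. Returns the count of instructions still pending when the
 * wait gives up, so 0 means all queues drained.
 */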
int lio_wait_for_instr_fetch(struct octeon_device *oct)
{
	int i, retry = 1000, pending, instr_cnt = 0;

	do {
		instr_cnt = 0;

		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			pending =
			    atomic_read(&oct->instr_queue[i]->instr_pending);
			if (pending)
				__check_db_timeout(oct, i);
			instr_cnt += pending;
		}

		if (instr_cnt == 0)
			break;

		schedule_timeout_uninterruptible(1);

	} while (retry-- && instr_cnt);

	return instr_cnt;
}
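
/* Notify Octeon of newly posted commands by writing the accumulated
 * fill count to the queue's doorbell register, but only while the
 * device is in the RUNNING state.
 */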
static inline void
ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
{
	if (atomic_read(&oct->status) == OCT_DEV_RUNNING) {
		writel(iq->fill_cnt, iq->doorbell_reg);
		/* make sure doorbell write goes through */
		iq->fill_cnt = 0;
		iq->last_db_time = jiffies;
		return;
	}
}

void
octeon_ring_doorbell_locked(struct octeon_device *oct, u32 iq_no)
{
	struct octeon_instr_queue *iq;

	iq = oct->instr_queue[iq_no];
	spin_lock(&iq->post_lock);
	if (iq->fill_cnt)
		ring_doorbell(oct, iq);
	spin_unlock(&iq->post_lock);
}
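
/* Copy one 32B or 64B command into the ring slot selected by
 * host_write_index.
 */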
static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
				      u8 *cmd)
{
	u8 *iqptr, cmdsize;

	cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
	iqptr = iq->base_addr + (cmdsize * iq->host_write_index);

	memcpy(iqptr, cmd, cmdsize);
}
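
/* Write a command at host_write_index and advance the index. Reports
 * IQ_SEND_STOP when the ring is nearly full so callers can throttle,
 * or IQ_SEND_FAILED (index -1) when no slot is available.
 */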
static inline struct iq_post_status
__post_command2(struct octeon_instr_queue *iq, u8 *cmd)
{
	struct iq_post_status st;

	st.status = IQ_SEND_OK;

	/* This ensures that the read index does not wrap around to the same
	 * position if queue gets full before Octeon could fetch any instr.
	 */
	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) {
		st.status = IQ_SEND_FAILED;
		st.index = -1;
		return st;
	}

	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 2))
		st.status = IQ_SEND_STOP;

	__copy_cmd_into_iq(iq, cmd);

	/* "index" is returned, host_write_index is modified. */
	st.index = iq->host_write_index;
	iq->host_write_index = incr_index(iq->host_write_index, 1,
					  iq->max_count);
	iq->fill_cnt++;

	/* Flush the command into memory. We need to be sure the data is in
	 * memory before indicating that the instruction is pending.
	 */
	wmb();

	atomic_inc(&iq->instr_pending);

	return st;
}
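
/* Register the per-device callback used to free buffers of the given
 * request type once Octeon has fetched them.
 */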
int
octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
				void (*fn)(void *))
{
	if (reqtype > REQTYPE_LAST) {
		dev_err(&oct->pci_dev->dev, "%s: Invalid reqtype: %d\n",
			__func__, reqtype);
		return -EINVAL;
	}

	reqtype_free_fn[oct->octeon_id][reqtype] = fn;

	return 0;
}
static inline void
__add_to_request_list(struct octeon_instr_queue *iq,
		      int idx, void *buf, int reqtype)
{
	iq->request_list[idx].buf = buf;
	iq->request_list[idx].reqtype = reqtype;
}
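
/* Walk the request list from flush_index up to the index Octeon has
 * already fetched: free buffers of no-response requests and move soft
 * commands to the ordered list for lio_process_ordered_list(). Returns
 * the number of entries processed, bounded by napi_budget if non-zero.
 */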
/* Can only run in process context */
int
lio_process_iq_request_list(struct octeon_device *oct,
			    struct octeon_instr_queue *iq, u32 napi_budget)
{
	struct cavium_wq *cwq = &oct->dma_comp_wq;
	int reqtype;
	void *buf;
	u32 old = iq->flush_index;
	u32 inst_count = 0;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct octeon_soft_command *sc;
	unsigned long flags;

	while (old != iq->octeon_read_index) {
		reqtype = iq->request_list[old].reqtype;
		buf     = iq->request_list[old].buf;

		if (reqtype == REQTYPE_NONE)
			goto skip_this;

		octeon_update_tx_completion_counters(buf, reqtype, &pkts_compl,
						     &bytes_compl);

		switch (reqtype) {
		case REQTYPE_NORESP_NET:
		case REQTYPE_NORESP_NET_SG:
		case REQTYPE_RESP_NET_SG:
			reqtype_free_fn[oct->octeon_id][reqtype](buf);
			break;
		case REQTYPE_RESP_NET:
		case REQTYPE_SOFT_COMMAND:
			sc = buf;
			/* We're expecting a response from Octeon.
			 * It's up to lio_process_ordered_list() to
			 * process sc. Add sc to the ordered soft
			 * command response list because we expect
			 * a response from Octeon.
			 */
			spin_lock_irqsave(&oct->response_list
					  [OCTEON_ORDERED_SC_LIST].lock, flags);
			atomic_inc(&oct->response_list
				   [OCTEON_ORDERED_SC_LIST].pending_req_count);
			list_add_tail(&sc->node, &oct->response_list
				      [OCTEON_ORDERED_SC_LIST].head);
			spin_unlock_irqrestore(&oct->response_list
					       [OCTEON_ORDERED_SC_LIST].lock,
					       flags);
			break;
		default:
			dev_err(&oct->pci_dev->dev,
				"%s Unknown reqtype: %d buf: %p at idx %d\n",
				__func__, reqtype, buf, old);
		}

		iq->request_list[old].buf = NULL;
		iq->request_list[old].reqtype = 0;

skip_this:
		inst_count++;
		old = incr_index(old, 1, iq->max_count);

		if ((napi_budget) && (inst_count >= napi_budget))
			break;
	}
	if (bytes_compl)
		octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl,
						   bytes_compl);
	iq->flush_index = old;

	if (atomic_read(&oct->response_list
			[OCTEON_ORDERED_SC_LIST].pending_req_count))
		queue_work(cwq->wq, &cwq->wk.work.work);

	return inst_count;
}
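
/* Refresh the read index from hardware and reclaim completed requests.
 * Returns 1 when the queue was fully drained (tx done), 0 when the
 * NAPI budget ran out first.
 */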
/* Can only be called from process context */
int
octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
		u32 napi_budget)
{
	u32 inst_processed = 0;
	u32 tot_inst_processed = 0;
	int tx_done = 1;

	if (!spin_trylock(&iq->iq_flush_running_lock))
		return tx_done;

	spin_lock_bh(&iq->lock);

	iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);

	do {
		/* Process any outstanding IQ packets. */
		if (iq->flush_index == iq->octeon_read_index)
			break;

		if (napi_budget)
			inst_processed =
				lio_process_iq_request_list(oct, iq,
							    napi_budget -
							    tot_inst_processed);
		else
			inst_processed =
				lio_process_iq_request_list(oct, iq, 0);

		if (inst_processed) {
			iq->pkts_processed += inst_processed;
			atomic_sub(inst_processed, &iq->instr_pending);
			iq->stats.instr_processed += inst_processed;
		}

		tot_inst_processed += inst_processed;
	} while (tot_inst_processed < napi_budget);

	if (napi_budget && (tot_inst_processed >= napi_budget))
		tx_done = 0;

	iq->last_db_time = jiffies;

	spin_unlock_bh(&iq->lock);

	spin_unlock(&iq->iq_flush_running_lock);

	return tx_done;
}

/* Process instruction queue after timeout.
 * This routine gets called from a workqueue or when removing the module.
 */
static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
{
	struct octeon_instr_queue *iq;
	u64 next_time;

	if (!oct)
		return;

	iq = oct->instr_queue[iq_no];
	if (!iq)
		return;

	/* return immediately, if no work pending */
	if (!atomic_read(&iq->instr_pending))
		return;
	/* If jiffies - last_db_time < db_timeout do nothing */
	next_time = iq->last_db_time + iq->db_timeout;
	if (!time_after(jiffies, (unsigned long)next_time))
		return;
	iq->last_db_time = jiffies;

	/* Flush the instruction queue */
	octeon_flush_iq(oct, iq, 0);

	lio_enable_irq(NULL, iq);
}
/* Called by the Poll thread at regular intervals to check the instruction
 * queue for commands to be posted and for commands that were fetched by Octeon.
 */
static void check_db_timeout(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
	u64 iq_no = wk->ctxul;
	struct cavium_wq *db_wq = &oct->check_db_wq[iq_no];
	u32 delay = 10;

	__check_db_timeout(oct, iq_no);
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(delay));
}
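
/* Post one command plus its completion buffer to IQ iq_no. The doorbell
 * is rung when the fill count reaches MAX_OCTEON_FILL_COUNT, the caller
 * forces it, the stack stopped this tx queue, or the ring is nearly
 * full. Returns IQ_SEND_OK, IQ_SEND_STOP or IQ_SEND_FAILED.
 */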
int
octeon_send_command(struct octeon_device *oct, u32 iq_no,
		    u32 force_db, void *cmd, void *buf,
		    u32 datasize, u32 reqtype)
{
	int xmit_stopped;
	struct iq_post_status st;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	/* Get the lock and prevent other tasks and tx interrupt handler
	 * from running.
	 */
	if (iq->allow_soft_cmds)
		spin_lock_bh(&iq->post_lock);

	st = __post_command2(iq, cmd);

	if (st.status != IQ_SEND_FAILED) {
		xmit_stopped = octeon_report_sent_bytes_to_bql(buf, reqtype);
		__add_to_request_list(iq, st.index, buf, reqtype);
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);

		if (iq->fill_cnt >= MAX_OCTEON_FILL_COUNT || force_db ||
		    xmit_stopped || st.status == IQ_SEND_STOP)
			ring_doorbell(oct, iq);
	} else {
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
	}

	if (iq->allow_soft_cmds)
		spin_unlock_bh(&iq->post_lock);

	/* This is only done here to expedite packets being flushed
	 * for cases where there are no IQ completion interrupts.
	 */

	return st.status;
}
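
/* Fill in the instruction header, IRH and opcode/subcode-specific
 * parameters of a soft command, using the CN23XX (ih3/pki_ih3) or
 * CN6XXX (ih2) command format as appropriate for the chip.
 */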
void
octeon_prepare_soft_command(struct octeon_device *oct,
			    struct octeon_soft_command *sc,
			    u8 opcode,
			    u8 subcode,
			    u32 irh_ossp,
			    u64 ossp0,
			    u64 ossp1)
{
	struct octeon_config *oct_cfg;
	struct octeon_instr_ih2 *ih2;
	struct octeon_instr_ih3 *ih3;
	struct octeon_instr_pki_ih3 *pki_ih3;
	struct octeon_instr_irh *irh;
	struct octeon_instr_rdp *rdp;

	WARN_ON(opcode > 15);
	WARN_ON(subcode > 127);

	oct_cfg = octeon_get_conf(oct);

	if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
		ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;

		ih3->pkind = oct->instr_queue[sc->iq_no]->txpciq.s.pkind;

		pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;

		pki_ih3->w = 1;
		pki_ih3->raw = 1;
		pki_ih3->utag = 1;
		pki_ih3->uqpg =
			oct->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
		pki_ih3->utt = 1;
		pki_ih3->tag = LIO_CONTROL;
		pki_ih3->tagtype = ATOMIC_TAG;
		pki_ih3->qpg =
			oct->instr_queue[sc->iq_no]->txpciq.s.ctrl_qpg;

		pki_ih3->pm = 0x7;
		pki_ih3->sl = 8;

		if (sc->datasize)
			ih3->dlengsz = sc->datasize;

		irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
		irh->opcode = opcode;
		irh->subcode = subcode;

		/* opcode/subcode specific parameters (ossp) */
		irh->ossp = irh_ossp;
		sc->cmd.cmd3.ossp[0] = ossp0;
		sc->cmd.cmd3.ossp[1] = ossp1;

		if (sc->rdatasize) {
			rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
			rdp->pcie_port = oct->pcie_port;
			rdp->rlen = sc->rdatasize;

			irh->rflag = 1;
			/*PKI IH3*/
			/* pki_ih3 irh+ossp[0]+ossp[1]+rdp+rptr = 48 bytes */
			ih3->fsz = LIO_SOFTCMDRESP_IH3;
		} else {
			irh->rflag = 0;
			/*PKI IH3*/
			/* pki_h3 + irh + ossp[0] + ossp[1] = 32 bytes */
			ih3->fsz = LIO_PCICMD_O3;
		}

	} else {
		ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
		ih2->tagtype = ATOMIC_TAG;
		ih2->tag = LIO_CONTROL;
		ih2->raw = 1;
		ih2->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);

		if (sc->datasize) {
			ih2->dlengsz = sc->datasize;
			ih2->rs = 1;
		}

		irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
		irh->opcode = opcode;
		irh->subcode = subcode;

		/* opcode/subcode specific parameters (ossp) */
		irh->ossp = irh_ossp;
		sc->cmd.cmd2.ossp[0] = ossp0;
		sc->cmd.cmd2.ossp[1] = ossp1;

		if (sc->rdatasize) {
			rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
			rdp->pcie_port = oct->pcie_port;
			rdp->rlen = sc->rdatasize;

			irh->rflag = 1;
			/* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
			ih2->fsz = LIO_SOFTCMDRESP_IH2;
		} else {
			irh->rflag = 0;
			/* irh + ossp[0] + ossp[1] = 24 bytes */
			ih2->fsz = LIO_PCICMD_O2;
		}
	}
}
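
/* Attach the DMA data/response pointers to a prepared soft command,
 * stamp its expiry time, and post it with the doorbell forced. Fails
 * with IQ_SEND_FAILED if the target queue does not accept soft commands.
 */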
int octeon_send_soft_command(struct octeon_device *oct,
			     struct octeon_soft_command *sc)
{
	struct octeon_instr_queue *iq;
	struct octeon_instr_ih2 *ih2;
	struct octeon_instr_ih3 *ih3;
	struct octeon_instr_irh *irh;
	u32 len;

	iq = oct->instr_queue[sc->iq_no];
	if (!iq->allow_soft_cmds) {
		dev_err(&oct->pci_dev->dev, "Soft commands are not allowed on Queue %d\n",
			sc->iq_no);
		INCR_INSTRQUEUE_PKT_COUNT(oct, sc->iq_no, instr_dropped, 1);
		return IQ_SEND_FAILED;
	}

	if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
		ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
		if (ih3->dlengsz) {
			WARN_ON(!sc->dmadptr);
			sc->cmd.cmd3.dptr = sc->dmadptr;
		}
		irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
		if (irh->rflag) {
			WARN_ON(!sc->dmarptr);
			WARN_ON(!sc->status_word);
			*sc->status_word = COMPLETION_WORD_INIT;
			sc->cmd.cmd3.rptr = sc->dmarptr;
		}
		len = (u32)ih3->dlengsz;
	} else {
		ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
		if (ih2->dlengsz) {
			WARN_ON(!sc->dmadptr);
			sc->cmd.cmd2.dptr = sc->dmadptr;
		}
		irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
		if (irh->rflag) {
			WARN_ON(!sc->dmarptr);
			WARN_ON(!sc->status_word);
			*sc->status_word = COMPLETION_WORD_INIT;
			sc->cmd.cmd2.rptr = sc->dmarptr;
		}
		len = (u32)ih2->dlengsz;
	}

	sc->expiry_time = jiffies + msecs_to_jiffies(LIO_SC_MAX_TMO_MS);

	return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
				    len, REQTYPE_SOFT_COMMAND));
}
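
/* Preallocate MAX_SOFT_COMMAND_BUFFERS DMA-coherent buffers and chain
 * them on the free list that octeon_alloc_soft_command() draws from.
 */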
int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
{
	int i;
	u64 dma_addr;
	struct octeon_soft_command *sc;

	INIT_LIST_HEAD(&oct->sc_buf_pool.head);
	spin_lock_init(&oct->sc_buf_pool.lock);
	atomic_set(&oct->sc_buf_pool.alloc_buf_count, 0);

	for (i = 0; i < MAX_SOFT_COMMAND_BUFFERS; i++) {
		sc = (struct octeon_soft_command *)
			lio_dma_alloc(oct,
				      SOFT_COMMAND_BUFFER_SIZE,
				      (dma_addr_t *)&dma_addr);
		if (!sc) {
			octeon_free_sc_buffer_pool(oct);
			return 1;
		}

		sc->dma_addr = dma_addr;
		sc->size = SOFT_COMMAND_BUFFER_SIZE;

		list_add_tail(&sc->node, &oct->sc_buf_pool.head);
	}

	return 0;
}
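
/* Reap completed soft commands whose callers are done with them: free
 * them back to the pool, or park them on the zombie list if they timed
 * out (status word still at its init value).
 */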
int octeon_free_sc_done_list(struct octeon_device *oct)
{
	struct octeon_response_list *done_sc_list, *zombie_sc_list;
	struct octeon_soft_command *sc;
	struct list_head *tmp, *tmp2;
	spinlock_t *sc_lists_lock; /* lock for response_list */

	done_sc_list = &oct->response_list[OCTEON_DONE_SC_LIST];
	zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST];

	if (!atomic_read(&done_sc_list->pending_req_count))
		return 0;

	sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock;

	spin_lock_bh(sc_lists_lock);

	list_for_each_safe(tmp, tmp2, &done_sc_list->head) {
		sc = list_entry(tmp, struct octeon_soft_command, node);

		if (READ_ONCE(sc->caller_is_done)) {
			list_del(&sc->node);
			atomic_dec(&done_sc_list->pending_req_count);

			if (*sc->status_word == COMPLETION_WORD_INIT) {
				/* timeout; move sc to zombie list */
				list_add_tail(&sc->node,
					      &zombie_sc_list->head);
				atomic_inc(&zombie_sc_list->pending_req_count);
			} else {
				octeon_free_soft_command(oct, sc);
			}
		}
	}

	spin_unlock_bh(sc_lists_lock);

	return 0;
}
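
/* Release every soft command parked on the zombie list back to the
 * buffer pool.
 */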
int octeon_free_sc_zombie_list(struct octeon_device *oct)
{
	struct octeon_response_list *zombie_sc_list;
	struct octeon_soft_command *sc;
	struct list_head *tmp, *tmp2;
	spinlock_t *sc_lists_lock; /* lock for response_list */

	zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST];
	sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock;

	spin_lock_bh(sc_lists_lock);

	list_for_each_safe(tmp, tmp2, &zombie_sc_list->head) {
		list_del(tmp);
		atomic_dec(&zombie_sc_list->pending_req_count);
		sc = list_entry(tmp, struct octeon_soft_command, node);
		octeon_free_soft_command(oct, sc);
	}

	spin_unlock_bh(sc_lists_lock);

	return 0;
}

int octeon_free_sc_buffer_pool(struct octeon_device *oct)
{
	struct list_head *tmp, *tmp2;
	struct octeon_soft_command *sc;

	octeon_free_sc_zombie_list(oct);

	spin_lock_bh(&oct->sc_buf_pool.lock);

	list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
		list_del(tmp);

		sc = (struct octeon_soft_command *)tmp;

		lio_dma_free(oct, sc->size, sc, sc->dma_addr);
	}

	INIT_LIST_HEAD(&oct->sc_buf_pool.head);

	spin_unlock_bh(&oct->sc_buf_pool.lock);

	return 0;
}
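
/* Take one buffer from the soft command pool and carve it into the
 * command header, optional context, and 128-byte-aligned data and
 * response regions. Returns NULL if the pool is empty.
 */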
struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
						      u32 datasize,
						      u32 rdatasize,
						      u32 ctxsize)
{
	u64 dma_addr;
	u32 size;
	u32 offset = sizeof(struct octeon_soft_command);
	struct octeon_soft_command *sc = NULL;
	struct list_head *tmp;

	if (!rdatasize)
		rdatasize = 16;

	WARN_ON((offset + datasize + rdatasize + ctxsize) >
		SOFT_COMMAND_BUFFER_SIZE);

	spin_lock_bh(&oct->sc_buf_pool.lock);

	if (list_empty(&oct->sc_buf_pool.head)) {
		spin_unlock_bh(&oct->sc_buf_pool.lock);
		return NULL;
	}

	list_for_each(tmp, &oct->sc_buf_pool.head)
		break;

	list_del(tmp);

	atomic_inc(&oct->sc_buf_pool.alloc_buf_count);

	spin_unlock_bh(&oct->sc_buf_pool.lock);

	sc = (struct octeon_soft_command *)tmp;

	dma_addr = sc->dma_addr;
	size = sc->size;

	memset(sc, 0, sc->size);

	sc->dma_addr = dma_addr;
	sc->size = size;

	if (ctxsize) {
		sc->ctxptr = (u8 *)sc + offset;
		sc->ctxsize = ctxsize;
	}

	/* Start data at 128 byte boundary */
	offset = (offset + ctxsize + 127) & 0xffffff80;

	if (datasize) {
		sc->virtdptr = (u8 *)sc + offset;
		sc->dmadptr = dma_addr + offset;
		sc->datasize = datasize;
	}

	/* Start rdata at 128 byte boundary */
	offset = (offset + datasize + 127) & 0xffffff80;

	if (rdatasize) {
		WARN_ON(rdatasize < 16);
		sc->virtrptr = (u8 *)sc + offset;
		sc->dmarptr = dma_addr + offset;
		sc->rdatasize = rdatasize;
		sc->status_word = (u64 *)((u8 *)(sc->virtrptr) + rdatasize - 8);
	}

	return sc;
}
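
/* Return a soft command buffer to the pool. */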
void octeon_free_soft_command(struct octeon_device *oct,
			      struct octeon_soft_command *sc)
{
	spin_lock_bh(&oct->sc_buf_pool.lock);

	list_add_tail(&sc->node, &oct->sc_buf_pool.head);

	atomic_dec(&oct->sc_buf_pool.alloc_buf_count);

	spin_unlock_bh(&oct->sc_buf_pool.lock);
}