/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef ENA_ETH_COM_H_
#define ENA_ETH_COM_H_

#include "ena_com.h"

/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
#define ENA_COMP_HEAD_THRESH 4

struct ena_com_tx_ctx {
	struct ena_com_tx_meta ena_meta;
	struct ena_com_buf *ena_bufs;
	/* For LLQ, header buffer - pushed to the device mem space */
	void *push_header;
	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	u16 num_bufs;
	u16 req_id;
	/* For regular queue, indicate the size of the header
	 * For LLQ, indicate the size of the pushed buffer
	 */
	u16 header_len;

	u8 meta_valid;
	u8 tso_enable;
	u8 l3_csum_enable;
	u8 l4_csum_enable;
	u8 l4_csum_partial;
	u8 df; /* Don't fragment */
};

struct ena_com_rx_ctx {
	struct ena_com_rx_buf_info *ena_bufs;
	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	bool l3_csum_err;
	bool l4_csum_err;
	u8 l4_csum_checked;
	/* fragmented packet */
	bool frag;
	u32 hash;
	u16 descs;
	int max_bufs;
};

int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc);

int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx);

int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id);

bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
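
/* Unmask the IO completion queue interrupt by writing the interrupt
 * control value to the queue's unmask register.
 */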
static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
				       struct ena_eth_io_intr_reg *intr_reg)
{
	writel(intr_reg->intr_control, io_cq->unmask_reg);
}
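
/* Return the number of descriptors that are currently free in the
 * submission queue.
 */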
static inline int ena_com_free_desc(struct ena_com_io_sq *io_sq)
{
	u16 tail, next_to_comp, cnt;

	next_to_comp = io_sq->next_to_comp;
	tail = io_sq->tail;
	cnt = tail - next_to_comp;

	return io_sq->q_depth - 1 - cnt;
}

/* Check if the submission queue has enough space to hold required_buffers */
static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
						u16 required_buffers)
{
	int temp;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return ena_com_free_desc(io_sq) >= required_buffers;

	/* This calculation doesn't need to be 100% accurate. So to reduce
	 * the calculation overhead just subtract 2 lines from the free descs
	 * (one for the header line and one to compensate for the division
	 * down calculation).
	 */
	temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;

	return ena_com_free_desc(io_sq) > temp;
}
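
/* Return true if the TX meta data in ena_tx_ctx is valid and differs from
 * the meta data currently cached on the submission queue.
 */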
static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
					     struct ena_com_tx_ctx *ena_tx_ctx)
{
	if (!ena_tx_ctx->meta_valid)
		return false;

	return !!memcmp(&io_sq->cached_tx_meta,
			&ena_tx_ctx->ena_meta,
			sizeof(struct ena_com_tx_meta));
}
static inline bool is_llq_max_tx_burst_exists(struct ena_com_io_sq *io_sq)
{
	return (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) &&
	       io_sq->llq_info.max_entries_in_tx_burst > 0;
}
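
/* Return true if submitting ena_tx_ctx would exceed the remaining LLQ TX
 * burst budget, meaning the doorbell must be written before queueing it.
 */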
static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
					      struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_com_llq_info *llq_info;
	int descs_after_first_entry;
	int num_entries_needed = 1;
	u16 num_descs;

	if (!is_llq_max_tx_burst_exists(io_sq))
		return false;

	llq_info = &io_sq->llq_info;
	num_descs = ena_tx_ctx->num_bufs;

	if (unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
		++num_descs;

	if (num_descs > llq_info->descs_num_before_header) {
		descs_after_first_entry = num_descs - llq_info->descs_num_before_header;
		num_entries_needed += DIV_ROUND_UP(descs_after_first_entry,
						   llq_info->descs_per_entry);
	}

	pr_debug("queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid,
		 num_descs, num_entries_needed);

	return num_entries_needed > io_sq->entries_in_tx_burst_left;
}
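
/* Write the submission queue tail to the device doorbell register and, for
 * LLQ with a burst limit, reset the remaining TX burst budget.
 */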
static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
	u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
	u16 tail = io_sq->tail;

	pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
		 io_sq->qid, tail);

	writel(tail, io_sq->db_addr);

	if (is_llq_max_tx_burst_exists(io_sq)) {
		pr_debug("reset available entries in tx burst for queue %d to %d\n",
			 io_sq->qid, max_entries_in_tx_burst);
		io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
	}

	return 0;
}
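
/* Report the completion queue head to the device, but only after at least
 * 1/ENA_COMP_HEAD_THRESH of the queue depth has completed since the last
 * head update, to limit doorbell writes.
 */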
static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
{
	u16 unreported_comp, head;
	bool need_update;

	head = io_cq->head;
	unreported_comp = head - io_cq->last_head_update;
	need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);

	if (io_cq->cq_head_db_reg && need_update) {
		pr_debug("Write completion queue doorbell for queue %d: head: %d\n",
			 io_cq->qid, head);
		writel(head, io_cq->cq_head_db_reg);
		io_cq->last_head_update = head;
	}

	return 0;
}
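
/* Configure the NUMA node hint for the IO completion queue, if the device
 * exposes a NUMA node configuration register for it.
 */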
static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
					    u8 numa_node)
{
	struct ena_eth_io_numa_node_cfg_reg numa_cfg;

	if (!io_cq->numa_node_cfg_reg)
		return;

	numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)
		| ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;

	writel(numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
}
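
/* Acknowledge elem completed descriptors by advancing the submission
 * queue's next-to-complete index.
 */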
static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
{
	io_sq->next_to_comp += elem;
}
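
/* Advance the completion queue head and flip the phase bit on wrap around */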
static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
{
	io_cq->head++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
		io_cq->phase ^= 1;
}
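
/* Poll the TX completion queue for the next completed descriptor.
 * Return -EAGAIN if the device has not written a new completion yet,
 * -EINVAL on an out-of-range req_id, and 0 with *req_id filled on success.
 */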
static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
					     u16 *req_id)
{
	u8 expected_phase, cdesc_phase;
	struct ena_eth_io_tx_cdesc *cdesc;
	u16 masked_head;

	masked_head = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;
	cdesc = (struct ena_eth_io_tx_cdesc *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		(masked_head * io_cq->cdesc_entry_size_in_bytes));

	/* When the current completion descriptor phase isn't the same as the
	 * expected, it means that the device still didn't update
	 * this completion.
	 */
	cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
	if (cdesc_phase != expected_phase)
		return -EAGAIN;

	dma_rmb();

	*req_id = READ_ONCE(cdesc->req_id);
	if (unlikely(*req_id >= io_cq->q_depth)) {
		pr_err("Invalid req id %d\n", cdesc->req_id);
		return -EINVAL;
	}

	ena_com_cq_inc_head(io_cq);

	return 0;
}

#endif /* ENA_ETH_COM_H_ */