Merge branch 'ena-dynamic-queue-sizes'
Sameeh Jubran says:

====================
Support for dynamic queue size changes

This patchset introduces the following:
* add a new admin command for supporting different queue sizes for Tx/Rx
* add support for Tx/Rx queue size modification through ethtool
* allow queue allocation backoff when low on memory
* update driver version

Difference from v2:
* Dropped superfluous range checks which are already done in ethtool. [patch 5/7]
* Dropped the inline keyword from a function. [patch 4/7]
* Added a new patch which drops the inline keyword from all *.c files. [patch 6/7]

Difference from v1:
* Changed the ena_update_queue_sizes() signature to use u32 instead of int for the size arguments. [patch 5/7]
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Merge commit: e0ffbd37f3
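The new .set_ringparam hook added below is reached through the standard
ethtool ring interface (ethtool -G <iface> tx N rx N). As a minimal
userspace sketch of that same path (not part of this patchset; the
interface name "eth0" and the sizes 512 are only placeholders), the
classic SIOCETHTOOL ioctl can issue the ETHTOOL_SRINGPARAM command
directly:

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <net/if.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ethtool_ringparam ring = { .cmd = ETHTOOL_GRINGPARAM };
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return 1;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* placeholder */
		ifr.ifr_data = (char *)&ring;

		/* read the current ring sizes first, so only the two
		 * fields we mean to change are modified
		 */
		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
			perror("ETHTOOL_GRINGPARAM");
			close(fd);
			return 1;
		}

		/* the set command needs CAP_NET_ADMIN */
		ring.cmd = ETHTOOL_SRINGPARAM;
		ring.tx_pending = 512;	/* placeholder sizes; the driver */
		ring.rx_pending = 512;	/* normalizes them, see below    */

		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
			perror("ETHTOOL_SRINGPARAM");

		close(fd);
		return 0;
	}

The ethtool core rejects requests above the maxima reported by
.get_ringparam before the driver is ever called, which is why the cover
letter could drop the driver-side range checks as superfluous.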
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -64,6 +64,7 @@ enum ena_admin_aq_feature_id {
 	ENA_ADMIN_LLQ = 4,
 	ENA_ADMIN_EXTRA_PROPERTIES_STRINGS = 5,
 	ENA_ADMIN_EXTRA_PROPERTIES_FLAGS = 6,
+	ENA_ADMIN_MAX_QUEUES_EXT = 7,
 	ENA_ADMIN_RSS_HASH_FUNCTION = 10,
 	ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
 	ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12,
@@ -425,7 +426,13 @@ struct ena_admin_get_set_feature_common_desc {
 	/* as appears in ena_admin_aq_feature_id */
 	u8 feature_id;
 
-	u16 reserved16;
+	/* The driver specifies the max feature version it supports and the
+	 * device responds with the currently supported feature version. The
+	 * field is zero based
+	 */
+	u8 feature_version;
+
+	u8 reserved8;
 };
 
 struct ena_admin_device_attr_feature_desc {
@@ -535,6 +542,34 @@ struct ena_admin_feature_llq_desc {
 	u32 max_tx_burst_size;
 };
 
+struct ena_admin_queue_ext_feature_fields {
+	u32 max_tx_sq_num;
+
+	u32 max_tx_cq_num;
+
+	u32 max_rx_sq_num;
+
+	u32 max_rx_cq_num;
+
+	u32 max_tx_sq_depth;
+
+	u32 max_tx_cq_depth;
+
+	u32 max_rx_sq_depth;
+
+	u32 max_rx_cq_depth;
+
+	u32 max_tx_header_size;
+
+	/* Maximum Descriptors number, including meta descriptor, allowed for
+	 * a single Tx packet
+	 */
+	u16 max_per_packet_tx_descs;
+
+	/* Maximum Descriptors number allowed for a single Rx packet */
+	u16 max_per_packet_rx_descs;
+};
+
 struct ena_admin_queue_feature_desc {
 	u32 max_sq_num;
 
@@ -849,6 +884,19 @@ struct ena_admin_get_feat_cmd {
 	u32 raw[11];
 };
 
+struct ena_admin_queue_ext_feature_desc {
+	/* version */
+	u8 version;
+
+	u8 reserved1[3];
+
+	union {
+		struct ena_admin_queue_ext_feature_fields max_queue_ext;
+
+		u32 raw[10];
+	};
+};
+
 struct ena_admin_get_feat_resp {
 	struct ena_admin_acq_common_desc acq_common_desc;
 
@@ -861,6 +909,8 @@ struct ena_admin_get_feat_resp {
 
 		struct ena_admin_queue_feature_desc max_queue;
 
+		struct ena_admin_queue_ext_feature_desc max_queue_ext;
+
 		struct ena_admin_feature_aenq_desc aenq;
 
 		struct ena_admin_get_feature_link_desc link;
@@ -929,7 +979,9 @@ struct ena_admin_aenq_common_desc {
 
 	u16 syndrom;
 
-	/* 0 : phase */
+	/* 0 : phase
+	 * 7:1 : reserved - MBZ
+	 */
 	u8 flags;
 
 	u8 reserved1[3];
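A quick layout sanity check on the new descriptors (not part of the
patch): ena_admin_queue_ext_feature_desc overlays
ena_admin_queue_ext_feature_fields with a u32 raw[10] union arm, so the
fields struct must occupy exactly 40 bytes (nine u32 members plus two
u16 members, with no padding). A standalone C11 mirror of the layout can
assert this at compile time:

	#include <assert.h>
	#include <stdint.h>

	/* local mirror of ena_admin_queue_ext_feature_fields */
	struct queue_ext_fields {
		uint32_t max_tx_sq_num;
		uint32_t max_tx_cq_num;
		uint32_t max_rx_sq_num;
		uint32_t max_rx_cq_num;
		uint32_t max_tx_sq_depth;
		uint32_t max_tx_cq_depth;
		uint32_t max_rx_sq_depth;
		uint32_t max_rx_cq_depth;
		uint32_t max_tx_header_size;
		uint16_t max_per_packet_tx_descs;
		uint16_t max_per_packet_rx_descs;
	};

	/* 9 * 4 + 2 * 2 = 40 bytes, the same as u32 raw[10] */
	static_assert(sizeof(struct queue_ext_fields) == sizeof(uint32_t[10]),
		      "ext fields must fit the raw[10] union arm");

	int main(void)
	{
		return 0;
	}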
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -91,7 +91,7 @@ struct ena_com_stats_ctx {
 	struct ena_admin_acq_get_stats_resp get_resp;
 };
 
-static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
+static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
 				       struct ena_common_mem_addr *ena_addr,
 				       dma_addr_t addr)
 {
@@ -190,7 +190,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
 	return 0;
 }
 
-static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
+static void comp_ctxt_release(struct ena_com_admin_queue *queue,
 				     struct ena_comp_ctx *comp_ctx)
 {
 	comp_ctx->occupied = false;
@@ -277,7 +277,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
 	return comp_ctx;
 }
 
-static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
+static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
 {
 	size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
 	struct ena_comp_ctx *comp_ctx;
@@ -978,7 +978,8 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
 				  struct ena_admin_get_feat_resp *get_resp,
 				  enum ena_admin_aq_feature_id feature_id,
 				  dma_addr_t control_buf_dma_addr,
-				  u32 control_buff_size)
+				  u32 control_buff_size,
+				  u8 feature_ver)
 {
 	struct ena_com_admin_queue *admin_queue;
 	struct ena_admin_get_feat_cmd get_cmd;
@@ -1009,7 +1010,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
 	}
 
 	get_cmd.control_buffer.length = control_buff_size;
-
+	get_cmd.feat_common.feature_version = feature_ver;
 	get_cmd.feat_common.feature_id = feature_id;
 
 	ret = ena_com_execute_admin_command(admin_queue,
@@ -1029,13 +1030,15 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
 
 static int ena_com_get_feature(struct ena_com_dev *ena_dev,
 			       struct ena_admin_get_feat_resp *get_resp,
-			       enum ena_admin_aq_feature_id feature_id)
+			       enum ena_admin_aq_feature_id feature_id,
+			       u8 feature_ver)
 {
 	return ena_com_get_feature_ex(ena_dev,
 				      get_resp,
 				      feature_id,
 				      0,
-				      0);
+				      0,
+				      feature_ver);
 }
 
 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
@@ -1095,7 +1098,7 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
 	int ret;
 
 	ret = ena_com_get_feature(ena_dev, &get_resp,
-				  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
+				  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
 	if (unlikely(ret))
 		return ret;
 
@@ -1515,7 +1518,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
 	struct ena_admin_get_feat_resp get_resp;
 	int ret;
 
-	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
+	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
 	if (ret) {
 		pr_info("Can't get aenq configuration\n");
 		return ret;
@@ -1890,7 +1893,7 @@ void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
 int ena_com_get_link_params(struct ena_com_dev *ena_dev,
 			    struct ena_admin_get_feat_resp *resp)
 {
-	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
+	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
 }
 
 int ena_com_extra_properties_strings_init(struct ena_com_dev *ena_dev)
@@ -1916,7 +1919,7 @@ int ena_com_extra_properties_strings_init(struct ena_com_dev *ena_dev)
 	rc = ena_com_get_feature_ex(ena_dev, &resp,
 				    ENA_ADMIN_EXTRA_PROPERTIES_STRINGS,
 				    extra_properties_strings->dma_addr,
-				    extra_properties_strings->size);
+				    extra_properties_strings->size, 0);
 	if (rc) {
 		pr_debug("Failed to get extra properties strings\n");
 		goto err;
@@ -1946,7 +1949,7 @@ int ena_com_get_extra_properties_flags(struct ena_com_dev *ena_dev,
 				       struct ena_admin_get_feat_resp *resp)
 {
 	return ena_com_get_feature(ena_dev, resp,
-				   ENA_ADMIN_EXTRA_PROPERTIES_FLAGS);
+				   ENA_ADMIN_EXTRA_PROPERTIES_FLAGS, 0);
 }
 
 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
@@ -1956,7 +1959,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
 	int rc;
 
 	rc = ena_com_get_feature(ena_dev, &get_resp,
-				 ENA_ADMIN_DEVICE_ATTRIBUTES);
+				 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
 	if (rc)
 		return rc;
 
@@ -1964,17 +1967,34 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
 	       sizeof(get_resp.u.dev_attr));
 	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
 
-	rc = ena_com_get_feature(ena_dev, &get_resp,
-				 ENA_ADMIN_MAX_QUEUES_NUM);
-	if (rc)
-		return rc;
-
-	memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
-	       sizeof(get_resp.u.max_queue));
-	ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;
+	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
+		rc = ena_com_get_feature(ena_dev, &get_resp,
+					 ENA_ADMIN_MAX_QUEUES_EXT,
+					 ENA_FEATURE_MAX_QUEUE_EXT_VER);
+		if (rc)
+			return rc;
+
+		if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
+			return -EINVAL;
+
+		memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
+		       sizeof(get_resp.u.max_queue_ext));
+		ena_dev->tx_max_header_size =
+			get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
+	} else {
+		rc = ena_com_get_feature(ena_dev, &get_resp,
+					 ENA_ADMIN_MAX_QUEUES_NUM, 0);
+		memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
+		       sizeof(get_resp.u.max_queue));
+		ena_dev->tx_max_header_size =
+			get_resp.u.max_queue.max_header_size;
+
+		if (rc)
+			return rc;
+	}
 
 	rc = ena_com_get_feature(ena_dev, &get_resp,
-				 ENA_ADMIN_AENQ_CONFIG);
+				 ENA_ADMIN_AENQ_CONFIG, 0);
 	if (rc)
 		return rc;
 
@@ -1982,7 +2002,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
 	       sizeof(get_resp.u.aenq));
 
 	rc = ena_com_get_feature(ena_dev, &get_resp,
-				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
+				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
 	if (rc)
 		return rc;
 
@@ -1992,7 +2012,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
 	/* Driver hints isn't mandatory admin command. So in case the
 	 * command isn't supported set driver hints to 0
 	 */
-	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS);
+	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
 
 	if (!rc)
 		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
@@ -2003,7 +2023,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
 	else
 		return rc;
 
-	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ);
+	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
 	if (!rc)
 		memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
 		       sizeof(get_resp.u.llq));
@@ -2240,7 +2260,7 @@ int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
 	struct ena_admin_get_feat_resp resp;
 
 	ret = ena_com_get_feature(ena_dev, &resp,
-				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
+				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
 	if (unlikely(ret)) {
 		pr_err("Failed to get offload capabilities %d\n", ret);
 		return ret;
@@ -2269,7 +2289,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
 
 	/* Validate hash function is supported */
 	ret = ena_com_get_feature(ena_dev, &get_resp,
-				  ENA_ADMIN_RSS_HASH_FUNCTION);
+				  ENA_ADMIN_RSS_HASH_FUNCTION, 0);
 	if (unlikely(ret))
 		return ret;
 
@@ -2329,7 +2349,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
 				    ENA_ADMIN_RSS_HASH_FUNCTION,
 				    rss->hash_key_dma_addr,
-				    sizeof(*rss->hash_key));
+				    sizeof(*rss->hash_key), 0);
 	if (unlikely(rc))
 		return rc;
 
@@ -2381,7 +2401,7 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
 				    ENA_ADMIN_RSS_HASH_FUNCTION,
 				    rss->hash_key_dma_addr,
-				    sizeof(*rss->hash_key));
+				    sizeof(*rss->hash_key), 0);
 	if (unlikely(rc))
 		return rc;
 
@@ -2406,7 +2426,7 @@ int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
 				    ENA_ADMIN_RSS_HASH_INPUT,
 				    rss->hash_ctrl_dma_addr,
-				    sizeof(*rss->hash_ctrl));
+				    sizeof(*rss->hash_ctrl), 0);
 	if (unlikely(rc))
 		return rc;
 
@@ -2642,7 +2662,7 @@ int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
 				    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
 				    rss->rss_ind_tbl_dma_addr,
-				    tbl_size);
+				    tbl_size, 0);
 	if (unlikely(rc))
 		return rc;
 
@@ -2857,7 +2877,7 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
 	int rc;
 
 	rc = ena_com_get_feature(ena_dev, &get_resp,
-				 ENA_ADMIN_INTERRUPT_MODERATION);
+				 ENA_ADMIN_INTERRUPT_MODERATION, 0);
 
 	if (rc) {
 		if (rc == -EOPNOTSUPP) {
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -101,6 +101,8 @@
 
 #define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF
 
+#define ENA_FEATURE_MAX_QUEUE_EXT_VER 1
+
 enum ena_intr_moder_level {
 	ENA_INTR_MODER_LOWEST = 0,
 	ENA_INTR_MODER_LOW,
@@ -389,6 +391,7 @@ struct ena_com_dev {
 
 struct ena_com_dev_get_features_ctx {
 	struct ena_admin_queue_feature_desc max_queues;
+	struct ena_admin_queue_ext_feature_desc max_queue_ext;
 	struct ena_admin_device_attr_feature_desc dev_attr;
 	struct ena_admin_feature_aenq_desc aenq;
 	struct ena_admin_feature_offload_desc offload;
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -32,7 +32,7 @@
 
 #include "ena_eth_com.h"
 
-static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
+static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
 	struct ena_com_io_cq *io_cq)
 {
 	struct ena_eth_io_rx_cdesc_base *cdesc;
@@ -59,7 +59,7 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
 	return cdesc;
 }
 
-static inline void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
+static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
 {
 	u16 tail_masked;
 	u32 offset;
@@ -71,7 +71,7 @@ static inline void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
 	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
 }
 
-static inline int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
+static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
 						     u8 *bounce_buffer)
 {
 	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
@@ -111,7 +111,7 @@ static inline int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq
 	return 0;
 }
 
-static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
+static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
 						 u8 *header_src,
 						 u16 header_len)
 {
@@ -142,7 +142,7 @@ static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
 	return 0;
 }
 
-static inline void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
+static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
 {
 	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
 	u8 *bounce_buffer;
@@ -162,7 +162,7 @@ static inline void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
 	return sq_desc;
 }
 
-static inline int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
+static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
 {
 	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
 	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
@@ -189,7 +189,7 @@ static inline int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
 	return 0;
 }
 
-static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
+static void *get_sq_desc(struct ena_com_io_sq *io_sq)
 {
 	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
 		return get_sq_desc_llq(io_sq);
@@ -197,7 +197,7 @@ static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
 	return get_sq_desc_regular_queue(io_sq);
 }
 
-static inline int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
+static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
 {
 	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
 	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
@@ -225,7 +225,7 @@ static inline int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
 	return 0;
 }
 
-static inline int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
+static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
 {
 	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
 		return ena_com_sq_update_llq_tail(io_sq);
@@ -239,7 +239,7 @@ static inline int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
 	return 0;
 }
 
-static inline struct ena_eth_io_rx_cdesc_base *
+static struct ena_eth_io_rx_cdesc_base *
 ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
 {
 	idx &= (io_cq->q_depth - 1);
@@ -248,7 +248,7 @@ static inline struct ena_eth_io_rx_cdesc_base *
 		idx * io_cq->cdesc_entry_size_in_bytes);
 }
 
-static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
+static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
 					   u16 *first_cdesc_idx)
 {
 	struct ena_eth_io_rx_cdesc_base *cdesc;
@@ -285,7 +285,7 @@ static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
 	return count;
 }
 
-static inline int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
+static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
 						 struct ena_com_tx_ctx *ena_tx_ctx)
 {
 	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
@@ -334,7 +334,7 @@ static inline int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io
 	return ena_com_sq_update_tail(io_sq);
 }
 
-static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
+static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
 					struct ena_eth_io_rx_cdesc_base *cdesc)
 {
 	ena_rx_ctx->l3_proto = cdesc->status &
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -486,13 +486,32 @@ static void ena_get_ringparam(struct net_device *netdev,
 			      struct ethtool_ringparam *ring)
 {
 	struct ena_adapter *adapter = netdev_priv(netdev);
-	struct ena_ring *tx_ring = &adapter->tx_ring[0];
-	struct ena_ring *rx_ring = &adapter->rx_ring[0];
 
-	ring->rx_max_pending = rx_ring->ring_size;
-	ring->tx_max_pending = tx_ring->ring_size;
-	ring->rx_pending = rx_ring->ring_size;
-	ring->tx_pending = tx_ring->ring_size;
+	ring->tx_max_pending = adapter->max_tx_ring_size;
+	ring->rx_max_pending = adapter->max_rx_ring_size;
+	ring->tx_pending = adapter->tx_ring[0].ring_size;
+	ring->rx_pending = adapter->rx_ring[0].ring_size;
+}
+
+static int ena_set_ringparam(struct net_device *netdev,
+			     struct ethtool_ringparam *ring)
+{
+	struct ena_adapter *adapter = netdev_priv(netdev);
+	u32 new_tx_size, new_rx_size;
+
+	new_tx_size = ring->tx_pending < ENA_MIN_RING_SIZE ?
+			ENA_MIN_RING_SIZE : ring->tx_pending;
+	new_tx_size = rounddown_pow_of_two(new_tx_size);
+
+	new_rx_size = ring->rx_pending < ENA_MIN_RING_SIZE ?
+			ENA_MIN_RING_SIZE : ring->rx_pending;
+	new_rx_size = rounddown_pow_of_two(new_rx_size);
+
+	if (new_tx_size == adapter->requested_tx_ring_size &&
+	    new_rx_size == adapter->requested_rx_ring_size)
+		return 0;
+
+	return ena_update_queue_sizes(adapter, new_tx_size, new_rx_size);
 }
 
 static u32 ena_flow_hash_to_flow_type(u16 hash_fields)
@@ -860,6 +879,7 @@ static const struct ethtool_ops ena_ethtool_ops = {
 	.get_coalesce = ena_get_coalesce,
 	.set_coalesce = ena_set_coalesce,
 	.get_ringparam = ena_get_ringparam,
+	.set_ringparam = ena_set_ringparam,
 	.get_sset_count = ena_get_sset_count,
 	.get_strings = ena_get_strings,
 	.get_ethtool_stats = ena_get_ethtool_stats,
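To make the normalization in ena_set_ringparam() above concrete, here is
a small standalone mirror of it (not driver code; rounddown_pow_of_two_u32()
is a local stand-in for the kernel's rounddown_pow_of_two() macro):

	#include <stdint.h>
	#include <stdio.h>

	#define ENA_MIN_RING_SIZE 256

	/* stand-in for the kernel's rounddown_pow_of_two() */
	static uint32_t rounddown_pow_of_two_u32(uint32_t v)
	{
		uint32_t p = 1;

		while ((p << 1) && (p << 1) <= v)
			p <<= 1;
		return p;
	}

	/* mirrors the clamp-then-round steps in ena_set_ringparam() */
	static uint32_t normalize_ring_size(uint32_t requested)
	{
		uint32_t size = requested < ENA_MIN_RING_SIZE ?
				ENA_MIN_RING_SIZE : requested;

		return rounddown_pow_of_two_u32(size);
	}

	int main(void)
	{
		/* prints "256 512 1024": 100 is raised to the minimum,
		 * 600 is rounded down to 512, 1024 passes through
		 */
		printf("%u %u %u\n",
		       normalize_ring_size(100),
		       normalize_ring_size(600),
		       normalize_ring_size(1024));
		return 0;
	}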
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -182,7 +182,7 @@ static void ena_init_io_rings(struct ena_adapter *adapter)
 		ena_init_io_rings_common(adapter, rxr, i);
 
 		/* TX specific ring state */
-		txr->ring_size = adapter->tx_ring_size;
+		txr->ring_size = adapter->requested_tx_ring_size;
 		txr->tx_max_header_size = ena_dev->tx_max_header_size;
 		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
 		txr->sgl_size = adapter->max_tx_sgl_size;
@@ -190,7 +190,7 @@ static void ena_init_io_rings(struct ena_adapter *adapter)
 			ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
 
 		/* RX specific ring state */
-		rxr->ring_size = adapter->rx_ring_size;
+		rxr->ring_size = adapter->requested_rx_ring_size;
 		rxr->rx_copybreak = adapter->rx_copybreak;
 		rxr->sgl_size = adapter->max_rx_sgl_size;
 		rxr->smoothed_interval =
@@ -326,7 +326,7 @@ static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
 		ena_free_tx_resources(adapter, i);
 }
 
-static inline int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
+static int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
 {
 	if (likely(req_id < rx_ring->ring_size))
 		return 0;
@@ -460,7 +460,7 @@ static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
 		ena_free_rx_resources(adapter, i);
 }
 
-static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
+static int ena_alloc_rx_page(struct ena_ring *rx_ring,
 				    struct ena_rx_buffer *rx_info, gfp_t gfp)
 {
 	struct ena_com_buf *ena_buf;
@@ -594,7 +594,6 @@ static void ena_free_rx_bufs(struct ena_adapter *adapter,
 
 /* ena_refill_all_rx_bufs - allocate all queues Rx buffers
  * @adapter: board private structure
- *
  */
 static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
 {
@@ -621,7 +620,7 @@ static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
 		ena_free_rx_bufs(adapter, i);
 }
 
-static inline void ena_unmap_tx_skb(struct ena_ring *tx_ring,
+static void ena_unmap_tx_skb(struct ena_ring *tx_ring,
 				    struct ena_tx_buffer *tx_info)
 {
 	struct ena_com_buf *ena_buf;
@@ -956,7 +955,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
 * @ena_rx_ctx: received packet context/metadata
 * @skb: skb currently being received and modified
 */
-static inline void ena_rx_checksum(struct ena_ring *rx_ring,
+static void ena_rx_checksum(struct ena_ring *rx_ring,
 				   struct ena_com_rx_ctx *ena_rx_ctx,
 				   struct sk_buff *skb)
 {
@@ -1156,7 +1155,7 @@ error:
 	return 0;
 }
 
-inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring,
+void ena_adjust_intr_moderation(struct ena_ring *rx_ring,
 				       struct ena_ring *tx_ring)
 {
 	/* We apply adaptive moderation on Rx path only.
@@ -1175,7 +1174,7 @@ inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring,
 	rx_ring->per_napi_bytes = 0;
 }
 
-static inline void ena_unmask_interrupt(struct ena_ring *tx_ring,
+static void ena_unmask_interrupt(struct ena_ring *tx_ring,
 					struct ena_ring *rx_ring)
 {
 	struct ena_eth_io_intr_reg intr_reg;
@@ -1195,7 +1194,7 @@ static inline void ena_unmask_interrupt(struct ena_ring *tx_ring,
 	ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
 }
 
-static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring,
+static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
 					     struct ena_ring *rx_ring)
 {
 	int cpu = get_cpu();
@@ -1638,7 +1637,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
 	ctx.qid = ena_qid;
 	ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
 	ctx.msix_vector = msix_vector;
-	ctx.queue_size = adapter->tx_ring_size;
+	ctx.queue_size = tx_ring->ring_size;
 	ctx.numa_node = cpu_to_node(tx_ring->cpu);
 
 	rc = ena_com_create_io_queue(ena_dev, &ctx);
@@ -1705,7 +1704,7 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
 	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
 	ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
 	ctx.msix_vector = msix_vector;
-	ctx.queue_size = adapter->rx_ring_size;
+	ctx.queue_size = rx_ring->ring_size;
 	ctx.numa_node = cpu_to_node(rx_ring->cpu);
 
 	rc = ena_com_create_io_queue(ena_dev, &ctx);
@@ -1752,6 +1751,112 @@ create_err:
 	return rc;
 }
 
+static void set_io_rings_size(struct ena_adapter *adapter,
+			      int new_tx_size, int new_rx_size)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_queues; i++) {
+		adapter->tx_ring[i].ring_size = new_tx_size;
+		adapter->rx_ring[i].ring_size = new_rx_size;
+	}
+}
+
+/* This function allows queue allocation to backoff when the system is
+ * low on memory. If there is not enough memory to allocate io queues
+ * the driver will try to allocate smaller queues.
+ *
+ * The backoff algorithm is as follows:
+ *  1. Try to allocate TX and RX and if successful.
+ *  1.1. return success
+ *
+ *  2. Divide by 2 the size of the larger of RX and TX queues (or both if their size is the same).
+ *
+ *  3. If TX or RX is smaller than 256
+ *  3.1. return failure.
+ *  4. else
+ *  4.1. go back to 1.
+ */
+static int create_queues_with_size_backoff(struct ena_adapter *adapter)
+{
+	int rc, cur_rx_ring_size, cur_tx_ring_size;
+	int new_rx_ring_size, new_tx_ring_size;
+
+	/* current queue sizes might be set to smaller than the requested
+	 * ones due to past queue allocation failures.
+	 */
+	set_io_rings_size(adapter, adapter->requested_tx_ring_size,
+			  adapter->requested_rx_ring_size);
+
+	while (1) {
+		rc = ena_setup_all_tx_resources(adapter);
+		if (rc)
+			goto err_setup_tx;
+
+		rc = ena_create_all_io_tx_queues(adapter);
+		if (rc)
+			goto err_create_tx_queues;
+
+		rc = ena_setup_all_rx_resources(adapter);
+		if (rc)
+			goto err_setup_rx;
+
+		rc = ena_create_all_io_rx_queues(adapter);
+		if (rc)
+			goto err_create_rx_queues;
+
+		return 0;
+
+err_create_rx_queues:
+		ena_free_all_io_rx_resources(adapter);
+err_setup_rx:
+		ena_destroy_all_tx_queues(adapter);
+err_create_tx_queues:
+		ena_free_all_io_tx_resources(adapter);
+err_setup_tx:
+		if (rc != -ENOMEM) {
+			netif_err(adapter, ifup, adapter->netdev,
+				  "Queue creation failed with error code %d\n",
+				  rc);
+			return rc;
+		}
+
+		cur_tx_ring_size = adapter->tx_ring[0].ring_size;
+		cur_rx_ring_size = adapter->rx_ring[0].ring_size;
+
+		netif_err(adapter, ifup, adapter->netdev,
+			  "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
+			  cur_tx_ring_size, cur_rx_ring_size);
+
+		new_tx_ring_size = cur_tx_ring_size;
+		new_rx_ring_size = cur_rx_ring_size;
+
+		/* Decrease the size of the larger queue, or
+		 * decrease both if they are the same size.
+		 */
+		if (cur_rx_ring_size <= cur_tx_ring_size)
+			new_tx_ring_size = cur_tx_ring_size / 2;
+		if (cur_rx_ring_size >= cur_tx_ring_size)
+			new_rx_ring_size = cur_rx_ring_size / 2;
+
+		if (cur_tx_ring_size < ENA_MIN_RING_SIZE ||
+		    cur_rx_ring_size < ENA_MIN_RING_SIZE) {
+			netif_err(adapter, ifup, adapter->netdev,
+				  "Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n",
+				  ENA_MIN_RING_SIZE);
+			return rc;
+		}
+
+		netif_err(adapter, ifup, adapter->netdev,
+			  "Retrying queue creation with sizes TX=%d, RX=%d\n",
+			  new_tx_ring_size,
+			  new_rx_ring_size);
+
+		set_io_rings_size(adapter, new_tx_ring_size,
+				  new_rx_ring_size);
+	}
+}
+
 static int ena_up(struct ena_adapter *adapter)
 {
 	int rc, i;
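The halving schedule of create_queues_with_size_backoff() above can be
simulated in isolation. The sketch below is not driver code and the
starting sizes are arbitrary examples; it prints the ring-size pairs the
loop would attempt if every allocation kept failing with -ENOMEM:

	#include <stdio.h>

	#define ENA_MIN_RING_SIZE 256

	int main(void)
	{
		int tx = 8192, rx = 1024;	/* example requested sizes */

		for (;;) {
			printf("try TX=%d RX=%d\n", tx, rx);

			/* halve the larger queue, or both when equal */
			int new_tx = tx, new_rx = rx;

			if (rx <= tx)
				new_tx = tx / 2;
			if (rx >= tx)
				new_rx = rx / 2;

			/* like the driver, give up once the sizes that
			 * just failed are below the minimum
			 */
			if (tx < ENA_MIN_RING_SIZE || rx < ENA_MIN_RING_SIZE) {
				printf("give up (< %d)\n", ENA_MIN_RING_SIZE);
				return 0;
			}

			tx = new_tx;
			rx = new_rx;
		}
	}

With these inputs the attempts are (8192,1024), (4096,1024), (2048,1024),
(1024,1024), (512,512), (256,256), (128,128), after which the loop gives
up; note that the check compares the sizes that last failed against
ENA_MIN_RING_SIZE, so one attempt below the minimum is made before
bailing out.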
@@ -1771,25 +1876,9 @@ static int ena_up(struct ena_adapter *adapter)
 	if (rc)
 		goto err_req_irq;
 
-	/* allocate transmit descriptors */
-	rc = ena_setup_all_tx_resources(adapter);
+	rc = create_queues_with_size_backoff(adapter);
 	if (rc)
-		goto err_setup_tx;
-
-	/* allocate receive descriptors */
-	rc = ena_setup_all_rx_resources(adapter);
-	if (rc)
-		goto err_setup_rx;
-
-	/* Create TX queues */
-	rc = ena_create_all_io_tx_queues(adapter);
-	if (rc)
-		goto err_create_tx_queues;
-
-	/* Create RX queues */
-	rc = ena_create_all_io_rx_queues(adapter);
-	if (rc)
-		goto err_create_rx_queues;
+		goto err_create_queues_with_backoff;
 
 	rc = ena_up_complete(adapter);
 	if (rc)
@@ -1818,14 +1907,11 @@ static int ena_up(struct ena_adapter *adapter)
 	return rc;
 
 err_up:
-	ena_destroy_all_rx_queues(adapter);
-err_create_rx_queues:
-	ena_destroy_all_tx_queues(adapter);
-err_create_tx_queues:
-	ena_free_all_io_rx_resources(adapter);
-err_setup_rx:
-	ena_free_all_io_tx_resources(adapter);
-err_setup_tx:
+	ena_destroy_all_tx_queues(adapter);
+	ena_free_all_io_tx_resources(adapter);
+	ena_destroy_all_rx_queues(adapter);
+	ena_free_all_io_rx_resources(adapter);
+err_create_queues_with_backoff:
 	ena_free_io_irq(adapter);
 err_req_irq:
 	ena_del_napi(adapter);
@@ -1945,6 +2031,20 @@ static int ena_close(struct net_device *netdev)
 	return 0;
 }
 
+int ena_update_queue_sizes(struct ena_adapter *adapter,
+			   u32 new_tx_size,
+			   u32 new_rx_size)
+{
+	bool dev_up;
+
+	dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+	ena_close(adapter->netdev);
+	adapter->requested_tx_ring_size = new_tx_size;
+	adapter->requested_rx_ring_size = new_rx_size;
+	ena_init_io_rings(adapter);
+	return dev_up ? ena_up(adapter) : 0;
+}
+
 static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
 {
 	u32 mss = skb_shinfo(skb)->gso_size;
@@ -2465,13 +2565,6 @@ static int ena_device_validate_params(struct ena_adapter *adapter,
 		return -EINVAL;
 	}
 
-	if ((get_feat_ctx->max_queues.max_cq_num < adapter->num_queues) ||
-	    (get_feat_ctx->max_queues.max_sq_num < adapter->num_queues)) {
-		netif_err(adapter, drv, netdev,
-			  "Error, device doesn't support enough queues\n");
-		return -EINVAL;
-	}
-
 	if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
 		netif_err(adapter, drv, netdev,
 			  "Error, device max mtu is smaller than netdev MTU\n");
@@ -3045,18 +3138,32 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev,
 				 struct ena_com_dev *ena_dev,
 				 struct ena_com_dev_get_features_ctx *get_feat_ctx)
 {
-	int io_sq_num, io_queue_num;
+	int io_tx_sq_num, io_tx_cq_num, io_rx_num, io_queue_num;
 
-	/* In case of LLQ use the llq number in the get feature cmd */
+	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
+		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
+			&get_feat_ctx->max_queue_ext.max_queue_ext;
+		io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num,
+				  max_queue_ext->max_rx_cq_num);
+
+		io_tx_sq_num = max_queue_ext->max_tx_sq_num;
+		io_tx_cq_num = max_queue_ext->max_tx_cq_num;
+	} else {
+		struct ena_admin_queue_feature_desc *max_queues =
+			&get_feat_ctx->max_queues;
+		io_tx_sq_num = max_queues->max_sq_num;
+		io_tx_cq_num = max_queues->max_cq_num;
+		io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num);
+	}
+
+	/* In case of LLQ use the llq fields for the tx SQ/CQ */
 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
-		io_sq_num = get_feat_ctx->llq.max_llq_num;
-	else
-		io_sq_num = get_feat_ctx->max_queues.max_sq_num;
+		io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
 
 	io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
-	io_queue_num = min_t(int, io_queue_num, io_sq_num);
-	io_queue_num = min_t(int, io_queue_num,
-			     get_feat_ctx->max_queues.max_cq_num);
+	io_queue_num = min_t(int, io_queue_num, io_rx_num);
+	io_queue_num = min_t(int, io_queue_num, io_tx_sq_num);
+	io_queue_num = min_t(int, io_queue_num, io_tx_cq_num);
 	/* 1 IRQ for for mgmnt and 1 IRQs for each IO direction */
 	io_queue_num = min_t(int, io_queue_num, pci_msix_vec_count(pdev) - 1);
 	if (unlikely(!io_queue_num)) {
@@ -3230,7 +3337,7 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
 	pci_release_selected_regions(pdev, release_bars);
 }
 
-static inline void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
+static void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
 {
 	llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
 	llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
@@ -3239,36 +3346,70 @@ static inline void set_default_llq_configurations(struct ena_llq_configurations
 	llq_config->llq_ring_entry_size_value = 128;
 }
 
-static int ena_calc_queue_size(struct pci_dev *pdev,
-			       struct ena_com_dev *ena_dev,
-			       u16 *max_tx_sgl_size,
-			       u16 *max_rx_sgl_size,
-			       struct ena_com_dev_get_features_ctx *get_feat_ctx)
-{
-	u32 queue_size = ENA_DEFAULT_RING_SIZE;
-
-	queue_size = min_t(u32, queue_size,
-			   get_feat_ctx->max_queues.max_cq_depth);
-	queue_size = min_t(u32, queue_size,
-			   get_feat_ctx->max_queues.max_sq_depth);
-
-	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
-		queue_size = min_t(u32, queue_size,
-				   get_feat_ctx->llq.max_llq_depth);
-
-	queue_size = rounddown_pow_of_two(queue_size);
-
-	if (unlikely(!queue_size)) {
-		dev_err(&pdev->dev, "Invalid queue size\n");
-		return -EFAULT;
-	}
-
-	*max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
-				 get_feat_ctx->max_queues.max_packet_tx_descs);
-	*max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
-				 get_feat_ctx->max_queues.max_packet_rx_descs);
-
-	return queue_size;
+static int ena_calc_queue_size(struct ena_calc_queue_size_ctx *ctx)
+{
+	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
+	struct ena_com_dev *ena_dev = ctx->ena_dev;
+	u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
+	u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
+	u32 max_tx_queue_size;
+	u32 max_rx_queue_size;
+
+	if (ctx->ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
+		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
+			&ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
+		max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
+					  max_queue_ext->max_rx_sq_depth);
+		max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
+
+		if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+			max_tx_queue_size = min_t(u32, max_tx_queue_size,
+						  llq->max_llq_depth);
+		else
+			max_tx_queue_size = min_t(u32, max_tx_queue_size,
+						  max_queue_ext->max_tx_sq_depth);
+
+		ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+					     max_queue_ext->max_per_packet_tx_descs);
+		ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+					     max_queue_ext->max_per_packet_rx_descs);
+	} else {
+		struct ena_admin_queue_feature_desc *max_queues =
+			&ctx->get_feat_ctx->max_queues;
+		max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
+					  max_queues->max_sq_depth);
+		max_tx_queue_size = max_queues->max_cq_depth;
+
+		if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+			max_tx_queue_size = min_t(u32, max_tx_queue_size,
+						  llq->max_llq_depth);
+		else
+			max_tx_queue_size = min_t(u32, max_tx_queue_size,
+						  max_queues->max_sq_depth);
+
+		ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+					     max_queues->max_packet_tx_descs);
+		ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+					     max_queues->max_packet_rx_descs);
+	}
+
+	max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
+	max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
+
+	tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
+				  max_tx_queue_size);
+	rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
+				  max_rx_queue_size);
+
+	tx_queue_size = rounddown_pow_of_two(tx_queue_size);
+	rx_queue_size = rounddown_pow_of_two(rx_queue_size);
+
+	ctx->max_tx_queue_size = max_tx_queue_size;
+	ctx->max_rx_queue_size = max_rx_queue_size;
+	ctx->tx_queue_size = tx_queue_size;
+	ctx->rx_queue_size = rx_queue_size;
+
+	return 0;
 }
 
 /* ena_probe - Device Initialization Routine
@@ -3284,6 +3425,7 @@ static int ena_calc_queue_size(struct pci_dev *pdev,
 static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	struct ena_com_dev_get_features_ctx get_feat_ctx;
+	struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
 	struct ena_llq_configurations llq_config;
 	struct ena_com_dev *ena_dev = NULL;
 	struct ena_adapter *adapter;
@@ -3291,9 +3433,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct net_device *netdev;
 	static int adapters_found;
 	char *queue_type_str;
-	u16 tx_sgl_size = 0;
-	u16 rx_sgl_size = 0;
-	int queue_size;
 	bool wd_state;
 
 	dev_dbg(&pdev->dev, "%s\n", __func__);
@@ -3350,20 +3489,25 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_device_destroy;
 	}
 
+	calc_queue_ctx.ena_dev = ena_dev;
+	calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
+	calc_queue_ctx.pdev = pdev;
+
 	/* initial Tx interrupt delay, Assumes 1 usec granularity.
 	 * Updated during device initialization with the real granularity
 	 */
 	ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
 	io_queue_num = ena_calc_io_queue_num(pdev, ena_dev, &get_feat_ctx);
-	queue_size = ena_calc_queue_size(pdev, ena_dev, &tx_sgl_size,
-					 &rx_sgl_size, &get_feat_ctx);
-	if ((queue_size <= 0) || (io_queue_num <= 0)) {
+	rc = ena_calc_queue_size(&calc_queue_ctx);
+	if (rc || io_queue_num <= 0) {
 		rc = -EFAULT;
 		goto err_device_destroy;
 	}
 
-	dev_info(&pdev->dev, "creating %d io queues. queue size: %d. LLQ is %s\n",
-		 io_queue_num, queue_size,
+	dev_info(&pdev->dev, "creating %d io queues. rx queue size: %d tx queue size. %d LLQ is %s\n",
+		 io_queue_num,
+		 calc_queue_ctx.rx_queue_size,
+		 calc_queue_ctx.tx_queue_size,
 		 (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) ?
 		 "ENABLED" : "DISABLED");
 
@@ -3389,11 +3533,12 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 	adapter->reset_reason = ENA_REGS_RESET_NORMAL;
 
-	adapter->tx_ring_size = queue_size;
-	adapter->rx_ring_size = queue_size;
-
-	adapter->max_tx_sgl_size = tx_sgl_size;
-	adapter->max_rx_sgl_size = rx_sgl_size;
+	adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
+	adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
+	adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
+	adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
+	adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
+	adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
 
 	adapter->num_queues = io_queue_num;
 	adapter->last_monitored_tx_qid = 0;
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -44,8 +44,8 @@
 #include "ena_eth_com.h"
 
 #define DRV_MODULE_VER_MAJOR 2
-#define DRV_MODULE_VER_MINOR 0
-#define DRV_MODULE_VER_SUBMINOR 3
+#define DRV_MODULE_VER_MINOR 1
+#define DRV_MODULE_VER_SUBMINOR 0
 
 #define DRV_MODULE_NAME "ena"
 #ifndef DRV_MODULE_VERSION
@@ -79,6 +79,7 @@
 #define ENA_BAR_MASK (BIT(ENA_REG_BAR) | BIT(ENA_MEM_BAR))
 
 #define ENA_DEFAULT_RING_SIZE (1024)
+#define ENA_MIN_RING_SIZE (256)
 
 #define ENA_TX_WAKEUP_THRESH (MAX_SKB_FRAGS + 2)
 #define ENA_DEFAULT_RX_COPYBREAK (256 - NET_IP_ALIGN)
@@ -154,6 +155,18 @@ struct ena_napi {
 	u32 qid;
 };
 
+struct ena_calc_queue_size_ctx {
+	struct ena_com_dev_get_features_ctx *get_feat_ctx;
+	struct ena_com_dev *ena_dev;
+	struct pci_dev *pdev;
+	u16 tx_queue_size;
+	u16 rx_queue_size;
+	u16 max_tx_queue_size;
+	u16 max_rx_queue_size;
+	u16 max_tx_sgl_size;
+	u16 max_rx_sgl_size;
+};
+
 struct ena_tx_buffer {
 	struct sk_buff *skb;
 	/* num of ena desc for this specific skb
@@ -319,8 +332,11 @@ struct ena_adapter {
 	u32 tx_usecs, rx_usecs; /* interrupt moderation */
 	u32 tx_frames, rx_frames; /* interrupt moderation */
 
-	u32 tx_ring_size;
-	u32 rx_ring_size;
+	u32 requested_tx_ring_size;
+	u32 requested_rx_ring_size;
+
+	u32 max_tx_ring_size;
+	u32 max_rx_ring_size;
 
 	u32 msg_enable;
 
@@ -372,6 +388,10 @@ void ena_dump_stats_to_dmesg(struct ena_adapter *adapter);
 
 void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
 
+int ena_update_queue_sizes(struct ena_adapter *adapter,
+			   u32 new_tx_size,
+			   u32 new_rx_size);
+
 int ena_get_sset_count(struct net_device *netdev, int sset);
 
 #endif /* !(ENA_H) */