diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index 336742f6e3c3..b818a169c193 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -491,6 +491,36 @@ enum ena_admin_llq_stride_ctrl {
 	ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY = 2,
 };
 
+enum ena_admin_accel_mode_feat {
+	ENA_ADMIN_DISABLE_META_CACHING = 0,
+	ENA_ADMIN_LIMIT_TX_BURST = 1,
+};
+
+struct ena_admin_accel_mode_get {
+	/* bit field of enum ena_admin_accel_mode_feat */
+	u16 supported_flags;
+
+	/* maximum burst size between two doorbells. The size is in bytes */
+	u16 max_tx_burst_size;
+};
+
+struct ena_admin_accel_mode_set {
+	/* bit field of enum ena_admin_accel_mode_feat */
+	u16 enabled_flags;
+
+	u16 reserved;
+};
+
+struct ena_admin_accel_mode_req {
+	union {
+		u32 raw[2];
+
+		struct ena_admin_accel_mode_get get;
+
+		struct ena_admin_accel_mode_set set;
+	} u;
+};
+
 struct ena_admin_feature_llq_desc {
 	u32 max_llq_num;
 
@@ -536,10 +566,13 @@ struct ena_admin_feature_llq_desc {
 	/* the stride control the driver selected to use */
 	u16 descriptors_stride_ctrl_enabled;
 
-	/* Maximum size in bytes taken by llq entries in a single tx burst.
-	 * Set to 0 when there is no such limit.
+	/* reserved */
+	u32 reserved1;
+
+	/* accelerated low latency queues requirement. driver needs to
+	 * support those requirements in order to use accelerated llq
 	 */
-	u32 max_tx_burst_size;
+	struct ena_admin_accel_mode_req accel_mode;
 };
 
 struct ena_admin_queue_ext_feature_fields {
@@ -816,7 +849,9 @@ struct ena_admin_host_info {
 	/* 0 : reserved
 	 * 1 : rx_offset
 	 * 2 : interrupt_moderation
-	 * 31:3 : reserved
+	 * 3 : rx_buf_mirroring
+	 * 4 : rss_configurable_function_key
+	 * 31:5 : reserved
 	 */
 	u32 driver_supported_features;
 };
@@ -1129,6 +1164,10 @@ struct ena_admin_ena_mmio_req_read_less_resp {
 #define ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK BIT(1)
 #define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_SHIFT 2
 #define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK BIT(2)
+#define ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_SHIFT 3
+#define ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK BIT(3)
+#define ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_SHIFT 4
+#define ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK BIT(4)
 
 /* aenq_common_desc */
 #define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0)
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 432f143559a1..435bf05a853c 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -403,6 +403,8 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
 		       0x0, io_sq->llq_info.desc_list_entry_size);
 		io_sq->llq_buf_ctrl.descs_left_in_line =
 			io_sq->llq_info.descs_num_before_header;
+		io_sq->disable_meta_caching =
+			io_sq->llq_info.disable_meta_caching;
 
 		if (io_sq->llq_info.max_entries_in_tx_burst > 0)
 			io_sq->entries_in_tx_burst_left =
@@ -626,6 +628,10 @@ static int ena_com_set_llq(struct ena_com_dev *ena_dev)
 	cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
 	cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
 
+	cmd.u.llq.accel_mode.u.set.enabled_flags =
+		BIT(ENA_ADMIN_DISABLE_META_CACHING) |
+		BIT(ENA_ADMIN_LIMIT_TX_BURST);
+
 	ret = ena_com_execute_admin_command(admin_queue,
 					    (struct ena_admin_aq_entry *)&cmd,
 					    sizeof(cmd),
@@ -643,6 +649,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
 				   struct ena_llq_configurations *llq_default_cfg)
 {
 	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
+	struct ena_admin_accel_mode_get llq_accel_mode_get;
 	u16 supported_feat;
 	int rc;
 
@@ -742,9 +749,17 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
 			 llq_default_cfg->llq_num_decs_before_header,
 			 supported_feat, llq_info->descs_num_before_header);
 	}
+	/* Check for accelerated queue supported */
+	llq_accel_mode_get = llq_features->accel_mode.u.get;
 
-	llq_info->max_entries_in_tx_burst =
-		(u16)(llq_features->max_tx_burst_size / llq_default_cfg->llq_ring_entry_size_value);
+	llq_info->disable_meta_caching =
+		!!(llq_accel_mode_get.supported_flags &
+		   BIT(ENA_ADMIN_DISABLE_META_CACHING));
+
+	if (llq_accel_mode_get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST))
+		llq_info->max_entries_in_tx_burst =
+			llq_accel_mode_get.max_tx_burst_size /
+			llq_default_cfg->llq_ring_entry_size_value;
 
 	rc = ena_com_set_llq(ena_dev);
 	if (rc)
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index bc187adf54e4..4287d47b2b0b 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -127,6 +127,7 @@ struct ena_com_llq_info {
 	u16 descs_num_before_header;
 	u16 descs_per_entry;
 	u16 max_entries_in_tx_burst;
+	bool disable_meta_caching;
 };
 
 struct ena_com_io_cq {
@@ -189,6 +190,8 @@ struct ena_com_io_sq {
 	enum queue_direction direction;
 	enum ena_admin_placement_policy_type mem_queue_type;
 
+	bool disable_meta_caching;
+
 	u32 msix_vector;
 	struct ena_com_tx_meta cached_tx_meta;
 	struct ena_com_llq_info llq_info;
@@ -230,11 +233,11 @@ struct ena_com_admin_sq {
 };
 
 struct ena_com_stats_admin {
-	u32 aborted_cmd;
-	u32 submitted_cmd;
-	u32 completed_cmd;
-	u32 out_of_space;
-	u32 no_completion;
+	u64 aborted_cmd;
+	u64 submitted_cmd;
+	u64 completed_cmd;
+	u64 out_of_space;
+	u64 no_completion;
 };
 
 struct ena_com_admin_queue {
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index ec8ea25e988d..ccd440589565 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -285,11 +285,10 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
 	return count;
 }
 
-static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
-						 struct ena_com_tx_ctx *ena_tx_ctx)
+static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
+			       struct ena_com_tx_meta *ena_meta)
 {
 	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
-	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
 
 	meta_desc = get_sq_desc(io_sq);
 	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));
@@ -309,12 +308,13 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
 
 	/* Extended meta desc */
 	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
-	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
 	meta_desc->len_ctrl |= (io_sq->phase <<
 		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
 		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;
 
 	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
+	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
+
 	meta_desc->word2 |= ena_meta->l3_hdr_len &
 		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
 	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
@@ -325,15 +325,38 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
 		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
 		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;
 
-	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
-
-	/* Cached the meta desc */
-	memcpy(&io_sq->cached_tx_meta, ena_meta,
-	       sizeof(struct ena_com_tx_meta));
-
 	return ena_com_sq_update_tail(io_sq);
 }
 
+static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
+						 struct ena_com_tx_ctx *ena_tx_ctx,
+						 bool *have_meta)
+{
+	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
+
+	/* When disable meta caching is set, don't bother to save the meta and
+	 * compare it to the stored version, just create the meta
+	 */
+	if (io_sq->disable_meta_caching) {
+		if (unlikely(!ena_tx_ctx->meta_valid))
+			return -EINVAL;
+
+		*have_meta = true;
+		return ena_com_create_meta(io_sq, ena_meta);
+	}
+
+	if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) {
+		*have_meta = true;
+		/* Cache the meta desc */
+		memcpy(&io_sq->cached_tx_meta, ena_meta,
+		       sizeof(struct ena_com_tx_meta));
+		return ena_com_create_meta(io_sq, ena_meta);
+	}
+
+	*have_meta = false;
+	return 0;
+}
+
 static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
 				 struct ena_eth_io_rx_cdesc_base *cdesc)
 {
@@ -402,12 +425,10 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
 	if (unlikely(rc))
 		return rc;
 
-	have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
-			ena_tx_ctx);
-	if (have_meta) {
-		rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
-		if (unlikely(rc))
-			return rc;
+	rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
+	if (unlikely(rc)) {
+		pr_err("failed to create and store tx meta desc\n");
+		return rc;
 	}
 
 	/* If the caller doesn't want to send packets */
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 8b1afd3b32f2..b6592cb93b04 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -157,7 +157,8 @@ static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
 	llq_info = &io_sq->llq_info;
 	num_descs = ena_tx_ctx->num_bufs;
 
-	if (unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
+	if (llq_info->disable_meta_caching ||
+	    unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
 		++num_descs;
 
 	if (num_descs > llq_info->descs_num_before_header) {
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index e340b65af08c..430275bc0d04 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -164,13 +164,13 @@ static void ena_queue_stats(struct ena_adapter *adapter, u64 **data)
 static void ena_dev_admin_queue_stats(struct ena_adapter *adapter, u64 **data)
 {
 	const struct ena_stats *ena_stats;
-	u32 *ptr;
+	u64 *ptr;
 	int i;
 
 	for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) {
 		ena_stats = &ena_stats_ena_com_strings[i];
 
-		ptr = (u32 *)((uintptr_t)&adapter->ena_dev->admin_queue.stats +
+		ptr = (u64 *)((uintptr_t)&adapter->ena_dev->admin_queue.stats +
 			(uintptr_t)ena_stats->stat_offset);
 
 		*(*data)++ = *ptr;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 3eb63b12dd68..6478c1e0d137 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -307,7 +307,7 @@ static int ena_xdp_xmit_buff(struct net_device *dev,
 			     struct ena_rx_buffer *rx_info)
 {
 	struct ena_adapter *adapter = netdev_priv(dev);
-	struct ena_com_tx_ctx ena_tx_ctx = {0};
+	struct ena_com_tx_ctx ena_tx_ctx = {};
 	struct ena_tx_buffer *tx_info;
 	struct ena_ring *xdp_ring;
 	u16 next_to_use, req_id;
@@ -655,6 +655,7 @@ static void ena_init_io_rings(struct ena_adapter *adapter,
 		txr->sgl_size = adapter->max_tx_sgl_size;
 		txr->smoothed_interval =
 			ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
+		txr->disable_meta_caching = adapter->disable_meta_caching;
 
 		/* Don't init RX queues for xdp queues */
 		if (!ENA_IS_XDP_INDEX(adapter, i)) {
@@ -959,8 +960,11 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring,
 		return -ENOMEM;
 	}
 
+	/* To enable NIC-side port-mirroring, AKA SPAN port,
+	 * we make the buffer readable from the nic as well
+	 */
 	dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
-			   DMA_FROM_DEVICE);
+			   DMA_BIDIRECTIONAL);
 	if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
 		u64_stats_update_begin(&rx_ring->syncp);
 		rx_ring->rx_stats.dma_mapping_err++;
@@ -993,10 +997,9 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
 		return;
 	}
 
-	dma_unmap_page(rx_ring->dev,
-		       ena_buf->paddr - rx_ring->rx_headroom,
+	dma_unmap_page(rx_ring->dev, ena_buf->paddr - rx_ring->rx_headroom,
 		       ENA_PAGE_SIZE,
-		       DMA_FROM_DEVICE);
+		       DMA_BIDIRECTIONAL);
 
 	__free_page(page);
 	rx_info->page = NULL;
@@ -1431,7 +1434,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
 	do {
 		dma_unmap_page(rx_ring->dev,
 			       dma_unmap_addr(&rx_info->ena_buf, paddr),
-			       ENA_PAGE_SIZE, DMA_FROM_DEVICE);
+			       ENA_PAGE_SIZE, DMA_BIDIRECTIONAL);
 
 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
 				rx_info->page_offset, len, ENA_PAGE_SIZE);
@@ -1913,7 +1916,10 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
 	/* Update numa and unmask the interrupt only when schedule
 	 * from the interrupt context (vs from sk_busy_loop)
 	 */
-	if (napi_complete_done(napi, rx_work_done)) {
+	if (napi_complete_done(napi, rx_work_done) &&
+	    READ_ONCE(ena_napi->interrupts_masked)) {
+		smp_rmb(); /* make sure interrupts_masked is read */
+		WRITE_ONCE(ena_napi->interrupts_masked, false);
 		/* We apply adaptive moderation on Rx path only.
 		 * Tx uses static interrupt moderation.
 		 */
@@ -1961,6 +1967,9 @@ static irqreturn_t ena_intr_msix_io(int irq, void *data)
 
 	ena_napi->first_interrupt = true;
 
+	WRITE_ONCE(ena_napi->interrupts_masked, true);
+	smp_wmb(); /* write interrupts_masked before calling napi */
+
 	napi_schedule_irqoff(&ena_napi->napi);
 
 	return IRQ_HANDLED;
@@ -2775,7 +2784,9 @@ int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count)
 	return dev_was_up ? ena_open(adapter->netdev) : 0;
 }
 
-static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
+static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx,
+			struct sk_buff *skb,
+			bool disable_meta_caching)
 {
 	u32 mss = skb_shinfo(skb)->gso_size;
 	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
@@ -2819,7 +2830,9 @@ static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
 		ena_meta->l3_hdr_len = skb_network_header_len(skb);
 		ena_meta->l3_hdr_offset = skb_network_offset(skb);
 		ena_tx_ctx->meta_valid = 1;
-
+	} else if (disable_meta_caching) {
+		memset(ena_meta, 0, sizeof(*ena_meta));
+		ena_tx_ctx->meta_valid = 1;
 	} else {
 		ena_tx_ctx->meta_valid = 0;
 	}
@@ -3003,7 +3016,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	ena_tx_ctx.header_len = header_len;
 
 	/* set flags and meta data */
-	ena_tx_csum(&ena_tx_ctx, skb);
+	ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching);
 
 	rc = ena_xmit_common(dev,
 			     tx_ring,
@@ -3117,7 +3130,9 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd
 
 	host_info->driver_supported_features =
 		ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
-		ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK;
+		ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK |
+		ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK |
+		ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK;
 
 	rc = ena_com_set_host_attributes(ena_dev);
 	if (rc) {
@@ -3270,10 +3285,71 @@ static int ena_device_validate_params(struct ena_adapter *adapter,
 	return 0;
 }
 
+static void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
+{
+	llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
+	llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
+	llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
+	llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
+	llq_config->llq_ring_entry_size_value = 128;
+}
+
+static int ena_set_queues_placement_policy(struct pci_dev *pdev,
+					   struct ena_com_dev *ena_dev,
+					   struct ena_admin_feature_llq_desc *llq,
+					   struct ena_llq_configurations *llq_default_configurations)
+{
+	int rc;
+	u32 llq_feature_mask;
+
+	llq_feature_mask = 1 << ENA_ADMIN_LLQ;
+	if (!(ena_dev->supported_features & llq_feature_mask)) {
+		dev_err(&pdev->dev,
+			"LLQ is not supported Fallback to host mode policy.\n");
+		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+		return 0;
+	}
+
+	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
+	if (unlikely(rc)) {
+		dev_err(&pdev->dev,
+			"Failed to configure the device mode. Fallback to host mode policy.\n");
+		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+	}
+
+	return 0;
+}
+
+static int ena_map_llq_mem_bar(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
+			       int bars)
+{
+	bool has_mem_bar = !!(bars & BIT(ENA_MEM_BAR));
+
+	if (!has_mem_bar) {
+		if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+			dev_err(&pdev->dev,
+				"ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
+			ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+		}
+
+		return 0;
+	}
+
+	ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
+					   pci_resource_start(pdev, ENA_MEM_BAR),
+					   pci_resource_len(pdev, ENA_MEM_BAR));
+
+	if (!ena_dev->mem_bar)
+		return -EFAULT;
+
+	return 0;
+}
+
 static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
 			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
 			   bool *wd_state)
 {
+	struct ena_llq_configurations llq_config;
 	struct device *dev = &pdev->dev;
 	bool readless_supported;
 	u32 aenq_groups;
@@ -3364,6 +3440,15 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
 
 	*wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
 
+	set_default_llq_configurations(&llq_config);
+
+	rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
+					     &llq_config);
+	if (rc) {
+		dev_err(&pdev->dev, "ena device init failed\n");
+		goto err_admin_init;
+	}
+
 	return 0;
 
 err_admin_init:
@@ -3870,54 +3955,6 @@ static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev,
 	return max_num_io_queues;
 }
 
-static int ena_set_queues_placement_policy(struct pci_dev *pdev,
-					   struct ena_com_dev *ena_dev,
-					   struct ena_admin_feature_llq_desc *llq,
-					   struct ena_llq_configurations *llq_default_configurations)
-{
-	bool has_mem_bar;
-	int rc;
-	u32 llq_feature_mask;
-
-	llq_feature_mask = 1 << ENA_ADMIN_LLQ;
-	if (!(ena_dev->supported_features & llq_feature_mask)) {
-		dev_err(&pdev->dev,
-			"LLQ is not supported Fallback to host mode policy.\n");
-		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
-		return 0;
-	}
-
-	has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);
-
-	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
-	if (unlikely(rc)) {
-		dev_err(&pdev->dev,
-			"Failed to configure the device mode. Fallback to host mode policy.\n");
-		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
-		return 0;
-	}
-
-	/* Nothing to config, exit */
-	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
-		return 0;
-
-	if (!has_mem_bar) {
-		dev_err(&pdev->dev,
-			"ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
-		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
-		return 0;
-	}
-
-	ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
-					   pci_resource_start(pdev, ENA_MEM_BAR),
-					   pci_resource_len(pdev, ENA_MEM_BAR));
-
-	if (!ena_dev->mem_bar)
-		return -EFAULT;
-
-	return 0;
-}
-
 static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
 				 struct net_device *netdev)
 {
@@ -4033,14 +4070,6 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
 	pci_release_selected_regions(pdev, release_bars);
 }
 
-static void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
-{
-	llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
-	llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
-	llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
-	llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
-	llq_config->llq_ring_entry_size_value = 128;
-}
 
 static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
 {
@@ -4122,7 +4151,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
 	struct ena_com_dev_get_features_ctx get_feat_ctx;
-	struct ena_llq_configurations llq_config;
 	struct ena_com_dev *ena_dev = NULL;
 	struct ena_adapter *adapter;
 	struct net_device *netdev;
@@ -4177,13 +4205,10 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_free_region;
 	}
 
-	set_default_llq_configurations(&llq_config);
-
-	rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq,
-					     &llq_config);
+	rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
 	if (rc) {
-		dev_err(&pdev->dev, "ena device init failed\n");
-		goto err_device_destroy;
+		dev_err(&pdev->dev, "ena llq bar mapping failed\n");
+		goto err_free_ena_dev;
 	}
 
 	calc_queue_ctx.ena_dev = ena_dev;
@@ -4240,6 +4265,11 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	adapter->xdp_num_queues = 0;
 
 	adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
+	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+		adapter->disable_meta_caching =
+			!!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
+			   BIT(ENA_ADMIN_DISABLE_META_CACHING));
+
 	adapter->wd_state = wd_state;
 
 	snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index ba030d260940..0c8504006247 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -167,6 +167,7 @@ struct ena_napi {
 	struct ena_ring *rx_ring;
 	struct ena_ring *xdp_ring;
 	bool first_interrupt;
+	bool interrupts_masked;
 	u32 qid;
 	struct dim dim;
 };
@@ -297,6 +298,7 @@ struct ena_ring {
 	u8 tx_max_header_size;
 
 	bool first_interrupt;
+	bool disable_meta_caching;
 	u16 no_interrupt_event_cnt;
 
 	/* cpu for TPH */
@@ -398,6 +400,7 @@ struct ena_adapter {
 
 	bool wd_state;
 	bool dev_up_before_reset;
+	bool disable_meta_caching;
 	unsigned long last_keep_alive_jiffies;
 
 	struct u64_stats_sync syncp;
diff --git a/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h b/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h
index f80d2a47fa94..426e57e10a7f 100644
--- a/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h
+++ b/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h
@@ -53,10 +53,15 @@
 #define PCI_DEV_ID_ENA_LLQ_VF 0xec21
 #endif
 
+#ifndef PCI_DEV_ID_ENA_RESRV0
+#define PCI_DEV_ID_ENA_RESRV0 0x0051
+#endif
+
 #define ENA_PCI_ID_TABLE_ENTRY(devid) \
 	{PCI_DEVICE(PCI_VENDOR_ID_AMAZON, devid)},
 
 static const struct pci_device_id ena_pci_tbl[] = {
+	ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_RESRV0)
 	ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_PF)
 	ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_LLQ_PF)
 	ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_VF)