Merge branch 'mana-EQ-sharing'

Haiyang Zhang says:

====================
net: mana: Add support for EQ sharing

The existing code uses (1 + #vPorts * #Queues) MSI-X vectors, which may
exceed the device limit. Support EQ sharing, so that multiple vPorts can
share the same set of MSI-X vectors.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

commit e93826d35c
drivers/net/ethernet/microsoft/mana
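
As a rough illustration of the saving, the sketch below plugs the interrupt-budget formulas from the mana_gd_setup_irqs() hunk further down into a standalone program. The constants mirror the pre-series limits (MAX_PORTS_IN_MANA_DEV = 16, MANA_MAX_NUM_QUEUES = 16) and the post-series per-port queue limit (64); it is illustrative only, not driver code.

/* Standalone sketch of the MSI-X budget before and after EQ sharing.
 * The formulas mirror the mana_gd_setup_irqs() change in this merge;
 * the constants are the old limits (16 ports x 16 queues per port) and
 * the new per-port queue limit (64). Illustrative only, not driver code.
 */
#include <stdio.h>

int main(void)
{
        unsigned int old_ports = 16;            /* old MAX_PORTS_IN_MANA_DEV */
        unsigned int old_queues_per_port = 16;  /* old MANA_MAX_NUM_QUEUES */
        unsigned int new_queues_per_port = 64;  /* new MANA_MAX_NUM_QUEUES */

        /* Old scheme: one EQ, hence one MSI-X, per queue per vPort, plus
         * one vector for the Hardware Channel (HWC).
         */
        unsigned int old_irqs = 1 + old_ports * old_queues_per_port;

        /* New scheme: all vPorts share one set of EQs, so the budget no
         * longer scales with the number of ports.
         */
        unsigned int new_irqs = 1 + new_queues_per_port;

        printf("without EQ sharing: %u MSI-X vectors\n", old_irqs); /* 257 */
        printf("with EQ sharing:    %u MSI-X vectors\n", new_irqs); /*  65 */
        return 0;
}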
@@ -239,10 +239,8 @@ struct gdma_event {

struct gdma_queue;

#define CQE_POLLING_BUFFER 512
struct mana_eq {
struct gdma_queue *eq;
struct gdma_comp cqe_poll[CQE_POLLING_BUFFER];
};

typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
@@ -291,11 +289,6 @@ struct gdma_queue {
unsigned int msix_index;

u32 log2_throttle_limit;

/* NAPI data */
struct napi_struct napi;
int work_done;
int budget;
} eq;

struct {
@@ -319,9 +312,6 @@ struct gdma_queue_spec {
void *context;

unsigned long log2_throttle_limit;

/* Only used by the MANA device. */
struct net_device *ndev;
} eq;

struct {
@@ -406,7 +396,7 @@ void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);

int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);

void mana_gd_arm_cq(struct gdma_queue *cq);
void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);

struct gdma_wqe {
u32 reserved :24;
@@ -496,16 +486,28 @@ enum {
GDMA_PROTOCOL_LAST = GDMA_PROTOCOL_V1,
};

#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0)

#define GDMA_DRV_CAP_FLAGS1 GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT

#define GDMA_DRV_CAP_FLAGS2 0

#define GDMA_DRV_CAP_FLAGS3 0

#define GDMA_DRV_CAP_FLAGS4 0

struct gdma_verify_ver_req {
struct gdma_req_hdr hdr;

/* Mandatory fields required for protocol establishment */
u64 protocol_ver_min;
u64 protocol_ver_max;
u64 drv_cap_flags1;
u64 drv_cap_flags2;
u64 drv_cap_flags3;
u64 drv_cap_flags4;

/* Gdma Driver Capability Flags */
u64 gd_drv_cap_flags1;
u64 gd_drv_cap_flags2;
u64 gd_drv_cap_flags3;
u64 gd_drv_cap_flags4;

/* Advisory fields */
u64 drv_ver;
@@ -67,6 +67,10 @@ static int mana_gd_query_max_resources(struct pci_dev *pdev)
if (gc->max_num_queues > resp.max_rq)
gc->max_num_queues = resp.max_rq;

/* The Hardware Channel (HWC) used 1 MSI-X */
if (gc->max_num_queues > gc->num_msix_usable - 1)
gc->max_num_queues = gc->num_msix_usable - 1;

return 0;
}
@@ -267,7 +271,7 @@ void mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue)
queue->id, queue->head * GDMA_WQE_BU_SIZE, 1);
}

void mana_gd_arm_cq(struct gdma_queue *cq)
void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
{
struct gdma_context *gc = cq->gdma_dev->gdma_context;
@@ -276,7 +280,7 @@ void mana_gd_arm_cq(struct gdma_queue *cq)
u32 head = cq->head % (num_cqe << GDMA_CQE_OWNER_BITS);

mana_gd_ring_doorbell(gc, cq->gdma_dev->doorbell, cq->type, cq->id,
head, SET_ARM_BIT);
head, arm_bit);
}

static void mana_gd_process_eqe(struct gdma_queue *eq)
@@ -339,7 +343,6 @@ static void mana_gd_process_eq_events(void *arg)
struct gdma_queue *eq = arg;
struct gdma_context *gc;
struct gdma_eqe *eqe;
unsigned int arm_bit;
u32 head, num_eqe;
int i;
@@ -370,92 +373,54 @@ static void mana_gd_process_eq_events(void *arg)
eq->head++;
}

/* Always rearm the EQ for HWC. For MANA, rearm it when NAPI is done. */
if (mana_gd_is_hwc(eq->gdma_dev)) {
arm_bit = SET_ARM_BIT;
} else if (eq->eq.work_done < eq->eq.budget &&
napi_complete_done(&eq->eq.napi, eq->eq.work_done)) {
arm_bit = SET_ARM_BIT;
} else {
arm_bit = 0;
}

head = eq->head % (num_eqe << GDMA_EQE_OWNER_BITS);

mana_gd_ring_doorbell(gc, eq->gdma_dev->doorbell, eq->type, eq->id,
head, arm_bit);
}

static int mana_poll(struct napi_struct *napi, int budget)
{
struct gdma_queue *eq = container_of(napi, struct gdma_queue, eq.napi);

eq->eq.work_done = 0;
eq->eq.budget = budget;

mana_gd_process_eq_events(eq);

return min(eq->eq.work_done, budget);
}

static void mana_gd_schedule_napi(void *arg)
{
struct gdma_queue *eq = arg;
struct napi_struct *napi;

napi = &eq->eq.napi;
napi_schedule_irqoff(napi);
head, SET_ARM_BIT);
}

static int mana_gd_register_irq(struct gdma_queue *queue,
const struct gdma_queue_spec *spec)
{
struct gdma_dev *gd = queue->gdma_dev;
bool is_mana = mana_gd_is_mana(gd);
struct gdma_irq_context *gic;
struct gdma_context *gc;
struct gdma_resource *r;
unsigned int msi_index;
unsigned long flags;
int err;
struct device *dev;
int err = 0;

gc = gd->gdma_context;
r = &gc->msix_resource;
dev = gc->dev;

spin_lock_irqsave(&r->lock, flags);

msi_index = find_first_zero_bit(r->map, r->size);
if (msi_index >= r->size) {
if (msi_index >= r->size || msi_index >= gc->num_msix_usable) {
err = -ENOSPC;
} else {
bitmap_set(r->map, msi_index, 1);
queue->eq.msix_index = msi_index;
err = 0;
}

spin_unlock_irqrestore(&r->lock, flags);

if (err)
return err;
if (err) {
dev_err(dev, "Register IRQ err:%d, msi:%u rsize:%u, nMSI:%u",
err, msi_index, r->size, gc->num_msix_usable);

WARN_ON(msi_index >= gc->num_msix_usable);
return err;
}

gic = &gc->irq_contexts[msi_index];

if (is_mana) {
netif_napi_add(spec->eq.ndev, &queue->eq.napi, mana_poll,
NAPI_POLL_WEIGHT);
napi_enable(&queue->eq.napi);
}

WARN_ON(gic->handler || gic->arg);

gic->arg = queue;

if (is_mana)
gic->handler = mana_gd_schedule_napi;
else
gic->handler = mana_gd_process_eq_events;
gic->handler = mana_gd_process_eq_events;

return 0;
}
@@ -549,11 +514,6 @@ static void mana_gd_destroy_eq(struct gdma_context *gc, bool flush_evenets,

mana_gd_deregiser_irq(queue);

if (mana_gd_is_mana(queue->gdma_dev)) {
napi_disable(&queue->eq.napi);
netif_napi_del(&queue->eq.napi);
}

if (queue->eq.disable_needed)
mana_gd_disable_queue(queue);
}
@@ -883,6 +843,11 @@ int mana_gd_verify_vf_version(struct pci_dev *pdev)
req.protocol_ver_min = GDMA_PROTOCOL_FIRST;
req.protocol_ver_max = GDMA_PROTOCOL_LAST;

req.gd_drv_cap_flags1 = GDMA_DRV_CAP_FLAGS1;
req.gd_drv_cap_flags2 = GDMA_DRV_CAP_FLAGS2;
req.gd_drv_cap_flags3 = GDMA_DRV_CAP_FLAGS3;
req.gd_drv_cap_flags4 = GDMA_DRV_CAP_FLAGS4;

err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err || resp.hdr.status) {
dev_err(gc->dev, "VfVerifyVersionOutput: %d, status=0x%x\n",
@@ -1128,7 +1093,7 @@ static int mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp)

new_bits = (cq->head / num_cqe) & GDMA_CQE_OWNER_MASK;
/* Return -1 if overflow detected. */
if (owner_bits != new_bits)
if (WARN_ON_ONCE(owner_bits != new_bits))
return -1;

comp->wq_num = cqe->cqe_info.wq_num;
@@ -1201,10 +1166,8 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
max_queues_per_port = MANA_MAX_NUM_QUEUES;

max_irqs = max_queues_per_port * MAX_PORTS_IN_MANA_DEV;

/* Need 1 interrupt for the Hardware communication Channel (HWC) */
max_irqs++;
max_irqs = max_queues_per_port + 1;

nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX);
if (nvec < 0)
@@ -1291,6 +1254,9 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int bar = 0;
int err;

/* Each port has 2 CQs, each CQ has at most 1 EQE at a time */
BUILD_BUG_ON(2 * MAX_PORTS_IN_MANA_DEV * GDMA_EQE_SIZE > EQ_SIZE);

err = pci_enable_device(pdev);
if (err)
return -ENXIO;
@@ -304,7 +304,7 @@ static void mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self)
&comp_data);
}

mana_gd_arm_cq(q_self);
mana_gd_ring_cq(q_self, SET_ARM_BIT);
}

static void mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq)
@@ -46,7 +46,7 @@ enum TRI_STATE {
#define EQ_SIZE (8 * PAGE_SIZE)
#define LOG2_EQ_THROTTLE 3

#define MAX_PORTS_IN_MANA_DEV 16
#define MAX_PORTS_IN_MANA_DEV 256

struct mana_stats {
u64 packets;
@@ -225,6 +225,8 @@ struct mana_tx_comp_oob {

struct mana_rxq;

#define CQE_POLLING_BUFFER 512

struct mana_cq {
struct gdma_queue *gdma_cq;
@@ -244,8 +246,13 @@ struct mana_cq {
*/
struct mana_txq *txq;

/* Pointer to a buffer which the CQ handler can copy the CQE's into. */
struct gdma_comp *gdma_comp_buf;
/* Buffer which the CQ handler can copy the CQE's into. */
struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER];

/* NAPI data */
struct napi_struct napi;
int work_done;
int budget;
};

#define GDMA_MAX_RQE_SGES 15
@@ -315,6 +322,8 @@ struct mana_context {

u16 num_ports;

struct mana_eq *eqs;

struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
};
@@ -324,8 +333,6 @@ struct mana_port_context {

u8 mac_addr[ETH_ALEN];

struct mana_eq *eqs;

enum TRI_STATE rss_state;

mana_handle_t default_rxobj;
@@ -395,11 +402,11 @@ enum mana_command_code {
struct mana_query_device_cfg_req {
struct gdma_req_hdr hdr;

/* Driver Capability flags */
u64 drv_cap_flags1;
u64 drv_cap_flags2;
u64 drv_cap_flags3;
u64 drv_cap_flags4;
/* MANA Nic Driver Capability flags */
u64 mn_drv_cap_flags1;
u64 mn_drv_cap_flags2;
u64 mn_drv_cap_flags3;
u64 mn_drv_cap_flags4;

u32 proto_major_ver;
u32 proto_minor_ver;
@@ -516,7 +523,7 @@ struct mana_cfg_rx_steer_resp {
struct gdma_resp_hdr hdr;
}; /* HW DATA */

#define MANA_MAX_NUM_QUEUES 16
#define MANA_MAX_NUM_QUEUES 64

#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)
@@ -696,66 +696,56 @@ static void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
resp.hdr.status);
}

static void mana_init_cqe_poll_buf(struct gdma_comp *cqe_poll_buf)
{
int i;

for (i = 0; i < CQE_POLLING_BUFFER; i++)
memset(&cqe_poll_buf[i], 0, sizeof(struct gdma_comp));
}

static void mana_destroy_eq(struct gdma_context *gc,
struct mana_port_context *apc)
static void mana_destroy_eq(struct mana_context *ac)
{
struct gdma_context *gc = ac->gdma_dev->gdma_context;
struct gdma_queue *eq;
int i;

if (!apc->eqs)
if (!ac->eqs)
return;

for (i = 0; i < apc->num_queues; i++) {
eq = apc->eqs[i].eq;
for (i = 0; i < gc->max_num_queues; i++) {
eq = ac->eqs[i].eq;
if (!eq)
continue;

mana_gd_destroy_queue(gc, eq);
}

kfree(apc->eqs);
apc->eqs = NULL;
kfree(ac->eqs);
ac->eqs = NULL;
}

static int mana_create_eq(struct mana_port_context *apc)
static int mana_create_eq(struct mana_context *ac)
{
struct gdma_dev *gd = apc->ac->gdma_dev;
struct gdma_dev *gd = ac->gdma_dev;
struct gdma_context *gc = gd->gdma_context;
struct gdma_queue_spec spec = {};
int err;
int i;

apc->eqs = kcalloc(apc->num_queues, sizeof(struct mana_eq),
GFP_KERNEL);
if (!apc->eqs)
ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq),
GFP_KERNEL);
if (!ac->eqs)
return -ENOMEM;

spec.type = GDMA_EQ;
spec.monitor_avl_buf = false;
spec.queue_size = EQ_SIZE;
spec.eq.callback = NULL;
spec.eq.context = apc->eqs;
spec.eq.context = ac->eqs;
spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
spec.eq.ndev = apc->ndev;

for (i = 0; i < apc->num_queues; i++) {
mana_init_cqe_poll_buf(apc->eqs[i].cqe_poll);

err = mana_gd_create_mana_eq(gd, &spec, &apc->eqs[i].eq);
for (i = 0; i < gc->max_num_queues; i++) {
err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
if (err)
goto out;
}

return 0;
out:
mana_destroy_eq(gd->gdma_context, apc);
mana_destroy_eq(ac);
return err;
}
@@ -790,7 +780,6 @@ static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)

static void mana_poll_tx_cq(struct mana_cq *cq)
{
struct gdma_queue *gdma_eq = cq->gdma_cq->cq.parent;
struct gdma_comp *completions = cq->gdma_comp_buf;
struct gdma_posted_wqe_info *wqe_info;
unsigned int pkt_transmitted = 0;
@@ -812,6 +801,9 @@ static void mana_poll_tx_cq(struct mana_cq *cq)
comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
CQE_POLLING_BUFFER);

if (comp_read < 1)
return;

for (i = 0; i < comp_read; i++) {
struct mana_tx_comp_oob *cqe_oob;
@@ -861,7 +853,7 @@ static void mana_poll_tx_cq(struct mana_cq *cq)

mana_unmap_skb(skb, apc);

napi_consume_skb(skb, gdma_eq->eq.budget);
napi_consume_skb(skb, cq->budget);

pkt_transmitted++;
}
@@ -890,6 +882,8 @@ static void mana_poll_tx_cq(struct mana_cq *cq)

if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
WARN_ON_ONCE(1);

cq->work_done = pkt_transmitted;
}

static void mana_post_pkt_rxq(struct mana_rxq *rxq)
@@ -918,17 +912,13 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
struct mana_stats *rx_stats = &rxq->stats;
struct net_device *ndev = rxq->ndev;
uint pkt_len = cqe->ppi[0].pkt_len;
struct mana_port_context *apc;
u16 rxq_idx = rxq->rxq_idx;
struct napi_struct *napi;
struct gdma_queue *eq;
struct sk_buff *skb;
u32 hash_value;

apc = netdev_priv(ndev);
eq = apc->eqs[rxq_idx].eq;
eq->eq.work_done++;
napi = &eq->eq.napi;
rxq->rx_cq.work_done++;
napi = &rxq->rx_cq.napi;

if (!buf_va) {
++ndev->stats.rx_dropped;
@@ -1081,6 +1071,7 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
static void mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
{
struct mana_cq *cq = context;
u8 arm_bit;

WARN_ON_ONCE(cq->gdma_cq != gdma_queue);
@@ -1089,7 +1080,33 @@ static void mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
else
mana_poll_tx_cq(cq);

mana_gd_arm_cq(gdma_queue);
if (cq->work_done < cq->budget &&
napi_complete_done(&cq->napi, cq->work_done)) {
arm_bit = SET_ARM_BIT;
} else {
arm_bit = 0;
}

mana_gd_ring_cq(gdma_queue, arm_bit);
}

static int mana_poll(struct napi_struct *napi, int budget)
{
struct mana_cq *cq = container_of(napi, struct mana_cq, napi);

cq->work_done = 0;
cq->budget = budget;

mana_cq_handler(cq, cq->gdma_cq);

return min(cq->work_done, budget);
}

static void mana_schedule_napi(void *context, struct gdma_queue *gdma_queue)
{
struct mana_cq *cq = context;

napi_schedule_irqoff(&cq->napi);
}

static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
@@ -1114,12 +1131,18 @@ static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)

static void mana_destroy_txq(struct mana_port_context *apc)
{
struct napi_struct *napi;
int i;

if (!apc->tx_qp)
return;

for (i = 0; i < apc->num_queues; i++) {
napi = &apc->tx_qp[i].tx_cq.napi;
napi_synchronize(napi);
napi_disable(napi);
netif_napi_del(napi);

mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);

mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
@@ -1134,7 +1157,8 @@ static void mana_destroy_txq(struct mana_port_context *apc)

static int mana_create_txq(struct mana_port_context *apc,
struct net_device *net)
{
struct gdma_dev *gd = apc->ac->gdma_dev;
struct mana_context *ac = apc->ac;
struct gdma_dev *gd = ac->gdma_dev;
struct mana_obj_spec wq_spec;
struct mana_obj_spec cq_spec;
struct gdma_queue_spec spec;
@@ -1186,7 +1210,6 @@ static int mana_create_txq(struct mana_port_context *apc,

/* Create SQ's CQ */
cq = &apc->tx_qp[i].tx_cq;
cq->gdma_comp_buf = apc->eqs[i].cqe_poll;
cq->type = MANA_CQ_TYPE_TX;

cq->txq = txq;
@@ -1195,8 +1218,8 @@ static int mana_create_txq(struct mana_port_context *apc,
spec.type = GDMA_CQ;
spec.monitor_avl_buf = false;
spec.queue_size = cq_size;
spec.cq.callback = mana_cq_handler;
spec.cq.parent_eq = apc->eqs[i].eq;
spec.cq.callback = mana_schedule_napi;
spec.cq.parent_eq = ac->eqs[i].eq;
spec.cq.context = cq;
err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
if (err)
@@ -1237,7 +1260,10 @@ static int mana_create_txq(struct mana_port_context *apc,

gc->cq_table[cq->gdma_id] = cq->gdma_cq;

mana_gd_arm_cq(cq->gdma_cq);
netif_tx_napi_add(net, &cq->napi, mana_poll, NAPI_POLL_WEIGHT);
napi_enable(&cq->napi);

mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
}

return 0;
@@ -1246,21 +1272,6 @@ out:

return err;
}

static void mana_napi_sync_for_rx(struct mana_rxq *rxq)
{
struct net_device *ndev = rxq->ndev;
struct mana_port_context *apc;
u16 rxq_idx = rxq->rxq_idx;
struct napi_struct *napi;
struct gdma_queue *eq;

apc = netdev_priv(ndev);
eq = apc->eqs[rxq_idx].eq;
napi = &eq->eq.napi;

napi_synchronize(napi);
}

static void mana_destroy_rxq(struct mana_port_context *apc,
struct mana_rxq *rxq, bool validate_state)
@@ -1268,13 +1279,19 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
struct mana_recv_buf_oob *rx_oob;
struct device *dev = gc->dev;
struct napi_struct *napi;
int i;

if (!rxq)
return;

napi = &rxq->rx_cq.napi;

if (validate_state)
mana_napi_sync_for_rx(rxq);
napi_synchronize(napi);

napi_disable(napi);
netif_napi_del(napi);

mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
@@ -1418,7 +1435,6 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,

/* Create RQ's CQ */
cq = &rxq->rx_cq;
cq->gdma_comp_buf = eq->cqe_poll;
cq->type = MANA_CQ_TYPE_RX;
cq->rxq = rxq;
@@ -1426,7 +1442,7 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
spec.type = GDMA_CQ;
spec.monitor_avl_buf = false;
spec.queue_size = cq_size;
spec.cq.callback = mana_cq_handler;
spec.cq.callback = mana_schedule_napi;
spec.cq.parent_eq = eq->eq;
spec.cq.context = cq;
err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
@@ -1466,7 +1482,10 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,

gc->cq_table[cq->gdma_id] = cq->gdma_cq;

mana_gd_arm_cq(cq->gdma_cq);
netif_napi_add(ndev, &cq->napi, mana_poll, 1);
napi_enable(&cq->napi);

mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
out:
if (!err)
return rxq;
@@ -1484,12 +1503,13 @@ out:
static int mana_add_rx_queues(struct mana_port_context *apc,
struct net_device *ndev)
{
struct mana_context *ac = apc->ac;
struct mana_rxq *rxq;
int err = 0;
int i;

for (i = 0; i < apc->num_queues; i++) {
rxq = mana_create_rxq(apc, i, &apc->eqs[i], ndev);
rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
if (!rxq) {
err = -ENOMEM;
goto out;
@@ -1601,16 +1621,11 @@ reset_apc:
int mana_alloc_queues(struct net_device *ndev)
{
struct mana_port_context *apc = netdev_priv(ndev);
struct gdma_dev *gd = apc->ac->gdma_dev;
int err;

err = mana_create_eq(apc);
if (err)
return err;

err = mana_create_vport(apc, ndev);
if (err)
goto destroy_eq;
return err;

err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
if (err)
@@ -1636,8 +1651,6 @@ int mana_alloc_queues(struct net_device *ndev)

destroy_vport:
mana_destroy_vport(apc);
destroy_eq:
mana_destroy_eq(gd->gdma_context, apc);
return err;
}
@@ -1714,8 +1727,6 @@ static int mana_dealloc_queues(struct net_device *ndev)

mana_destroy_vport(apc);

mana_destroy_eq(apc->ac->gdma_dev->gdma_context, apc);

return 0;
}
@@ -1768,7 +1779,7 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
apc->ac = ac;
apc->ndev = ndev;
apc->max_queues = gc->max_num_queues;
apc->num_queues = min_t(uint, gc->max_num_queues, MANA_MAX_NUM_QUEUES);
apc->num_queues = gc->max_num_queues;
apc->port_handle = INVALID_MANA_HANDLE;
apc->port_idx = port_idx;
@@ -1839,6 +1850,10 @@ int mana_probe(struct gdma_dev *gd)
ac->num_ports = 1;
gd->driver_data = ac;

err = mana_create_eq(ac);
if (err)
goto out;

err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
MANA_MICRO_VERSION, &ac->num_ports);
if (err)
@@ -1888,6 +1903,9 @@ void mana_remove(struct gdma_dev *gd)

free_netdev(ndev);
}

mana_destroy_eq(ac);

out:
mana_gd_deregister_device(gd);
gd->driver_data = NULL;