net: mana: add msix index sharing between EQs
This patch allows assigning and polling more than one EQ on the same msix index. It is achieved by introducing a list of attached EQs in each IRQ context. It also removes the existing msix_index map that tried to ensure that there is only one EQ at each msix_index.

This patch exports symbols for creating EQs from other MANA kernel modules.

Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent 10b7572d17
commit 02fed6d92b
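The mechanism is a standard RCU-protected list pattern: each MSI-X vector's IRQ context keeps a list of attached EQs, the control path adds and removes entries under a spinlock, and the hard-IRQ handler walks the list locklessly under rcu_read_lock(). Below is a minimal sketch of that pattern, condensed from the diff that follows; the struct fields and the list/RCU calls mirror the patch, but the helpers attach_eq(), detach_eq() and shared_eq_intr() are illustrative names, not functions in the driver.

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct gdma_irq_context {
	void (*handler)(void *arg);	/* mana_gd_process_eq_events() */
	spinlock_t lock;		/* protects writers of eq_list */
	struct list_head eq_list;	/* EQs sharing this MSI-X vector */
};

struct gdma_queue {
	struct list_head entry;		/* links this EQ into one eq_list */
	/* other queue fields elided */
};

/* Control path: attach an EQ to an initialized IRQ context. */
static void attach_eq(struct gdma_irq_context *gic, struct gdma_queue *eq)
{
	unsigned long flags;

	spin_lock_irqsave(&gic->lock, flags);
	list_add_rcu(&eq->entry, &gic->eq_list);
	spin_unlock_irqrestore(&gic->lock, flags);
}

/* Control path: detach an EQ. synchronize_rcu() guarantees that no
 * hard-IRQ reader still sees the entry before the caller frees the queue.
 */
static void detach_eq(struct gdma_irq_context *gic, struct gdma_queue *eq)
{
	unsigned long flags;

	spin_lock_irqsave(&gic->lock, flags);
	list_del_rcu(&eq->entry);
	spin_unlock_irqrestore(&gic->lock, flags);

	synchronize_rcu();
}

/* Hot path: the hard-IRQ handler polls every EQ sharing the vector. */
static irqreturn_t shared_eq_intr(int irq, void *arg)
{
	struct gdma_irq_context *gic = arg;
	struct gdma_queue *eq;

	rcu_read_lock();
	list_for_each_entry_rcu(eq, &gic->eq_list, entry)
		gic->handler(eq);
	rcu_read_unlock();

	return IRQ_HANDLED;
}

This split keeps the interrupt hot path lock-free: attach and detach are rare control-path operations that can afford the per-vector spinlock, while the synchronize_rcu() in the detach path is what lets mana_gd_deregiser_irq() return only once no in-flight interrupt can still reference the EQ.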
drivers/net/ethernet/microsoft/mana/gdma_main.c

@@ -414,8 +414,12 @@ static void mana_gd_process_eq_events(void *arg)
 		old_bits = (eq->head / num_eqe - 1) & GDMA_EQE_OWNER_MASK;
 		/* No more entries */
-		if (owner_bits == old_bits)
+		if (owner_bits == old_bits) {
+			/* return here without ringing the doorbell */
+			if (i == 0)
+				return;
 			break;
+		}
 
 		new_bits = (eq->head / num_eqe) & GDMA_EQE_OWNER_MASK;
 		if (owner_bits != new_bits) {
@@ -445,42 +449,29 @@ static int mana_gd_register_irq(struct gdma_queue *queue,
 	struct gdma_dev *gd = queue->gdma_dev;
 	struct gdma_irq_context *gic;
 	struct gdma_context *gc;
-	struct gdma_resource *r;
 	unsigned int msi_index;
 	unsigned long flags;
 	struct device *dev;
 	int err = 0;
 
 	gc = gd->gdma_context;
-	r = &gc->msix_resource;
 	dev = gc->dev;
+	msi_index = spec->eq.msix_index;
 
-	spin_lock_irqsave(&r->lock, flags);
-
-	msi_index = find_first_zero_bit(r->map, r->size);
-	if (msi_index >= r->size || msi_index >= gc->num_msix_usable) {
+	if (msi_index >= gc->num_msix_usable) {
 		err = -ENOSPC;
-	} else {
-		bitmap_set(r->map, msi_index, 1);
-		queue->eq.msix_index = msi_index;
-	}
-
-	spin_unlock_irqrestore(&r->lock, flags);
-
-	if (err) {
-		dev_err(dev, "Register IRQ err:%d, msi:%u rsize:%u, nMSI:%u",
-			err, msi_index, r->size, gc->num_msix_usable);
+		dev_err(dev, "Register IRQ err:%d, msi:%u nMSI:%u",
+			err, msi_index, gc->num_msix_usable);
 
 		return err;
 	}
 
+	queue->eq.msix_index = msi_index;
 	gic = &gc->irq_contexts[msi_index];
 
-	WARN_ON(gic->handler || gic->arg);
-
-	gic->arg = queue;
-
-	gic->handler = mana_gd_process_eq_events;
+	spin_lock_irqsave(&gic->lock, flags);
+	list_add_rcu(&queue->entry, &gic->eq_list);
+	spin_unlock_irqrestore(&gic->lock, flags);
 
 	return 0;
 }
@@ -490,12 +481,11 @@ static void mana_gd_deregiser_irq(struct gdma_queue *queue)
 	struct gdma_dev *gd = queue->gdma_dev;
 	struct gdma_irq_context *gic;
 	struct gdma_context *gc;
-	struct gdma_resource *r;
 	unsigned int msix_index;
 	unsigned long flags;
+	struct gdma_queue *eq;
 
 	gc = gd->gdma_context;
-	r = &gc->msix_resource;
 
 	/* At most num_online_cpus() + 1 interrupts are used. */
 	msix_index = queue->eq.msix_index;
@@ -503,14 +493,17 @@ static void mana_gd_deregiser_irq(struct gdma_queue *queue)
 		return;
 
 	gic = &gc->irq_contexts[msix_index];
-	gic->handler = NULL;
-	gic->arg = NULL;
-
-	spin_lock_irqsave(&r->lock, flags);
-	bitmap_clear(r->map, msix_index, 1);
-	spin_unlock_irqrestore(&r->lock, flags);
+	spin_lock_irqsave(&gic->lock, flags);
+	list_for_each_entry_rcu(eq, &gic->eq_list, entry) {
+		if (queue == eq) {
+			list_del_rcu(&eq->entry);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&gic->lock, flags);
 
 	queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
+	synchronize_rcu();
 }
 
 int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq)
@@ -588,6 +581,7 @@ static int mana_gd_create_eq(struct gdma_dev *gd,
 	int err;
 
 	queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
+	queue->id = INVALID_QUEUE_ID;
 
 	log2_num_entries = ilog2(queue->queue_size / GDMA_EQE_SIZE);
 
@@ -819,6 +813,7 @@ free_q:
 	kfree(queue);
 	return err;
 }
+EXPORT_SYMBOL_NS(mana_gd_create_mana_eq, NET_MANA);
 
 int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
 			      const struct gdma_queue_spec *spec,
@@ -895,6 +890,7 @@ void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
 	mana_gd_free_memory(gmi);
 	kfree(queue);
 }
+EXPORT_SYMBOL_NS(mana_gd_destroy_queue, NET_MANA);
 
 int mana_gd_verify_vf_version(struct pci_dev *pdev)
 {
@@ -1217,9 +1213,14 @@ int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe)
 static irqreturn_t mana_gd_intr(int irq, void *arg)
 {
 	struct gdma_irq_context *gic = arg;
+	struct list_head *eq_list = &gic->eq_list;
+	struct gdma_queue *eq;
 
-	if (gic->handler)
-		gic->handler(gic->arg);
+	rcu_read_lock();
+	list_for_each_entry_rcu(eq, eq_list, entry) {
+		gic->handler(eq);
+	}
+	rcu_read_unlock();
 
 	return IRQ_HANDLED;
 }
@@ -1271,8 +1272,9 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
 
 	for (i = 0; i < nvec; i++) {
 		gic = &gc->irq_contexts[i];
-		gic->handler = NULL;
-		gic->arg = NULL;
+		gic->handler = mana_gd_process_eq_events;
+		INIT_LIST_HEAD(&gic->eq_list);
+		spin_lock_init(&gic->lock);
 
 		if (!i)
 			snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_hwc@pci:%s",
@@ -1295,10 +1297,6 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
 		irq_set_affinity_and_hint(irq, cpumask_of(cpu));
 	}
 
-	err = mana_gd_alloc_res_map(nvec, &gc->msix_resource);
-	if (err)
-		goto free_irq;
-
 	gc->max_num_msix = nvec;
 	gc->num_msix_usable = nvec;
 
@@ -1329,8 +1327,6 @@ static void mana_gd_remove_irqs(struct pci_dev *pdev)
 	if (gc->max_num_msix < 1)
 		return;
 
-	mana_gd_free_res_map(&gc->msix_resource);
-
 	for (i = 0; i < gc->max_num_msix; i++) {
 		irq = pci_irq_vector(pdev, i);
 		if (irq < 0)
drivers/net/ethernet/microsoft/mana/hw_channel.c

@@ -300,6 +300,7 @@ static int mana_hwc_create_gdma_eq(struct hw_channel_context *hwc,
 	spec.eq.context = ctx;
 	spec.eq.callback = cb;
 	spec.eq.log2_throttle_limit = DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ;
+	spec.eq.msix_index = 0;
 
 	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
 }
drivers/net/ethernet/microsoft/mana/mana_en.c

@@ -1244,6 +1244,7 @@ static int mana_create_eq(struct mana_context *ac)
 	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
 
 	for (i = 0; i < gc->max_num_queues; i++) {
+		spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
 		err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
 		if (err)
 			goto out;
include/net/mana/gdma.h

@@ -293,6 +293,7 @@ struct gdma_queue {
 
 	u32 head;
 	u32 tail;
+	struct list_head entry;
 
 	/* Extra fields specific to EQ/CQ. */
 	union {
@@ -328,6 +329,7 @@ struct gdma_queue_spec {
 		void *context;
 
 		unsigned long log2_throttle_limit;
+		unsigned int msix_index;
 	} eq;
 
 	struct {
@@ -344,7 +346,9 @@ struct gdma_queue_spec {
 
 struct gdma_irq_context {
 	void (*handler)(void *arg);
-	void *arg;
+	/* Protect the eq_list */
+	spinlock_t lock;
+	struct list_head eq_list;
 	char name[MANA_IRQ_NAME_SZ];
 };
 
@@ -355,7 +359,6 @@ struct gdma_context {
 	unsigned int max_num_queues;
 	unsigned int max_num_msix;
 	unsigned int num_msix_usable;
-	struct gdma_resource msix_resource;
 	struct gdma_irq_context *irq_contexts;
 
 	/* L2 MTU */