soc: ti: ti_sci_inta_msi: Rework MSI descriptor allocation

Protect the allocation properly and use the core allocation and free
mechanism.

No functional change intended.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Nishanth Menon <nm@ti.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20211206210748.737904583@linutronix.de
Author: Thomas Gleixner, 2021-12-06 23:51:36 +01:00
Parent: 68e3183580
Commit: 49fbfdc222

@@ -51,6 +51,7 @@ struct irq_domain *ti_sci_inta_msi_create_irq_domain(struct fwnode_handle *fwnod
struct irq_domain *domain;
ti_sci_inta_msi_update_chip_ops(info);
info->flags |= MSI_FLAG_FREE_MSI_DESCS;
domain = msi_create_irq_domain(fwnode, info, parent);
if (domain)
@@ -60,50 +61,32 @@ struct irq_domain *ti_sci_inta_msi_create_irq_domain(struct fwnode_handle *fwnod
}
EXPORT_SYMBOL_GPL(ti_sci_inta_msi_create_irq_domain);
/*
 * ti_sci_inta_msi_free_descs() - Release every MSI descriptor attached to
 * @dev by walking the device's MSI list and freeing each entry.
 *
 * NOTE(review): this is the pre-patch helper. The commit shown here removes
 * it in favour of the MSI core's MSI_FLAG_FREE_MSI_DESCS handling (set on
 * the domain above) and msi_free_msi_descs(); it is listed in the diff as a
 * deleted function.
 */
static void ti_sci_inta_msi_free_descs(struct device *dev)
{
struct msi_desc *desc, *tmp;
/* _safe variant: each entry is unlinked and freed while iterating. */
list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
list_del(&desc->list);
free_msi_entry(desc);
}
}
/*
 * ti_sci_inta_msi_alloc_descs() - Allocate MSI descriptors for every
 * interrupt described by @res and attach them to @dev.
 * @dev: Device to attach the descriptors to.
 * @res: TI-SCI resource ranges (primary and secondary per set).
 *
 * Uses a single template descriptor on the stack; msi_add_msi_desc()
 * duplicates it into the device's MSI descriptor storage, replacing the
 * old open-coded alloc_msi_entry()/list_add_tail() scheme.
 *
 * NOTE(review): the caller is expected to hold the device's MSI descriptor
 * lock (msi_lock_descs()) — confirm against the alloc_irqs caller.
 *
 * Return: number of descriptors allocated on success, -ENOMEM on failure.
 * On failure every descriptor added so far is released again.
 */
static int ti_sci_inta_msi_alloc_descs(struct device *dev,
				       struct ti_sci_resource *res)
{
	struct msi_desc msi_desc;
	int set, i, count = 0;

	memset(&msi_desc, 0, sizeof(msi_desc));
	msi_desc.nvec_used = 1;

	/* Each resource set describes a primary and a secondary range. */
	for (set = 0; set < res->sets; set++) {
		for (i = 0; i < res->desc[set].num; i++, count++) {
			msi_desc.msi_index = res->desc[set].start + i;
			if (msi_add_msi_desc(dev, &msi_desc))
				goto fail;
		}
		for (i = 0; i < res->desc[set].num_sec; i++, count++) {
			msi_desc.msi_index = res->desc[set].start_sec + i;
			if (msi_add_msi_desc(dev, &msi_desc))
				goto fail;
		}
	}
	return count;
fail:
	/* Drop every descriptor added so far. */
	msi_free_msi_descs(dev);
	return -ENOMEM;
}
/*
 * NOTE(review): this span is a unified-diff hunk that lost its +/- markers —
 * pre-patch and post-patch lines of ti_sci_inta_msi_domain_alloc_irqs() are
 * interleaved below, and the function head (parameter list continuation,
 * locals, msi_domain lookup) lies outside the visible hunk. The annotations
 * mark which version each run of lines appears to belong to; confirm against
 * the upstream commit before reconstructing either version.
 */
int ti_sci_inta_msi_domain_alloc_irqs(struct device *dev,
@ -124,20 +107,18 @@ int ti_sci_inta_msi_domain_alloc_irqs(struct device *dev,
if (ret)
return ret;
/* Post-patch: hold the MSI descriptor mutex across alloc + irq setup. */
msi_lock_descs(dev);
nvec = ti_sci_inta_msi_alloc_descs(dev, res);
/* Pre-patch early return (replaced by the goto-unlock form below). */
if (nvec <= 0)
return nvec;
/* Pre-patch allocation call and its error handling. */
ret = msi_domain_alloc_irqs(msi_domain, dev, nvec);
if (ret) {
dev_err(dev, "Failed to allocate IRQs %d\n", ret);
goto cleanup;
/* Post-patch error path: propagate nvec as ret and drop the lock. */
if (nvec <= 0) {
ret = nvec;
goto unlock;
}
return 0;
cleanup:
ti_sci_inta_msi_free_descs(&pdev->dev);
/* Post-patch: _descs_locked variant, called with the mutex already held. */
ret = msi_domain_alloc_irqs_descs_locked(msi_domain, dev, nvec);
if (ret)
dev_err(dev, "Failed to allocate IRQs %d\n", ret);
unlock:
msi_unlock_descs(dev);
return ret;
}
EXPORT_SYMBOL_GPL(ti_sci_inta_msi_domain_alloc_irqs);
@@ -145,6 +126,5 @@ EXPORT_SYMBOL_GPL(ti_sci_inta_msi_domain_alloc_irqs);
/**
 * ti_sci_inta_msi_domain_free_irqs() - Tear down all MSI domain
 * interrupts allocated for @dev.
 * @dev: Device whose MSI interrupts are freed.
 *
 * The MSI core also frees the descriptors because the domain is created
 * with MSI_FLAG_FREE_MSI_DESCS set, so the former manual call to
 * ti_sci_inta_msi_free_descs() (a pre-patch leftover in this span) is
 * no longer needed and has been dropped.
 */
void ti_sci_inta_msi_domain_free_irqs(struct device *dev)
{
	msi_domain_free_irqs(dev->msi.domain, dev);
}
EXPORT_SYMBOL_GPL(ti_sci_inta_msi_domain_free_irqs);