Merge tag 'irq-urgent-2021-11-14' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq fixes from Thomas Gleixner:
 "A set of fixes for the interrupt subsystem:

  Core code:

   - A regression fix for the Open Firmware interrupt mapping code
     where an interrupt-controller property in a node caused a map
     property in the same node to be ignored.

  Interrupt chip drivers:

   - Work around a limitation in the SiFive PLIC interrupt chip, which
     silently ignores an EOI when the interrupt line is masked.

   - Provide the missing mask/unmask implementation for the CSKY MP
     interrupt controller.

  PCI/MSI:

   - Prevent a use-after-free when PCI/MSI interrupts are released, by
     destroying the sysfs entries before freeing the memory which is
     accessed in the sysfs show() function.

   - Implement a mask quirk for the Nvidia ION AHCI chip, which does
     not advertise masking capability despite implementing it. Even
     worse, the chip comes out of reset with all MSI entries masked,
     which, due to the missing masking capability, never get unmasked.

   - Move the check which prevents accessing the MSI[X] masking for
     XEN back into the low-level accessors. The recent consolidation
     missed that these accessors can be invoked from places which do
     not have that check, which broke XEN. Move them back to the
     original place instead of sprinkling tons of these checks all
     over the code."

* tag 'irq-urgent-2021-11-14' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  of/irq: Don't ignore interrupt-controller when interrupt-map failed
  irqchip/sifive-plic: Fixup EOI failed when masked
  irqchip/csky-mpintc: Fixup mask/unmask implementation
  PCI/MSI: Destroy sysfs before freeing entries
  PCI: Add MSI masking quirk for Nvidia ION AHCI
  PCI/MSI: Deal with devices lying about their MSI mask capability
  PCI/MSI: Move non-mask check back into low level accessors
commit c36e33e2f4
diff --git a/drivers/irqchip/irq-csky-mpintc.c b/drivers/irqchip/irq-csky-mpintc.c
@@ -78,7 +78,7 @@ static void csky_mpintc_handler(struct pt_regs *regs)
 		readl_relaxed(reg_base + INTCL_RDYIR));
 }
 
-static void csky_mpintc_enable(struct irq_data *d)
+static void csky_mpintc_unmask(struct irq_data *d)
 {
 	void __iomem *reg_base = this_cpu_read(intcl_reg);
 
@@ -87,7 +87,7 @@ static void csky_mpintc_enable(struct irq_data *d)
 	writel_relaxed(d->hwirq, reg_base + INTCL_SENR);
 }
 
-static void csky_mpintc_disable(struct irq_data *d)
+static void csky_mpintc_mask(struct irq_data *d)
 {
 	void __iomem *reg_base = this_cpu_read(intcl_reg);
 
@@ -164,8 +164,8 @@ static int csky_irq_set_affinity(struct irq_data *d,
 static struct irq_chip csky_irq_chip = {
 	.name           = "C-SKY SMP Intc",
 	.irq_eoi        = csky_mpintc_eoi,
-	.irq_enable     = csky_mpintc_enable,
-	.irq_disable    = csky_mpintc_disable,
+	.irq_unmask     = csky_mpintc_unmask,
+	.irq_mask       = csky_mpintc_mask,
 	.irq_set_type   = csky_mpintc_set_type,
 #ifdef CONFIG_SMP
 	.irq_set_affinity = csky_irq_set_affinity,
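Why the rename matters: the generic irq core masks and unmasks lines through the chip's .irq_mask/.irq_unmask callbacks (for lazy disable, threaded handlers, and similar flows), and a chip that only provides .irq_enable/.irq_disable leaves that path a silent no-op. Below is a minimal userspace sketch of that failure mode; the types, names, and behaviour are invented for illustration and are not the real kernel API.

#include <stdio.h>

/* Toy stand-in for struct irq_chip; illustrative only. */
struct toy_irq_chip {
	const char *name;
	void (*irq_mask)(int hwirq);
	void (*irq_unmask)(int hwirq);
};

static void mpintc_mask(int hwirq)   { printf("hwirq %d masked\n", hwirq); }
static void mpintc_unmask(int hwirq) { printf("hwirq %d unmasked\n", hwirq); }

/* Rough analogue of the core's mask path. */
static void core_mask_irq(const struct toy_irq_chip *chip, int hwirq)
{
	if (chip->irq_mask)
		chip->irq_mask(hwirq);
	else
		printf("hwirq %d: no .irq_mask, line stays live!\n", hwirq);
}

int main(void)
{
	const struct toy_irq_chip before = { .name = "enable/disable only" };
	const struct toy_irq_chip after  = { .name = "C-SKY SMP Intc",
					     .irq_mask   = mpintc_mask,
					     .irq_unmask = mpintc_unmask };

	core_mask_irq(&before, 42);	/* silently does nothing */
	core_mask_irq(&after, 42);	/* actually masks the line */
	return 0;
}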
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
@@ -163,7 +163,13 @@ static void plic_irq_eoi(struct irq_data *d)
 {
 	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
 
-	writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+	if (irqd_irq_masked(d)) {
+		plic_irq_unmask(d);
+		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+		plic_irq_mask(d);
+	} else {
+		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+	}
 }
 
 static struct irq_chip plic_chip = {
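The PLIC completes an EOI only while the interrupt source's enable bit is set; a completion written while the source is masked is silently dropped, leaving the source claimed forever. The fix therefore briefly unmasks around the completion write. A rough userspace model of that behaviour, with invented names, looks like this:

#include <stdbool.h>
#include <stdio.h>

static bool enabled;	/* per-source enable bit */
static bool claimed;	/* set on claim, cleared by an accepted completion */

/* Model of the hardware: completion only counts while enabled. */
static void plic_complete(int hwirq)
{
	if (enabled)
		claimed = false;
	else
		printf("EOI for hwirq %d silently dropped\n", hwirq);
}

/* Mirrors the fixed plic_irq_eoi(): briefly unmask around the EOI. */
static void eoi_fixed(int hwirq)
{
	if (!enabled) {			/* analogous to irqd_irq_masked(d) */
		enabled = true;		/* plic_irq_unmask(d) */
		plic_complete(hwirq);	/* writel(..., CONTEXT_CLAIM) */
		enabled = false;	/* plic_irq_mask(d) */
	} else {
		plic_complete(hwirq);
	}
}

int main(void)
{
	enabled = false;	/* line masked, e.g. by disable_irq() */
	claimed = true;		/* interrupt claimed before it was masked */

	eoi_fixed(7);
	printf("source stuck: %s\n", claimed ? "yes" : "no");
	return 0;
}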
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
@@ -161,9 +161,10 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
 		 * if it is then we are done, unless there is an
 		 * interrupt-map which takes precedence.
 		 */
+		bool intc = of_property_read_bool(ipar, "interrupt-controller");
+
 		imap = of_get_property(ipar, "interrupt-map", &imaplen);
-		if (imap == NULL &&
-		    of_property_read_bool(ipar, "interrupt-controller")) {
+		if (imap == NULL && intc) {
 			pr_debug(" -> got it !\n");
 			return 0;
 		}
@@ -244,8 +245,20 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
 
 			pr_debug(" -> imaplen=%d\n", imaplen);
 		}
-		if (!match)
+		if (!match) {
+			if (intc) {
+				/*
+				 * The PASEMI Nemo is a known offender, so
+				 * let's only warn for anyone else.
+				 */
+				WARN(!IS_ENABLED(CONFIG_PPC_PASEMI),
+				     "%pOF interrupt-map failed, using interrupt-controller\n",
+				     ipar);
+				return 0;
+			}
+
 			goto fail;
+		}
 
 		/*
 		 * Successfully parsed an interrrupt-map translation; copy new
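The shape of the fix, reduced to its decision logic: interrupt-map still takes precedence, but a map that fails to match no longer hides an interrupt-controller property in the same node. The standalone sketch below compresses of_irq_parse_raw()'s loop into a single function with invented names; it is an approximation, not the real OF parser.

#include <stdbool.h>
#include <stdio.h>

enum parse_result { RESOLVED, FAILED };

static enum parse_result parse_node(bool has_intc, bool has_map,
				    bool map_matches)
{
	/* No map at all: a plain interrupt-controller terminates the walk. */
	if (!has_map && has_intc)
		return RESOLVED;

	if (has_map && !map_matches) {
		/* New behaviour: fall back to the controller (with a
		 * warning in the real code) instead of failing. */
		if (has_intc)
			return RESOLVED;
		return FAILED;
	}

	return has_map ? RESOLVED : FAILED;
}

int main(void)
{
	/* A node with both properties whose map fails to match (the
	 * PASEMI Nemo case) now resolves instead of failing. */
	printf("%s\n",
	       parse_node(true, true, false) == RESOLVED ? "resolved"
							 : "failed");
	return 0;
}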
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
@@ -148,6 +148,9 @@ static noinline void pci_msi_update_mask(struct msi_desc *desc, u32 clear, u32 set)
 	raw_spinlock_t *lock = &desc->dev->msi_lock;
 	unsigned long flags;
 
+	if (!desc->msi_attrib.can_mask)
+		return;
+
 	raw_spin_lock_irqsave(lock, flags);
 	desc->msi_mask &= ~clear;
 	desc->msi_mask |= set;
@@ -181,7 +184,8 @@ static void pci_msix_write_vector_ctrl(struct msi_desc *desc, u32 ctrl)
 {
 	void __iomem *desc_addr = pci_msix_desc_addr(desc);
 
-	writel(ctrl, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
+	if (desc->msi_attrib.can_mask)
+		writel(ctrl, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
 }
 
 static inline void pci_msix_mask(struct msi_desc *desc)
@@ -200,23 +204,17 @@ static inline void pci_msix_unmask(struct msi_desc *desc)
 
 static void __pci_msi_mask_desc(struct msi_desc *desc, u32 mask)
 {
-	if (pci_msi_ignore_mask || desc->msi_attrib.is_virtual)
-		return;
-
 	if (desc->msi_attrib.is_msix)
 		pci_msix_mask(desc);
-	else if (desc->msi_attrib.maskbit)
+	else
 		pci_msi_mask(desc, mask);
 }
 
 static void __pci_msi_unmask_desc(struct msi_desc *desc, u32 mask)
 {
-	if (pci_msi_ignore_mask || desc->msi_attrib.is_virtual)
-		return;
-
 	if (desc->msi_attrib.is_msix)
 		pci_msix_unmask(desc);
-	else if (desc->msi_attrib.maskbit)
+	else
 		pci_msi_unmask(desc, mask);
 }
 
@@ -370,6 +368,11 @@ static void free_msi_irqs(struct pci_dev *dev)
 		for (i = 0; i < entry->nvec_used; i++)
 			BUG_ON(irq_has_action(entry->irq + i));
 
+	if (dev->msi_irq_groups) {
+		msi_destroy_sysfs(&dev->dev, dev->msi_irq_groups);
+		dev->msi_irq_groups = NULL;
+	}
+
 	pci_msi_teardown_msi_irqs(dev);
 
 	list_for_each_entry_safe(entry, tmp, msi_list, list) {
@@ -381,11 +384,6 @@ static void free_msi_irqs(struct pci_dev *dev)
 		list_del(&entry->list);
 		free_msi_entry(entry);
 	}
-
-	if (dev->msi_irq_groups) {
-		msi_destroy_sysfs(&dev->dev, dev->msi_irq_groups);
-		dev->msi_irq_groups = NULL;
-	}
 }
 
 static void pci_intx_for_msi(struct pci_dev *dev, int enable)
@@ -479,12 +477,16 @@ msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd)
 		goto out;
 
 	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
+	/* Lies, damned lies, and MSIs */
+	if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING)
+		control |= PCI_MSI_FLAGS_MASKBIT;
 
 	entry->msi_attrib.is_msix	= 0;
 	entry->msi_attrib.is_64		= !!(control & PCI_MSI_FLAGS_64BIT);
 	entry->msi_attrib.is_virtual	= 0;
 	entry->msi_attrib.entry_nr	= 0;
-	entry->msi_attrib.maskbit	= !!(control & PCI_MSI_FLAGS_MASKBIT);
+	entry->msi_attrib.can_mask	= !pci_msi_ignore_mask &&
+					  !!(control & PCI_MSI_FLAGS_MASKBIT);
 	entry->msi_attrib.default_irq	= dev->irq;	/* Save IOAPIC IRQ */
 	entry->msi_attrib.multi_cap	= (control & PCI_MSI_FLAGS_QMASK) >> 1;
 	entry->msi_attrib.multiple	= ilog2(__roundup_pow_of_two(nvec));
@@ -495,7 +497,7 @@ msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd)
 		entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_32;
 
 	/* Save the initial mask status */
-	if (entry->msi_attrib.maskbit)
+	if (entry->msi_attrib.can_mask)
 		pci_read_config_dword(dev, entry->mask_pos, &entry->msi_mask);
 
 out:
@@ -639,10 +641,13 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
 		entry->msi_attrib.is_virtual =
 			entry->msi_attrib.entry_nr >= vec_count;
 
+		entry->msi_attrib.can_mask	= !pci_msi_ignore_mask &&
+						  !entry->msi_attrib.is_virtual;
+
 		entry->msi_attrib.default_irq	= dev->irq;
 		entry->mask_base		= base;
 
-		if (!entry->msi_attrib.is_virtual) {
+		if (entry->msi_attrib.can_mask) {
 			addr = pci_msix_desc_addr(entry);
 			entry->msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
 		}
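The free_msi_irqs() hunks apply the standard cure for this class of use-after-free: tear down every path that can still reach an object (here, the sysfs show() callbacks) before freeing it. A trimmed userspace illustration of the ordering, with invented names:

#include <stdio.h>
#include <stdlib.h>

struct entry { int irq; };

/* Stand-in for the published sysfs attribute. */
static struct entry *published;

static void sysfs_show(void)
{
	if (published)
		printf("irq: %d\n", published->irq);
}

int main(void)
{
	struct entry *e = malloc(sizeof(*e));

	e->irq = 24;
	published = e;		/* like registering the sysfs group */

	/* Fixed order: unpublish first, then free. Freeing first would
	 * leave a concurrent sysfs_show() reading freed memory. */
	published = NULL;	/* like msi_destroy_sysfs() */
	free(e);		/* like free_msi_entry() */

	sysfs_show();		/* safe: prints nothing */
	return 0;
}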
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
@@ -5851,3 +5851,9 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_PERICOM, 0x2303,
 			      pci_fixup_pericom_acs_store_forward);
 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_PERICOM, 0x2303,
 			      pci_fixup_pericom_acs_store_forward);
+
+static void nvidia_ion_ahci_fixup(struct pci_dev *pdev)
+{
+	pdev->dev_flags |= PCI_DEV_FLAGS_HAS_MSI_MASKING;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0ab8, nvidia_ion_ahci_fixup);
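Note how this quirk and the msi.c change above cooperate: the fixup only sets PCI_DEV_FLAGS_HAS_MSI_MASKING on the affected device (Nvidia device 0x0ab8), and msi_setup_entry() then ORs PCI_MSI_FLAGS_MASKBIT into the capability word it read from config space. The rest of the MSI code thus treats the device as maskable, so the entries that come out of reset masked actually get unmasked.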
diff --git a/include/linux/msi.h b/include/linux/msi.h
@@ -148,7 +148,7 @@ struct msi_desc {
 				u8	is_msix		: 1;
 				u8	multiple	: 3;
 				u8	multi_cap	: 3;
-				u8	maskbit		: 1;
+				u8	can_mask	: 1;
 				u8	is_64		: 1;
 				u8	is_virtual	: 1;
 				u16	entry_nr;
diff --git a/include/linux/pci.h b/include/linux/pci.h
@@ -233,6 +233,8 @@ enum pci_dev_flags {
 	PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
 	/* Don't use Relaxed Ordering for TLPs directed at this device */
 	PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
+	/* Device does honor MSI masking despite saying otherwise */
+	PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12),
 };
 
 enum pci_irq_reroute_variant {
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
@@ -529,10 +529,10 @@ static bool msi_check_reservation_mode(struct irq_domain *domain,
 
 	/*
 	 * Checking the first MSI descriptor is sufficient. MSIX supports
-	 * masking and MSI does so when the maskbit is set.
+	 * masking and MSI does so when the can_mask attribute is set.
 	 */
 	desc = first_msi_entry(dev);
-	return desc->msi_attrib.is_msix || desc->msi_attrib.maskbit;
+	return desc->msi_attrib.is_msix || desc->msi_attrib.can_mask;
 }
 
 int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,