- Make sure multi-bridge machines get all eiointc interrupt controllers
  initialized even if the number of CPUs has been limited by a cmdline
  param

- Make sure interrupt lines on liointc hw are configured properly even
  when interrupt routing changes

- Avoid use-after-free in the error path of the MSI init code

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEzv7L6UO9uDPlPSfHEsHwGGHeVUoFAmaBFiwACgkQEsHwGGHe
VUpxMA//QjUEdRe93t4jwcdYKjEPgPHEi+JBgL+tpTrrHUS5xrhLRhFGk7oDBG+u
hUMN1NzoPc/Qy8TStjC/rMgwH+NbHyTCb6q0K9ORLtkaZLLz6zlpqQuLiCrd1sMJ
GIztWw4WgvwkZVcFk1MiKfGyIbMBHbicYXEu64ymhkQ30aM1fID/gWvFs8pvaJNg
MnQ3APt45SoywHyqsqNYMr7G1al0Z/OgTf/MTH8R9QDFCnCo99cgWIU83yr9lNQP
z4HZaFSgkIE2Rfc5fsh9wA/K4iqIId06Fx/f4sF5BUXaVPhjUTSv9tIZSVfXfpsS
CuophjzKp5g05LIZUI32DZ+OEZpfR8DVdN6L8bhob5niJ0XqPVhkKf2ctHlmqP+p
FiRN/1SnZaz+ZbLtIV2groyRvD1N/DeDOZfbuCIyj0OhkrxC16XF3Olzb3ayuQXO
n3xXnFMTRmKzTMWQTjS7gMSPEwnImvLsBUhD/Qc7Ka2eiceEm/Q5yziptyYUX+/f
/W0b95rvXcMQMRcCcgwDTHvOV31UOWC0WzDNwZTksyhtNvFXd8MtDmlOE4KAGDyD
hFNNkxJSovtoTB2ogehsUX2QO09yEFirYqntnAmu3Xih56sg3PBcMZOU4O/5dwyw
2nUUJe1p1lvRf5MVd9re9uWxPvv3CYEAiPbHL8S4HEH8etajywg=
=Hh3B
-----END PGP SIGNATURE-----

Merge tag 'irq_urgent_for_v6.10_rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq fixes from Borislav Petkov:

 - Make sure multi-bridge machines get all eiointc interrupt controllers
   initialized even if the number of CPUs has been limited by a cmdline
   param

 - Make sure interrupt lines on liointc hw are configured properly even
   when interrupt routing changes

 - Avoid use-after-free in the error path of the MSI init code

* tag 'irq_urgent_for_v6.10_rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  PCI/MSI: Fix UAF in msi_capability_init
  irqchip/loongson-liointc: Set different ISRs for different cores
  irqchip/loongson-eiointc: Use early_cpu_to_node() instead of cpu_to_node()
drivers/irqchip/irq-loongson-eiointc.c

@@ -15,6 +15,7 @@
 #include <linux/irqchip/chained_irq.h>
 #include <linux/kernel.h>
 #include <linux/syscore_ops.h>
+#include <asm/numa.h>
 
 #define EIOINTC_REG_NODEMAP 0x14a0
 #define EIOINTC_REG_IPMAP 0x14c0
@@ -339,7 +340,7 @@ static int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
         int node;
 
         if (cpu_has_flatmode)
-                node = cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE);
+                node = early_cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE);
         else
                 node = eiointc_priv[nr_pics - 1]->node;
 
@@ -431,7 +432,7 @@ int __init eiointc_acpi_init(struct irq_domain *parent,
                 goto out_free_handle;
 
         if (cpu_has_flatmode)
-                node = cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
+                node = early_cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
         else
                 node = acpi_eiointc->node;
         acpi_set_vec_parent(node, priv->eiointc_domain, pch_group);
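The point of the change above is the switch from cpu_to_node() to early_cpu_to_node(): when a cmdline parameter such as nr_cpus= caps how many CPUs are brought up, the runtime CPU-to-node mapping may not be valid for cores that never came online, so looking up the first core of a later bridge can yield the wrong node and that bridge's eiointc is skipped, while the early mapping covers every possible CPU. Below is a minimal standalone sketch of that difference in plain C, not kernel code; possible_cpu_node[], online_cpu_node[] and CORES_PER_NODE are hypothetical stand-ins for the kernel's data structures.

/*
 * Standalone sketch: a node lookup that is only valid for brought-up CPUs
 * breaks when the CPU count is capped, while an "early" table covering all
 * possible CPUs still resolves the node. All names are illustrative.
 */
#include <stdio.h>

#define CORES_PER_NODE 4          /* stand-in for CORES_PER_EIO_NODE */
#define NR_POSSIBLE_CPUS 8        /* two bridges/nodes worth of cores */

/* Firmware-provided CPU->node table, known for every possible CPU. */
static const int possible_cpu_node[NR_POSSIBLE_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };

/* Runtime table, only filled in for CPUs that were actually brought up. */
static int online_cpu_node[NR_POSSIBLE_CPUS] = { 0, 0, 0, 0, -1, -1, -1, -1 };

static int cpu_to_node_like(int cpu)       { return online_cpu_node[cpu]; }
static int early_cpu_to_node_like(int cpu) { return possible_cpu_node[cpu]; }

int main(void)
{
        int eio_node = 1;                       /* second bridge */
        int cpu = eio_node * CORES_PER_NODE;    /* first core on that bridge */

        /* With the CPU count capped, only node 0's cores were onlined: */
        printf("runtime mapping: %d\n", cpu_to_node_like(cpu));        /* -1 (bogus) */
        printf("early mapping:   %d\n", early_cpu_to_node_like(cpu));  /* 1 (correct) */
        return 0;
}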
drivers/irqchip/irq-loongson-liointc.c

@@ -28,7 +28,7 @@
 
 #define LIOINTC_INTC_CHIP_START 0x20
 
-#define LIOINTC_REG_INTC_STATUS (LIOINTC_INTC_CHIP_START + 0x20)
+#define LIOINTC_REG_INTC_STATUS(core) (LIOINTC_INTC_CHIP_START + 0x20 + (core) * 8)
 #define LIOINTC_REG_INTC_EN_STATUS (LIOINTC_INTC_CHIP_START + 0x04)
 #define LIOINTC_REG_INTC_ENABLE (LIOINTC_INTC_CHIP_START + 0x08)
 #define LIOINTC_REG_INTC_DISABLE (LIOINTC_INTC_CHIP_START + 0x0c)
@@ -217,7 +217,7 @@ static int liointc_init(phys_addr_t addr, unsigned long size, int revision,
                 goto out_free_priv;
 
         for (i = 0; i < LIOINTC_NUM_CORES; i++)
-                priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS;
+                priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS(i);
 
         for (i = 0; i < LIOINTC_NUM_PARENT; i++)
                 priv->handler[i].parent_int_map = parent_int_map[i];
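With the macro change above, each core's core_isr[] entry points at its own interrupt status register instead of every core reading core 0's register: the per-core ISRs start 0x20 into the chip block and sit 8 bytes apart. A quick standalone sketch of the resulting offsets follows; the two register constants are copied from the diff, while the LIOINTC_NUM_CORES value used here is an assumption for illustration.

/* Standalone sketch: per-core ISR offsets produced by the new
 * LIOINTC_REG_INTC_STATUS(core) macro. */
#include <stdio.h>

#define LIOINTC_INTC_CHIP_START 0x20
#define LIOINTC_REG_INTC_STATUS(core) (LIOINTC_INTC_CHIP_START + 0x20 + (core) * 8)
#define NUM_CORES 4     /* assumed core count for the sketch */

int main(void)
{
        for (int core = 0; core < NUM_CORES; core++)
                printf("core %d ISR at offset 0x%02x\n",
                       core, (unsigned)LIOINTC_REG_INTC_STATUS(core));
        /* Prints 0x40, 0x48, 0x50, 0x58; previously every core used 0x40. */
        return 0;
}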
drivers/pci/msi/msi.c

@@ -352,7 +352,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
                                struct irq_affinity *affd)
 {
         struct irq_affinity_desc *masks = NULL;
-        struct msi_desc *entry;
+        struct msi_desc *entry, desc;
         int ret;
 
         /* Reject multi-MSI early on irq domain enabled architectures */
@@ -377,6 +377,12 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
         /* All MSIs are unmasked by default; mask them all */
         entry = msi_first_desc(&dev->dev, MSI_DESC_ALL);
         pci_msi_mask(entry, msi_multi_mask(entry));
+        /*
+         * Copy the MSI descriptor for the error path because
+         * pci_msi_setup_msi_irqs() will free it for the hierarchical
+         * interrupt domain case.
+         */
+        memcpy(&desc, entry, sizeof(desc));
 
         /* Configure MSI capability structure */
         ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
@@ -396,7 +402,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
                 goto unlock;
 
 err:
-        pci_msi_unmask(entry, msi_multi_mask(entry));
+        pci_msi_unmask(&desc, msi_multi_mask(&desc));
         pci_free_msi_irqs(dev);
 fail:
         dev->msi_enabled = 0;
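The fix above snapshots the MSI descriptor into an on-stack copy before pci_msi_setup_msi_irqs() runs, because, per the added comment, a failing setup frees the descriptor in the hierarchical interrupt domain case and the old error path then unmasked through the dangling pointer. The standalone C sketch below illustrates the general pattern (copy whatever the error path still needs before calling something that may free the object); desc_t, setup_irqs_may_free() and unmask() are hypothetical stand-ins, not PCI core APIs.

/*
 * Standalone sketch of the use-after-free pattern fixed above.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { unsigned int mask_bits; } desc_t;

/* May free the object on failure, like the hierarchical irqdomain path. */
static int setup_irqs_may_free(desc_t **entry)
{
        free(*entry);
        *entry = NULL;
        return -1;              /* simulate failure */
}

static void unmask(const desc_t *d) { printf("unmask 0x%x\n", d->mask_bits); }

int main(void)
{
        desc_t *entry = calloc(1, sizeof(*entry));
        desc_t desc;

        if (!entry)
                return 1;
        entry->mask_bits = 0xf;
        memcpy(&desc, entry, sizeof(desc));     /* snapshot for the error path */

        if (setup_irqs_may_free(&entry) < 0) {
                /* Touching 'entry' here would be a use-after-free; the copy is safe. */
                unmask(&desc);
                return 1;
        }
        return 0;
}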