74c16b2e2b
The PARAVIRT config option and PV IPI support are added for the guest side. Function pv_ipi_init() adds the IPI sending and IPI receiving hooks. It first checks whether the system runs in VM mode; if so, it calls kvm_para_available() to detect the current hypervisor type (for now only KVM detection is supported). The paravirt functions work only if the current hypervisor type is KVM, since KVM is the only hypervisor supported on LoongArch at present.

PV IPI uses virtual IPI sender and virtual IPI receiver functions. With the virtual IPI sender, the IPI message is stored in memory rather than in emulated hardware, and sending is done with a hypercall. IPI multicast is also supported, so 128 vCPUs can receive IPIs at the same time, similar to the x86 KVM method.

With the virtual IPI receiver, hardware SWI0 is used rather than the real IPI hardware. Since each vCPU has its own SWI0 (like the hardware timer), acknowledging the IPI interrupt does not trap, and since the IPI message is stored in memory, reading the message does not trap either.

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
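To make that flow concrete, here is a minimal user-space sketch of the idea: the IPI message lives in ordinary memory, one hypercall kicks up to 128 target vCPUs at once, and the receiver drains its pending actions without trapping. All names below (pv_ipi_mailbox, pv_send_ipi_mask, pv_read_and_clear_ipi, hypercall_send_ipi) are hypothetical stand-ins for illustration, not the kernel's actual interfaces, and the hypercall itself is stubbed out with a printf.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NR_VCPUS 128	/* the multicast path covers 128 vCPUs at once */

/* Per-vCPU mailbox kept in ordinary guest memory (no emulated IPI registers). */
struct pv_ipi_mailbox {
	_Atomic uint32_t message;	/* pending IPI action bits */
};

static struct pv_ipi_mailbox mailbox[NR_VCPUS];

/* Stand-in for the real hypercall; it only reports which vCPUs would be kicked. */
static void hypercall_send_ipi(uint64_t mask_lo, uint64_t mask_hi)
{
	printf("hypercall: kick vCPUs hi=0x%016llx lo=0x%016llx\n",
	       (unsigned long long)mask_hi, (unsigned long long)mask_lo);
}

/*
 * Sender: record the action bits in each target's mailbox, then issue one
 * hypercall carrying a 128-bit destination bitmap (multicast in a single exit).
 */
static void pv_send_ipi_mask(const uint64_t mask[2], uint32_t action)
{
	for (int cpu = 0; cpu < NR_VCPUS; cpu++) {
		if (mask[cpu / 64] & (1ULL << (cpu % 64)))
			atomic_fetch_or_explicit(&mailbox[cpu].message, action,
						 memory_order_release);
	}
	hypercall_send_ipi(mask[0], mask[1]);
}

/*
 * Receiver: what the guest's SWI0 handler would do. The interrupt acknowledge
 * (per-vCPU SWI0, not shared IPI hardware) and the message read (plain memory)
 * both complete without trapping to the hypervisor.
 */
static uint32_t pv_read_and_clear_ipi(int cpu)
{
	return atomic_exchange_explicit(&mailbox[cpu].message, 0,
					memory_order_acquire);
}

int main(void)
{
	uint64_t mask[2] = { (1ULL << 0) | (1ULL << 2), 0 };	/* target vCPU 0 and vCPU 2 */

	pv_send_ipi_mask(mask, 1u << 0);	/* action bit 0, e.g. "reschedule" */
	printf("vCPU2 pending actions: 0x%x\n", pv_read_and_clear_ipi(2));
	return 0;
}

Built with any C11 compiler, sending action bit 0 to vCPUs 0 and 2 prints the kick bitmap and then reports 0x1 pending on vCPU 2.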
118 lines
2.7 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irqchip.h>
#include <linux/kernel_stat.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>

#include <asm/irq.h>
#include <asm/loongson.h>
#include <asm/setup.h>

DEFINE_PER_CPU(unsigned long, irq_stack);
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

struct acpi_vector_group pch_group[MAX_IO_PICS];
struct acpi_vector_group msi_group[MAX_IO_PICS];
/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
	pr_warn("Unexpected IRQ # %d\n", irq);
}

atomic_t irq_err_count;

asmlinkage void spurious_interrupt(void)
{
	atomic_inc(&irq_err_count);
}

int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_SMP
	show_ipi_list(p, prec);
#endif
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
	return 0;
}

static int __init early_pci_mcfg_parse(struct acpi_table_header *header)
{
	struct acpi_table_mcfg *mcfg;
	struct acpi_mcfg_allocation *mptr;
	int i, n;

	if (header->length < sizeof(struct acpi_table_mcfg))
		return -EINVAL;

	n = (header->length - sizeof(struct acpi_table_mcfg)) /
			sizeof(struct acpi_mcfg_allocation);
	mcfg = (struct acpi_table_mcfg *)header;
	mptr = (struct acpi_mcfg_allocation *) &mcfg[1];

	for (i = 0; i < n; i++, mptr++) {
		msi_group[i].pci_segment = mptr->pci_segment;
		/* The NUMA node is encoded in bits [47:44] of the ECAM base address */
		pch_group[i].node = msi_group[i].node = (mptr->address >> 44) & 0xf;
	}

	return 0;
}

static void __init init_vec_parent_group(void)
{
	int i;

	for (i = 0; i < MAX_IO_PICS; i++) {
		msi_group[i].pci_segment = -1;
		msi_group[i].node = -1;
		pch_group[i].node = -1;
	}

	acpi_table_parse(ACPI_SIG_MCFG, early_pci_mcfg_parse);
}

void __init init_IRQ(void)
{
	int i;
	unsigned int order = get_order(IRQ_STACK_SIZE);
	struct page *page;

	clear_csr_ecfg(ECFG0_IM);
	clear_csr_estat(ESTATF_IP);

	init_vec_parent_group();
	irqchip_init();
#ifdef CONFIG_SMP
	mp_ops.init_ipi();
#endif

	for (i = 0; i < NR_IRQS; i++)
		irq_set_noprobe(i);

	for_each_possible_cpu(i) {
		page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, order);

		per_cpu(irq_stack, i) = (unsigned long)page_address(page);
		pr_debug("CPU%d IRQ stack at 0x%lx - 0x%lx\n", i,
			per_cpu(irq_stack, i), per_cpu(irq_stack, i) + IRQ_STACK_SIZE);
	}

	set_csr_ecfg(ECFGF_SIP0 | ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 | ECFGF_IPI | ECFGF_PMC);
}