xen: branch for v6.6-rc3

-----BEGIN PGP SIGNATURE-----
 
 iHUEABYIAB0WIQRTLbB6QfY48x44uB6AXGG7T9hjvgUCZQv/2QAKCRCAXGG7T9hj
 vlaAAQCsmQ0V9N9I4SW3G2JKGGIWW6+/IzsKF4RhNlSgm15MNgD9E0nHfEQpRGkL
 6qKO7wc/C3PJRrcq8RxWS8jergeT+wY=
 =vBS0
 -----END PGP SIGNATURE-----

Merge tag 'for-linus-6.6a-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen fixes from Juergen Gross:

 - remove some unused functions in the Xen event channel handling

 - fix a regression (introduced during the merge window) when booting as
   Xen PV guest

 - small cleanup removing another strncpy() instance

* tag 'for-linus-6.6a-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/efi: refactor deprecated strncpy
  x86/xen: allow nesting of same lazy mode
  x86/xen: move paravirt lazy code
  arm/xen: remove lazy mode related definitions
  xen: simplify evtchn_do_upcall() call maze
This commit is contained in:
Linus Torvalds 2023-09-21 08:27:42 -07:00
commit 88a174a906
16 changed files with 123 additions and 155 deletions

View File

@@ -207,7 +207,7 @@ static void xen_power_off(void)
static irqreturn_t xen_arm_callback(int irq, void *arg) static irqreturn_t xen_arm_callback(int irq, void *arg)
{ {
xen_hvm_evtchn_do_upcall(); xen_evtchn_do_upcall();
return IRQ_HANDLED; return IRQ_HANDLED;
} }

View File

@@ -294,7 +294,7 @@ static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs)
inc_irq_stat(irq_hv_callback_count); inc_irq_stat(irq_hv_callback_count);
xen_hvm_evtchn_do_upcall(); xen_evtchn_do_upcall();
set_irq_regs(old_regs); set_irq_regs(old_regs);
} }

View File

@@ -9,13 +9,6 @@ struct paravirt_patch_site {
u8 type; /* type of this instruction */ u8 type; /* type of this instruction */
u8 len; /* length of original instruction */ u8 len; /* length of original instruction */
}; };
/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
PARAVIRT_LAZY_NONE,
PARAVIRT_LAZY_MMU,
PARAVIRT_LAZY_CPU,
};
#endif #endif
#ifdef CONFIG_PARAVIRT #ifdef CONFIG_PARAVIRT
@@ -549,14 +542,6 @@ int paravirt_disable_iospace(void);
__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \ __PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4)) PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_start_context_switch(struct task_struct *prev);
void paravirt_end_context_switch(struct task_struct *next);
void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_flush_lazy_mmu(void);
void _paravirt_nop(void); void _paravirt_nop(void);
void paravirt_BUG(void); void paravirt_BUG(void);
unsigned long paravirt_ret0(void); unsigned long paravirt_ret0(void);

View File

@@ -36,6 +36,7 @@
extern struct shared_info *HYPERVISOR_shared_info; extern struct shared_info *HYPERVISOR_shared_info;
extern struct start_info *xen_start_info; extern struct start_info *xen_start_info;
#include <asm/bug.h>
#include <asm/processor.h> #include <asm/processor.h>
#define XEN_SIGNATURE "XenVMMXenVMM" #define XEN_SIGNATURE "XenVMMXenVMM"
@@ -63,4 +64,40 @@ void __init xen_pvh_init(struct boot_params *boot_params);
void __init mem_map_via_hcall(struct boot_params *boot_params_p); void __init mem_map_via_hcall(struct boot_params *boot_params_p);
#endif #endif
/* Lazy mode for batching updates / context switch */
enum xen_lazy_mode {
XEN_LAZY_NONE,
XEN_LAZY_MMU,
XEN_LAZY_CPU,
};
DECLARE_PER_CPU(enum xen_lazy_mode, xen_lazy_mode);
DECLARE_PER_CPU(unsigned int, xen_lazy_nesting);
static inline void enter_lazy(enum xen_lazy_mode mode)
{
enum xen_lazy_mode old_mode = this_cpu_read(xen_lazy_mode);
if (mode == old_mode) {
this_cpu_inc(xen_lazy_nesting);
return;
}
BUG_ON(old_mode != XEN_LAZY_NONE);
this_cpu_write(xen_lazy_mode, mode);
}
static inline void leave_lazy(enum xen_lazy_mode mode)
{
BUG_ON(this_cpu_read(xen_lazy_mode) != mode);
if (this_cpu_read(xen_lazy_nesting) == 0)
this_cpu_write(xen_lazy_mode, XEN_LAZY_NONE);
else
this_cpu_dec(xen_lazy_nesting);
}
enum xen_lazy_mode xen_get_lazy_mode(void);
#endif /* _ASM_X86_XEN_HYPERVISOR_H */ #endif /* _ASM_X86_XEN_HYPERVISOR_H */

View File

@@ -143,66 +143,7 @@ int paravirt_disable_iospace(void)
return request_resource(&ioport_resource, &reserve_ioports); return request_resource(&ioport_resource, &reserve_ioports);
} }
static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;
static inline void enter_lazy(enum paravirt_lazy_mode mode)
{
BUG_ON(this_cpu_read(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
this_cpu_write(paravirt_lazy_mode, mode);
}
static void leave_lazy(enum paravirt_lazy_mode mode)
{
BUG_ON(this_cpu_read(paravirt_lazy_mode) != mode);
this_cpu_write(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
}
void paravirt_enter_lazy_mmu(void)
{
enter_lazy(PARAVIRT_LAZY_MMU);
}
void paravirt_leave_lazy_mmu(void)
{
leave_lazy(PARAVIRT_LAZY_MMU);
}
void paravirt_flush_lazy_mmu(void)
{
preempt_disable();
if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
arch_leave_lazy_mmu_mode();
arch_enter_lazy_mmu_mode();
}
preempt_enable();
}
#ifdef CONFIG_PARAVIRT_XXL #ifdef CONFIG_PARAVIRT_XXL
void paravirt_start_context_switch(struct task_struct *prev)
{
BUG_ON(preemptible());
if (this_cpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
arch_leave_lazy_mmu_mode();
set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
}
enter_lazy(PARAVIRT_LAZY_CPU);
}
void paravirt_end_context_switch(struct task_struct *next)
{
BUG_ON(preemptible());
leave_lazy(PARAVIRT_LAZY_CPU);
if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
arch_enter_lazy_mmu_mode();
}
static noinstr void pv_native_write_cr2(unsigned long val) static noinstr void pv_native_write_cr2(unsigned long val)
{ {
native_write_cr2(val); native_write_cr2(val);
@@ -229,14 +170,6 @@ static noinstr void pv_native_safe_halt(void)
} }
#endif #endif
enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
{
if (in_interrupt())
return PARAVIRT_LAZY_NONE;
return this_cpu_read(paravirt_lazy_mode);
}
struct pv_info pv_info = { struct pv_info pv_info = {
.name = "bare hardware", .name = "bare hardware",
#ifdef CONFIG_PARAVIRT_XXL #ifdef CONFIG_PARAVIRT_XXL

View File

@@ -138,7 +138,7 @@ void __init xen_efi_init(struct boot_params *boot_params)
if (efi_systab_xen == NULL) if (efi_systab_xen == NULL)
return; return;
strncpy((char *)&boot_params->efi_info.efi_loader_signature, "Xen", strscpy((char *)&boot_params->efi_info.efi_loader_signature, "Xen",
sizeof(boot_params->efi_info.efi_loader_signature)); sizeof(boot_params->efi_info.efi_loader_signature));
boot_params->efi_info.efi_systab = (__u32)__pa(efi_systab_xen); boot_params->efi_info.efi_systab = (__u32)__pa(efi_systab_xen);
boot_params->efi_info.efi_systab_hi = (__u32)(__pa(efi_systab_xen) >> 32); boot_params->efi_info.efi_systab_hi = (__u32)(__pa(efi_systab_xen) >> 32);

View File

@@ -32,7 +32,7 @@ EXPORT_SYMBOL_GPL(hypercall_page);
* &HYPERVISOR_shared_info->vcpu_info[cpu]. See xen_hvm_init_shared_info * &HYPERVISOR_shared_info->vcpu_info[cpu]. See xen_hvm_init_shared_info
* and xen_vcpu_setup for details. By default it points to share_info->vcpu_info * and xen_vcpu_setup for details. By default it points to share_info->vcpu_info
* but during boot it is switched to point to xen_vcpu_info. * but during boot it is switched to point to xen_vcpu_info.
* The pointer is used in __xen_evtchn_do_upcall to acknowledge pending events. * The pointer is used in xen_evtchn_do_upcall to acknowledge pending events.
*/ */
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu); DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info); DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);

View File

@@ -136,7 +136,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_xen_hvm_callback)
inc_irq_stat(irq_hv_callback_count); inc_irq_stat(irq_hv_callback_count);
xen_hvm_evtchn_do_upcall(); xen_evtchn_do_upcall();
set_irq_regs(old_regs); set_irq_regs(old_regs);
} }

View File

@@ -101,6 +101,17 @@ struct tls_descs {
struct desc_struct desc[3]; struct desc_struct desc[3];
}; };
DEFINE_PER_CPU(enum xen_lazy_mode, xen_lazy_mode) = XEN_LAZY_NONE;
DEFINE_PER_CPU(unsigned int, xen_lazy_nesting);
enum xen_lazy_mode xen_get_lazy_mode(void)
{
if (in_interrupt())
return XEN_LAZY_NONE;
return this_cpu_read(xen_lazy_mode);
}
/* /*
* Updating the 3 TLS descriptors in the GDT on every task switch is * Updating the 3 TLS descriptors in the GDT on every task switch is
* surprisingly expensive so we avoid updating them if they haven't * surprisingly expensive so we avoid updating them if they haven't
@@ -362,10 +373,25 @@ static noinstr unsigned long xen_get_debugreg(int reg)
return HYPERVISOR_get_debugreg(reg); return HYPERVISOR_get_debugreg(reg);
} }
static void xen_start_context_switch(struct task_struct *prev)
{
BUG_ON(preemptible());
if (this_cpu_read(xen_lazy_mode) == XEN_LAZY_MMU) {
arch_leave_lazy_mmu_mode();
set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
}
enter_lazy(XEN_LAZY_CPU);
}
static void xen_end_context_switch(struct task_struct *next) static void xen_end_context_switch(struct task_struct *next)
{ {
BUG_ON(preemptible());
xen_mc_flush(); xen_mc_flush();
paravirt_end_context_switch(next); leave_lazy(XEN_LAZY_CPU);
if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
arch_enter_lazy_mmu_mode();
} }
static unsigned long xen_store_tr(void) static unsigned long xen_store_tr(void)
@@ -472,7 +498,7 @@ static void xen_set_ldt(const void *addr, unsigned entries)
MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
xen_mc_issue(PARAVIRT_LAZY_CPU); xen_mc_issue(XEN_LAZY_CPU);
} }
static void xen_load_gdt(const struct desc_ptr *dtr) static void xen_load_gdt(const struct desc_ptr *dtr)
@@ -568,7 +594,7 @@ static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
* exception between the new %fs descriptor being loaded and * exception between the new %fs descriptor being loaded and
* %fs being effectively cleared at __switch_to(). * %fs being effectively cleared at __switch_to().
*/ */
if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) if (xen_get_lazy_mode() == XEN_LAZY_CPU)
loadsegment(fs, 0); loadsegment(fs, 0);
xen_mc_batch(); xen_mc_batch();
@@ -577,7 +603,7 @@ static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
load_TLS_descriptor(t, cpu, 1); load_TLS_descriptor(t, cpu, 1);
load_TLS_descriptor(t, cpu, 2); load_TLS_descriptor(t, cpu, 2);
xen_mc_issue(PARAVIRT_LAZY_CPU); xen_mc_issue(XEN_LAZY_CPU);
} }
static void xen_load_gs_index(unsigned int idx) static void xen_load_gs_index(unsigned int idx)
@@ -909,7 +935,7 @@ static void xen_load_sp0(unsigned long sp0)
mcs = xen_mc_entry(0); mcs = xen_mc_entry(0);
MULTI_stack_switch(mcs.mc, __KERNEL_DS, sp0); MULTI_stack_switch(mcs.mc, __KERNEL_DS, sp0);
xen_mc_issue(PARAVIRT_LAZY_CPU); xen_mc_issue(XEN_LAZY_CPU);
this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0); this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
} }
@@ -973,7 +999,7 @@ static void xen_write_cr0(unsigned long cr0)
MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0); MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0);
xen_mc_issue(PARAVIRT_LAZY_CPU); xen_mc_issue(XEN_LAZY_CPU);
} }
static void xen_write_cr4(unsigned long cr4) static void xen_write_cr4(unsigned long cr4)
@@ -1156,7 +1182,7 @@ static const typeof(pv_ops) xen_cpu_ops __initconst = {
#endif #endif
.io_delay = xen_io_delay, .io_delay = xen_io_delay,
.start_context_switch = paravirt_start_context_switch, .start_context_switch = xen_start_context_switch,
.end_context_switch = xen_end_context_switch, .end_context_switch = xen_end_context_switch,
}, },
}; };

View File

@@ -236,7 +236,7 @@ static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
u.val = pmd_val_ma(val); u.val = pmd_val_ma(val);
xen_extend_mmu_update(&u); xen_extend_mmu_update(&u);
xen_mc_issue(PARAVIRT_LAZY_MMU); xen_mc_issue(XEN_LAZY_MMU);
preempt_enable(); preempt_enable();
} }
@@ -270,7 +270,7 @@ static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
{ {
struct mmu_update u; struct mmu_update u;
if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU) if (xen_get_lazy_mode() != XEN_LAZY_MMU)
return false; return false;
xen_mc_batch(); xen_mc_batch();
@@ -279,7 +279,7 @@ static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
u.val = pte_val_ma(pteval); u.val = pte_val_ma(pteval);
xen_extend_mmu_update(&u); xen_extend_mmu_update(&u);
xen_mc_issue(PARAVIRT_LAZY_MMU); xen_mc_issue(XEN_LAZY_MMU);
return true; return true;
} }
@@ -325,7 +325,7 @@ void xen_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
u.val = pte_val_ma(pte); u.val = pte_val_ma(pte);
xen_extend_mmu_update(&u); xen_extend_mmu_update(&u);
xen_mc_issue(PARAVIRT_LAZY_MMU); xen_mc_issue(XEN_LAZY_MMU);
} }
/* Assume pteval_t is equivalent to all the other *val_t types. */ /* Assume pteval_t is equivalent to all the other *val_t types. */
@@ -419,7 +419,7 @@ static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
u.val = pud_val_ma(val); u.val = pud_val_ma(val);
xen_extend_mmu_update(&u); xen_extend_mmu_update(&u);
xen_mc_issue(PARAVIRT_LAZY_MMU); xen_mc_issue(XEN_LAZY_MMU);
preempt_enable(); preempt_enable();
} }
@@ -499,7 +499,7 @@ static void __init xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
__xen_set_p4d_hyper(ptr, val); __xen_set_p4d_hyper(ptr, val);
xen_mc_issue(PARAVIRT_LAZY_MMU); xen_mc_issue(XEN_LAZY_MMU);
preempt_enable(); preempt_enable();
} }
@@ -531,7 +531,7 @@ static void xen_set_p4d(p4d_t *ptr, p4d_t val)
if (user_ptr) if (user_ptr)
__xen_set_p4d_hyper((p4d_t *)user_ptr, val); __xen_set_p4d_hyper((p4d_t *)user_ptr, val);
xen_mc_issue(PARAVIRT_LAZY_MMU); xen_mc_issue(XEN_LAZY_MMU);
} }
#if CONFIG_PGTABLE_LEVELS >= 5 #if CONFIG_PGTABLE_LEVELS >= 5
@@ -1245,7 +1245,7 @@ static noinline void xen_flush_tlb(void)
op->cmd = MMUEXT_TLB_FLUSH_LOCAL; op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
xen_mc_issue(PARAVIRT_LAZY_MMU); xen_mc_issue(XEN_LAZY_MMU);
preempt_enable(); preempt_enable();
} }
@@ -1265,7 +1265,7 @@ static void xen_flush_tlb_one_user(unsigned long addr)
op->arg1.linear_addr = addr & PAGE_MASK; op->arg1.linear_addr = addr & PAGE_MASK;
MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
xen_mc_issue(PARAVIRT_LAZY_MMU); xen_mc_issue(XEN_LAZY_MMU);
preempt_enable(); preempt_enable();
} }
@@ -1302,7 +1302,7 @@ static void xen_flush_tlb_multi(const struct cpumask *cpus,
MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF); MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
xen_mc_issue(PARAVIRT_LAZY_MMU); xen_mc_issue(XEN_LAZY_MMU);
} }
static unsigned long xen_read_cr3(void) static unsigned long xen_read_cr3(void)
@@ -1361,7 +1361,7 @@ static void xen_write_cr3(unsigned long cr3)
else else
__xen_write_cr3(false, 0); __xen_write_cr3(false, 0);
xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */ xen_mc_issue(XEN_LAZY_CPU); /* interrupts restored */
} }
/* /*
@@ -1396,7 +1396,7 @@ static void __init xen_write_cr3_init(unsigned long cr3)
__xen_write_cr3(true, cr3); __xen_write_cr3(true, cr3);
xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */ xen_mc_issue(XEN_LAZY_CPU); /* interrupts restored */
} }
static int xen_pgd_alloc(struct mm_struct *mm) static int xen_pgd_alloc(struct mm_struct *mm)
@@ -1557,7 +1557,7 @@ static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS && !pinned) if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS && !pinned)
__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
xen_mc_issue(PARAVIRT_LAZY_MMU); xen_mc_issue(XEN_LAZY_MMU);
} }
} }
@@ -1587,7 +1587,7 @@ static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
__set_pfn_prot(pfn, PAGE_KERNEL); __set_pfn_prot(pfn, PAGE_KERNEL);
xen_mc_issue(PARAVIRT_LAZY_MMU); xen_mc_issue(XEN_LAZY_MMU);
ClearPagePinned(page); ClearPagePinned(page);
} }
@@ -1804,7 +1804,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
*/ */
xen_mc_batch(); xen_mc_batch();
__xen_write_cr3(true, __pa(init_top_pgt)); __xen_write_cr3(true, __pa(init_top_pgt));
xen_mc_issue(PARAVIRT_LAZY_CPU); xen_mc_issue(XEN_LAZY_CPU);
/* We can't that easily rip out L3 and L2, as the Xen pagetables are /* We can't that easily rip out L3 and L2, as the Xen pagetables are
* set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for
@@ -2083,6 +2083,23 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
#endif #endif
} }
static void xen_enter_lazy_mmu(void)
{
enter_lazy(XEN_LAZY_MMU);
}
static void xen_flush_lazy_mmu(void)
{
preempt_disable();
if (xen_get_lazy_mode() == XEN_LAZY_MMU) {
arch_leave_lazy_mmu_mode();
arch_enter_lazy_mmu_mode();
}
preempt_enable();
}
static void __init xen_post_allocator_init(void) static void __init xen_post_allocator_init(void)
{ {
pv_ops.mmu.set_pte = xen_set_pte; pv_ops.mmu.set_pte = xen_set_pte;
@@ -2107,7 +2124,7 @@ static void xen_leave_lazy_mmu(void)
{ {
preempt_disable(); preempt_disable();
xen_mc_flush(); xen_mc_flush();
paravirt_leave_lazy_mmu(); leave_lazy(XEN_LAZY_MMU);
preempt_enable(); preempt_enable();
} }
@@ -2166,9 +2183,9 @@ static const typeof(pv_ops) xen_mmu_ops __initconst = {
.exit_mmap = xen_exit_mmap, .exit_mmap = xen_exit_mmap,
.lazy_mode = { .lazy_mode = {
.enter = paravirt_enter_lazy_mmu, .enter = xen_enter_lazy_mmu,
.leave = xen_leave_lazy_mmu, .leave = xen_leave_lazy_mmu,
.flush = paravirt_flush_lazy_mmu, .flush = xen_flush_lazy_mmu,
}, },
.set_fixmap = xen_set_fixmap, .set_fixmap = xen_set_fixmap,
@@ -2385,7 +2402,7 @@ static noinline void xen_flush_tlb_all(void)
op->cmd = MMUEXT_TLB_FLUSH_ALL; op->cmd = MMUEXT_TLB_FLUSH_ALL;
MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
xen_mc_issue(PARAVIRT_LAZY_MMU); xen_mc_issue(XEN_LAZY_MMU);
preempt_enable(); preempt_enable();
} }

View File

@@ -26,7 +26,7 @@ static inline void xen_mc_batch(void)
/* need to disable interrupts until this entry is complete */ /* need to disable interrupts until this entry is complete */
local_irq_save(flags); local_irq_save(flags);
trace_xen_mc_batch(paravirt_get_lazy_mode()); trace_xen_mc_batch(xen_get_lazy_mode());
__this_cpu_write(xen_mc_irq_flags, flags); __this_cpu_write(xen_mc_irq_flags, flags);
} }
@@ -44,7 +44,7 @@ static inline void xen_mc_issue(unsigned mode)
{ {
trace_xen_mc_issue(mode); trace_xen_mc_issue(mode);
if ((paravirt_get_lazy_mode() & mode) == 0) if ((xen_get_lazy_mode() & mode) == 0)
xen_mc_flush(); xen_mc_flush();
/* restore flags saved in xen_mc_batch */ /* restore flags saved in xen_mc_batch */

View File

@@ -1704,7 +1704,7 @@ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
generic_handle_irq(irq); generic_handle_irq(irq);
} }
static int __xen_evtchn_do_upcall(void) int xen_evtchn_do_upcall(void)
{ {
struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
int ret = vcpu_info->evtchn_upcall_pending ? IRQ_HANDLED : IRQ_NONE; int ret = vcpu_info->evtchn_upcall_pending ? IRQ_HANDLED : IRQ_NONE;
@@ -1735,24 +1735,7 @@ static int __xen_evtchn_do_upcall(void)
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(xen_evtchn_do_upcall);
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
irq_enter();
__xen_evtchn_do_upcall();
irq_exit();
set_irq_regs(old_regs);
}
int xen_hvm_evtchn_do_upcall(void)
{
return __xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);
/* Rebind a new event channel to an existing irq. */ /* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(evtchn_port_t evtchn, int irq) void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)

View File

@@ -64,7 +64,7 @@ static uint64_t get_callback_via(struct pci_dev *pdev)
static irqreturn_t do_hvm_evtchn_intr(int irq, void *dev_id) static irqreturn_t do_hvm_evtchn_intr(int irq, void *dev_id)
{ {
return xen_hvm_evtchn_do_upcall(); return xen_evtchn_do_upcall();
} }
static int xen_allocate_irq(struct pci_dev *pdev) static int xen_allocate_irq(struct pci_dev *pdev)

View File

@@ -6,26 +6,26 @@
#define _TRACE_XEN_H #define _TRACE_XEN_H
#include <linux/tracepoint.h> #include <linux/tracepoint.h>
#include <asm/paravirt_types.h> #include <asm/xen/hypervisor.h>
#include <asm/xen/trace_types.h> #include <asm/xen/trace_types.h>
struct multicall_entry; struct multicall_entry;
/* Multicalls */ /* Multicalls */
DECLARE_EVENT_CLASS(xen_mc__batch, DECLARE_EVENT_CLASS(xen_mc__batch,
TP_PROTO(enum paravirt_lazy_mode mode), TP_PROTO(enum xen_lazy_mode mode),
TP_ARGS(mode), TP_ARGS(mode),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(enum paravirt_lazy_mode, mode) __field(enum xen_lazy_mode, mode)
), ),
TP_fast_assign(__entry->mode = mode), TP_fast_assign(__entry->mode = mode),
TP_printk("start batch LAZY_%s", TP_printk("start batch LAZY_%s",
(__entry->mode == PARAVIRT_LAZY_MMU) ? "MMU" : (__entry->mode == XEN_LAZY_MMU) ? "MMU" :
(__entry->mode == PARAVIRT_LAZY_CPU) ? "CPU" : "NONE") (__entry->mode == XEN_LAZY_CPU) ? "CPU" : "NONE")
); );
#define DEFINE_XEN_MC_BATCH(name) \ #define DEFINE_XEN_MC_BATCH(name) \
DEFINE_EVENT(xen_mc__batch, name, \ DEFINE_EVENT(xen_mc__batch, name, \
TP_PROTO(enum paravirt_lazy_mode mode), \ TP_PROTO(enum xen_lazy_mode mode), \
TP_ARGS(mode)) TP_ARGS(mode))
DEFINE_XEN_MC_BATCH(xen_mc_batch); DEFINE_XEN_MC_BATCH(xen_mc_batch);

View File

@@ -7,18 +7,6 @@
extern struct shared_info *HYPERVISOR_shared_info; extern struct shared_info *HYPERVISOR_shared_info;
extern struct start_info *xen_start_info; extern struct start_info *xen_start_info;
/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
PARAVIRT_LAZY_NONE,
PARAVIRT_LAZY_MMU,
PARAVIRT_LAZY_CPU,
};
static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
{
return PARAVIRT_LAZY_NONE;
}
#ifdef CONFIG_XEN #ifdef CONFIG_XEN
void __init xen_early_init(void); void __init xen_early_init(void);
#else #else

View File

@@ -105,8 +105,7 @@ int irq_from_virq(unsigned int cpu, unsigned int virq);
evtchn_port_t evtchn_from_irq(unsigned irq); evtchn_port_t evtchn_from_irq(unsigned irq);
int xen_set_callback_via(uint64_t via); int xen_set_callback_via(uint64_t via);
void xen_evtchn_do_upcall(struct pt_regs *regs); int xen_evtchn_do_upcall(void);
int xen_hvm_evtchn_do_upcall(void);
/* Bind a pirq for a physical interrupt to an irq. */ /* Bind a pirq for a physical interrupt to an irq. */
int xen_bind_pirq_gsi_to_irq(unsigned gsi, int xen_bind_pirq_gsi_to_irq(unsigned gsi,