A set of x86 fixes:
- Fix 32-bit PTI for real. pti_clone_entry_text() is called twice, once before initcalls so that initcalls can use the user-mode helper and then again after text is set read only. Setting read only on 32-bit might break up the PMD mapping, which makes the second invocation of pti_clone_entry_text() find the mappings out of sync and failing. Allow the second call to split the existing PMDs in the user mapping and synchronize with the kernel mapping. - Don't make acpi_mp_wake_mailbox read-only after init as the mail box must be writable in the case that CPU hotplug operations happen after boot. Otherwise the attempt to start a CPU crashes with a write to read only memory. - Add a missing sanity check in mtrr_save_state() to ensure that the fixed MTRR MSRs are supported. Otherwise mtrr_save_state() ends up in a #GP, which is fixed up, but the WARN_ON() can bring systems down when panic on warn is set. -----BEGIN PGP SIGNATURE----- iQJHBAABCgAxFiEEQp8+kY+LLUocC4bMphj1TA10mKEFAma4wzkTHHRnbHhAbGlu dXRyb25peC5kZQAKCRCmGPVMDXSYocE6D/4vYjgO04NtRIbWwiem0/O/jrwy9o4f qPztHvgtvCRZRwpzeV5afd7T5CggIg2U5cF6UbYB/v+VrjsYtdR//8h+BeXLUfXI ae9SQBHxP8APtlPxcaN78V/JshIx/HsqrM3icK7Kk8YPXenHKatFqmDYLbKWH4Uw 2T/cqrag/jYWEUnmoG4C+i4oPWZZ0l8c+lURR59euJXJuTxv0RZCkUbcB7bllDBD YgZ2cKAHAaIpercyJUv2JVp3uG8OCGT8aG4vjhapWgk+KDoYR9DTgtGXFZIxrSVR hV7aiZbAc5PRe9QyWpEymcU8NIjs8hlxqMTAlTqXfpp/CrFleKDtFJQXH0ueeGG7 pN2LWBjuXgCXmFjbSHYBIk9LZMtHHFlh886vVp2uzju4Y4yrCzIGTetqWyfjB9Ag MoifyvO/JHuhdMDad6I3Tpg0F7kZss8lMGmNqE8Zu67eQwNs9kXNLIN7u5eJqS6y taR+ove/WQu7tpKAn7TqTcM3TSVxesblkS47VWfqLfsSz9eED3cGUPZLBpniA5YD cq1DEzRLncmf5/mEuOl4GlNyNO+uwUq09G7akphwzWQ5BXxE0USXNW/BHWhXQ+KU oIeNsnkBgf+faFlvRCDOyweJCay/0kQaZ6jT5QbhWjo7Wvod1eseYm1aLN8eIiYV aKi/03yK8DveOw== =pS6f -----END PGP SIGNATURE----- Merge tag 'x86-urgent-2024-08-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip Pull x86 fixes from Thomas Gleixner: - Fix 32-bit PTI for real. 
pti_clone_entry_text() is called twice, once before initcalls so that initcalls can use the user-mode helper and then again after text is set read only. Setting read only on 32-bit might break up the PMD mapping, which makes the second invocation of pti_clone_entry_text() find the mappings out of sync and failing. Allow the second call to split the existing PMDs in the user mapping and synchronize with the kernel mapping. - Don't make acpi_mp_wake_mailbox read-only after init as the mail box must be writable in the case that CPU hotplug operations happen after boot. Otherwise the attempt to start a CPU crashes with a write to read only memory. - Add a missing sanity check in mtrr_save_state() to ensure that the fixed MTRR MSRs are supported. Otherwise mtrr_save_state() ends up in a #GP, which is fixed up, but the WARN_ON() can bring systems down when panic on warn is set. * tag 'x86-urgent-2024-08-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: x86/mtrr: Check if fixed MTRRs exist before saving them x86/paravirt: Fix incorrect virt spinlock setting on bare metal x86/acpi: Remove __ro_after_init from acpi_mp_wake_mailbox x86/mm: Fix PTI for i386 some more
This commit is contained in:
commit
7006fe2f7f
@ -66,13 +66,15 @@ static inline bool vcpu_is_preempted(long cpu)
|
|||||||
|
|
||||||
#ifdef CONFIG_PARAVIRT
|
#ifdef CONFIG_PARAVIRT
|
||||||
/*
|
/*
|
||||||
* virt_spin_lock_key - enables (by default) the virt_spin_lock() hijack.
|
* virt_spin_lock_key - disables by default the virt_spin_lock() hijack.
|
||||||
*
|
*
|
||||||
* Native (and PV wanting native due to vCPU pinning) should disable this key.
|
* Native (and PV wanting native due to vCPU pinning) should keep this key
|
||||||
* It is done in this backwards fashion to only have a single direction change,
|
* disabled. Native does not touch the key.
|
||||||
* which removes ordering between native_pv_spin_init() and HV setup.
|
*
|
||||||
|
* When in a guest then native_pv_lock_init() enables the key first and
|
||||||
|
* KVM/XEN might conditionally disable it later in the boot process again.
|
||||||
*/
|
*/
|
||||||
DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
|
DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Shortcut for the queued_spin_lock_slowpath() function that allows
|
* Shortcut for the queued_spin_lock_slowpath() function that allows
|
||||||
|
@ -19,7 +19,7 @@
|
|||||||
static u64 acpi_mp_wake_mailbox_paddr __ro_after_init;
|
static u64 acpi_mp_wake_mailbox_paddr __ro_after_init;
|
||||||
|
|
||||||
/* Virtual address of the Multiprocessor Wakeup Structure mailbox */
|
/* Virtual address of the Multiprocessor Wakeup Structure mailbox */
|
||||||
static struct acpi_madt_multiproc_wakeup_mailbox *acpi_mp_wake_mailbox __ro_after_init;
|
static struct acpi_madt_multiproc_wakeup_mailbox *acpi_mp_wake_mailbox;
|
||||||
|
|
||||||
static u64 acpi_mp_pgd __ro_after_init;
|
static u64 acpi_mp_pgd __ro_after_init;
|
||||||
static u64 acpi_mp_reset_vector_paddr __ro_after_init;
|
static u64 acpi_mp_reset_vector_paddr __ro_after_init;
|
||||||
|
@ -609,7 +609,7 @@ void mtrr_save_state(void)
|
|||||||
{
|
{
|
||||||
int first_cpu;
|
int first_cpu;
|
||||||
|
|
||||||
if (!mtrr_enabled())
|
if (!mtrr_enabled() || !mtrr_state.have_fixed)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
first_cpu = cpumask_first(cpu_online_mask);
|
first_cpu = cpumask_first(cpu_online_mask);
|
||||||
|
@ -51,13 +51,12 @@ DEFINE_ASM_FUNC(pv_native_irq_enable, "sti", .noinstr.text);
|
|||||||
DEFINE_ASM_FUNC(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text);
|
DEFINE_ASM_FUNC(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
|
DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);
|
||||||
|
|
||||||
void __init native_pv_lock_init(void)
|
void __init native_pv_lock_init(void)
|
||||||
{
|
{
|
||||||
if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) &&
|
if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
|
||||||
!boot_cpu_has(X86_FEATURE_HYPERVISOR))
|
static_branch_enable(&virt_spin_lock_key);
|
||||||
static_branch_disable(&virt_spin_lock_key);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void native_tlb_remove_table(struct mmu_gather *tlb, void *table)
|
static void native_tlb_remove_table(struct mmu_gather *tlb, void *table)
|
||||||
|
@ -241,7 +241,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
|
|||||||
*
|
*
|
||||||
* Returns a pointer to a PTE on success, or NULL on failure.
|
* Returns a pointer to a PTE on success, or NULL on failure.
|
||||||
*/
|
*/
|
||||||
static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
|
static pte_t *pti_user_pagetable_walk_pte(unsigned long address, bool late_text)
|
||||||
{
|
{
|
||||||
gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
|
gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
|
||||||
pmd_t *pmd;
|
pmd_t *pmd;
|
||||||
@ -251,11 +251,16 @@ static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
|
|||||||
if (!pmd)
|
if (!pmd)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
/* We can't do anything sensible if we hit a large mapping. */
|
/* Large PMD mapping found */
|
||||||
if (pmd_leaf(*pmd)) {
|
if (pmd_leaf(*pmd)) {
|
||||||
WARN_ON(1);
|
/* Clear the PMD if we hit a large mapping from the first round */
|
||||||
|
if (late_text) {
|
||||||
|
set_pmd(pmd, __pmd(0));
|
||||||
|
} else {
|
||||||
|
WARN_ON_ONCE(1);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if (pmd_none(*pmd)) {
|
if (pmd_none(*pmd)) {
|
||||||
unsigned long new_pte_page = __get_free_page(gfp);
|
unsigned long new_pte_page = __get_free_page(gfp);
|
||||||
@ -283,7 +288,7 @@ static void __init pti_setup_vsyscall(void)
|
|||||||
if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
|
if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
|
target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR, false);
|
||||||
if (WARN_ON(!target_pte))
|
if (WARN_ON(!target_pte))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
@ -301,7 +306,7 @@ enum pti_clone_level {
|
|||||||
|
|
||||||
static void
|
static void
|
||||||
pti_clone_pgtable(unsigned long start, unsigned long end,
|
pti_clone_pgtable(unsigned long start, unsigned long end,
|
||||||
enum pti_clone_level level)
|
enum pti_clone_level level, bool late_text)
|
||||||
{
|
{
|
||||||
unsigned long addr;
|
unsigned long addr;
|
||||||
|
|
||||||
@ -390,7 +395,7 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
|
|||||||
return;
|
return;
|
||||||
|
|
||||||
/* Allocate PTE in the user page-table */
|
/* Allocate PTE in the user page-table */
|
||||||
target_pte = pti_user_pagetable_walk_pte(addr);
|
target_pte = pti_user_pagetable_walk_pte(addr, late_text);
|
||||||
if (WARN_ON(!target_pte))
|
if (WARN_ON(!target_pte))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
@ -452,7 +457,7 @@ static void __init pti_clone_user_shared(void)
|
|||||||
phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
|
phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
|
||||||
pte_t *target_pte;
|
pte_t *target_pte;
|
||||||
|
|
||||||
target_pte = pti_user_pagetable_walk_pte(va);
|
target_pte = pti_user_pagetable_walk_pte(va, false);
|
||||||
if (WARN_ON(!target_pte))
|
if (WARN_ON(!target_pte))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
@ -475,7 +480,7 @@ static void __init pti_clone_user_shared(void)
|
|||||||
start = CPU_ENTRY_AREA_BASE;
|
start = CPU_ENTRY_AREA_BASE;
|
||||||
end = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);
|
end = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);
|
||||||
|
|
||||||
pti_clone_pgtable(start, end, PTI_CLONE_PMD);
|
pti_clone_pgtable(start, end, PTI_CLONE_PMD, false);
|
||||||
}
|
}
|
||||||
#endif /* CONFIG_X86_64 */
|
#endif /* CONFIG_X86_64 */
|
||||||
|
|
||||||
@ -492,11 +497,11 @@ static void __init pti_setup_espfix64(void)
|
|||||||
/*
|
/*
|
||||||
* Clone the populated PMDs of the entry text and force it RO.
|
* Clone the populated PMDs of the entry text and force it RO.
|
||||||
*/
|
*/
|
||||||
static void pti_clone_entry_text(void)
|
static void pti_clone_entry_text(bool late)
|
||||||
{
|
{
|
||||||
pti_clone_pgtable((unsigned long) __entry_text_start,
|
pti_clone_pgtable((unsigned long) __entry_text_start,
|
||||||
(unsigned long) __entry_text_end,
|
(unsigned long) __entry_text_end,
|
||||||
PTI_LEVEL_KERNEL_IMAGE);
|
PTI_LEVEL_KERNEL_IMAGE, late);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -571,7 +576,7 @@ static void pti_clone_kernel_text(void)
|
|||||||
* pti_set_kernel_image_nonglobal() did to clear the
|
* pti_set_kernel_image_nonglobal() did to clear the
|
||||||
* global bit.
|
* global bit.
|
||||||
*/
|
*/
|
||||||
pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE);
|
pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE, false);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* pti_clone_pgtable() will set the global bit in any PMDs
|
* pti_clone_pgtable() will set the global bit in any PMDs
|
||||||
@ -638,8 +643,15 @@ void __init pti_init(void)
|
|||||||
|
|
||||||
/* Undo all global bits from the init pagetables in head_64.S: */
|
/* Undo all global bits from the init pagetables in head_64.S: */
|
||||||
pti_set_kernel_image_nonglobal();
|
pti_set_kernel_image_nonglobal();
|
||||||
|
|
||||||
/* Replace some of the global bits just for shared entry text: */
|
/* Replace some of the global bits just for shared entry text: */
|
||||||
pti_clone_entry_text();
|
/*
|
||||||
|
* This is very early in boot. Device and Late initcalls can do
|
||||||
|
* modprobe before free_initmem() and mark_readonly(). This
|
||||||
|
* pti_clone_entry_text() allows those user-mode-helpers to function,
|
||||||
|
* but notably the text is still RW.
|
||||||
|
*/
|
||||||
|
pti_clone_entry_text(false);
|
||||||
pti_setup_espfix64();
|
pti_setup_espfix64();
|
||||||
pti_setup_vsyscall();
|
pti_setup_vsyscall();
|
||||||
}
|
}
|
||||||
@ -656,10 +668,11 @@ void pti_finalize(void)
|
|||||||
if (!boot_cpu_has(X86_FEATURE_PTI))
|
if (!boot_cpu_has(X86_FEATURE_PTI))
|
||||||
return;
|
return;
|
||||||
/*
|
/*
|
||||||
* We need to clone everything (again) that maps parts of the
|
* This is after free_initmem() (all initcalls are done) and we've done
|
||||||
* kernel image.
|
* mark_readonly(). Text is now NX which might've split some PMDs
|
||||||
|
* relative to the early clone.
|
||||||
*/
|
*/
|
||||||
pti_clone_entry_text();
|
pti_clone_entry_text(true);
|
||||||
pti_clone_kernel_text();
|
pti_clone_kernel_text();
|
||||||
|
|
||||||
debug_checkwx_user();
|
debug_checkwx_user();
|
||||||
|
Loading…
Reference in New Issue
Block a user