* Add a warning when memory encryption conversions fail. These
  operations require VMM cooperation, even in CoCo environments
  where the VMM is untrusted. While it's _possible_ that memory
  pressure could trigger the new warning, the odds are that a guest
  would only see this from an attacking VMM.

* Simplify page fault code by re-enabling interrupts unconditionally

* Avoid truncation issues when pfns are passed in to pfn_to_kaddr()
  with small (<64-bit) types.

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEV76QKkVc4xCGURexaDWVMHDJkrAFAmXvPnEACgkQaDWVMHDJ
krAP/RAArzmgZ4TkOSMxVi8nIB45Fd5f9HVhw7cK69hkdTA+WdH7JrAfyNAyzA7S
NZdFk6gmp2HUpe+GmipQ3le1UJgCrNZyXvGYU3capm9O1Ql9BOl7htTpoCxCwwDr
RRRKnTcRKPKjAU1pX9eTvJGFBKl4UgYgliVMMhrIdgsExJxacSYiHm1C7BLprWVI
VBfnf3qAolW+zK4RnJD6uqrm4Xzq/WkZiMlpEP/whBy8AhbffwYho+12a7j3fCtM
j9K6fqvuh3U3XsVn9jOHFeS0NcPdBDBxrN21yD9CtsFA8cKcuJXeO0D7XqlGBvIj
TlHJz1YByRyWXP92MI2SQ9DBhE21pAiaWEGHz35z0XR6BcTyiLggI2DjVhFkKURg
aLBbrd0qG6PRefvz13SOmGTa87OJusgzXdX7FKvyMtGC7hEBf9zQo+E+JEog+mQ1
sqaCWXEC3Oc2WZPeUbLRKw1Q0w0CIZJznu1vtYOOt3iZllvS4RPZMauTFw7iQtVb
CKmGy7zWqpchQ75ZsXgU1Exhw8/NnrBL/SsfdfRIG9Bn3laUcEwlRXFf/IgPOFbF
dZfd0UJoriIbGw5Zz8ZxVIl9qsB8UdWSZNOlMQe0nswH75L1FWysqA+jwgdMdgm6
/jahkn6oVfj4Eu71GNYkEUIEatdoiI9KpbyteLHUTPQwUspz6gc=
=4QdP
-----END PGP SIGNATURE-----

Merge tag 'x86_mm_for_6.9' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 mm updates from Dave Hansen:

 - Add a warning when memory encryption conversions fail. These
   operations require VMM cooperation, even in CoCo environments
   where the VMM is untrusted. While it's _possible_ that memory
   pressure could trigger the new warning, the odds are that a guest
   would only see this from an attacking VMM.

 - Simplify page fault code by re-enabling interrupts unconditionally

 - Avoid truncation issues when pfns are passed in to pfn_to_kaddr()
   with small (<64-bit) types.

* tag 'x86_mm_for_6.9' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm/cpa: Warn for set_memory_XXcrypted() VMM fails
  x86/mm: Get rid of conditional IF flag handling in page fault path
  x86/mm: Ensure input to pfn_to_kaddr() is treated as a 64-bit type
commit 555b684190
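For context on the first item: a CoCo guest flips a page between private and shared by asking the VMM to change its encryption status, typically through set_memory_decrypted()/set_memory_encrypted(). A hypothetical driver fragment (illustration only, not part of this series; the helper name is made up) showing where the new warn-and-fail behavior would surface:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/set_memory.h>

/* Hypothetical helper: share one page with the host in a CoCo guest. */
static void *share_page_with_host(void)
{
        struct page *page = alloc_page(GFP_KERNEL);

        if (!page)
                return NULL;

        /*
         * The conversion needs VMM cooperation. After this series, a
         * VMM refusal fires WARN_ONCE in the guest and returns -EIO here.
         */
        if (set_memory_decrypted((unsigned long)page_address(page), 1)) {
                /* Page state is indeterminate: leak it rather than reuse it. */
                return NULL;
        }

        return page_address(page);
}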
arch/x86/include/asm/page.h

@@ -66,10 +66,14 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
  * virt_addr_valid(kaddr) returns true.
  */
 #define virt_to_page(kaddr)     pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define pfn_to_kaddr(pfn)       __va((pfn) << PAGE_SHIFT)
 extern bool __virt_addr_valid(unsigned long kaddr);
 #define virt_addr_valid(kaddr)  __virt_addr_valid((unsigned long) (kaddr))
 
+static __always_inline void *pfn_to_kaddr(unsigned long pfn)
+{
+       return __va(pfn << PAGE_SHIFT);
+}
+
 static __always_inline u64 __canonical_address(u64 vaddr, u8 vaddr_bits)
 {
        return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
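The defect being fixed is ordinary C arithmetic: in the old macro the shift is performed in the caller's type, so a pfn held in a 32-bit variable drops its high bits before __va() ever sees it. The inline function forces promotion to unsigned long (64-bit on x86_64) first. A minimal user-space sketch of the same effect (PAGE_SHIFT hard-coded and helper names invented for illustration):

#include <stdio.h>

#define PAGE_SHIFT 12

/* Old style: the shift happens in the caller's (possibly 32-bit) type. */
#define pfn_to_paddr_macro(pfn) ((pfn) << PAGE_SHIFT)

/* New style: the argument is promoted to 64 bits before the shift. */
static unsigned long long pfn_to_paddr_func(unsigned long long pfn)
{
        return pfn << PAGE_SHIFT;
}

int main(void)
{
        unsigned int pfn = 0x200000;    /* physical address 8GB, pfn in a 32-bit type */

        /* 0x200000 << 12 wraps in 32-bit arithmetic: prints 0 */
        printf("macro: %#llx\n", (unsigned long long)pfn_to_paddr_macro(pfn));
        /* promotion first, then shift: prints 0x200000000 */
        printf("func:  %#llx\n", pfn_to_paddr_func(pfn));
        return 0;
}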
arch/x86/mm/fault.c

@@ -1299,21 +1299,14 @@ void do_user_addr_fault(struct pt_regs *regs,
                return;
        }
 
-       /*
-        * It's safe to allow irq's after cr2 has been saved and the
-        * vmalloc fault has been handled.
-        *
-        * User-mode registers count as a user access even for any
-        * potential system fault or CPU buglet:
-        */
-       if (user_mode(regs)) {
-               local_irq_enable();
-               flags |= FAULT_FLAG_USER;
-       } else {
-               if (regs->flags & X86_EFLAGS_IF)
-                       local_irq_enable();
+       /* Legacy check - remove this after verifying that it doesn't trigger */
+       if (WARN_ON_ONCE(!(regs->flags & X86_EFLAGS_IF))) {
+               bad_area_nosemaphore(regs, error_code, address);
+               return;
        }
 
+       local_irq_enable();
+
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
        /*
@@ -1329,6 +1322,14 @@ void do_user_addr_fault(struct pt_regs *regs,
        if (error_code & X86_PF_INSTR)
                flags |= FAULT_FLAG_INSTRUCTION;
 
+       /*
+        * We set FAULT_FLAG_USER based on the register state, not
+        * based on X86_PF_USER. User space accesses that cause
+        * system page faults are still user accesses.
+        */
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
+
 #ifdef CONFIG_X86_64
        /*
         * Faults in the vsyscall page might need emulation. The
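Read together, the two hunks above turn the conditional IF handling into straight-line code. Reassembled from the diff (elisions mine), the new path is:

        /* Legacy check - remove this after verifying that it doesn't trigger */
        if (WARN_ON_ONCE(!(regs->flags & X86_EFLAGS_IF))) {
                bad_area_nosemaphore(regs, error_code, address);
                return;
        }

        local_irq_enable();

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

        /* ... flag setup elided ... */

        /*
         * We set FAULT_FLAG_USER based on the register state, not
         * based on X86_PF_USER. User space accesses that cause
         * system page faults are still user accesses.
         */
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;

A user-address fault taken with interrupts disabled was already a bug (fault handling may sleep), so the WARN_ON_ONCE both documents and enforces that invariant, and everything after it can simply enable interrupts unconditionally.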
arch/x86/mm/pat/set_memory.c

@@ -2157,7 +2157,7 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
 
        /* Notify hypervisor that we are about to set/clr encryption attribute. */
        if (!x86_platform.guest.enc_status_change_prepare(addr, numpages, enc))
-               return -EIO;
+               goto vmm_fail;
 
        ret = __change_page_attr_set_clr(&cpa, 1);
 
@@ -2170,13 +2170,20 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
         */
        cpa_flush(&cpa, 0);
 
-       /* Notify hypervisor that we have successfully set/clr encryption attribute. */
-       if (!ret) {
-               if (!x86_platform.guest.enc_status_change_finish(addr, numpages, enc))
-                       ret = -EIO;
-       }
+       if (ret)
+               return ret;
 
-       return ret;
+       /* Notify hypervisor that we have successfully set/clr encryption attribute. */
+       if (!x86_platform.guest.enc_status_change_finish(addr, numpages, enc))
+               goto vmm_fail;
+
+       return 0;
+
+vmm_fail:
+       WARN_ONCE(1, "CPA VMM failure to convert memory (addr=%p, numpages=%d) to %s.\n",
+                 (void *)addr, numpages, enc ? "private" : "shared");
+
+       return -EIO;
 }
 
 static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
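Net effect of these two hunks, reconstructed from the diff (setup code elided): both hypervisor notification failures now funnel into a single vmm_fail label, so every VMM refusal produces exactly one WARN_ONCE splat plus a clean -EIO, instead of a silent error return:

static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
{
        /* ... cpa setup elided ... */

        /* Notify hypervisor that we are about to set/clr encryption attribute. */
        if (!x86_platform.guest.enc_status_change_prepare(addr, numpages, enc))
                goto vmm_fail;

        ret = __change_page_attr_set_clr(&cpa, 1);
        cpa_flush(&cpa, 0);

        if (ret)
                return ret;

        /* Notify hypervisor that we have successfully set/clr encryption attribute. */
        if (!x86_platform.guest.enc_status_change_finish(addr, numpages, enc))
                goto vmm_fail;

        return 0;

vmm_fail:
        WARN_ONCE(1, "CPA VMM failure to convert memory (addr=%p, numpages=%d) to %s.\n",
                  (void *)addr, numpages, enc ? "private" : "shared");
        return -EIO;
}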