Merge tag 'x86-urgent-2023-10-01' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
 "Misc fixes: a kerneldoc build warning fix, add SRSO mitigation for
  AMD-derived Hygon processors, and fix a SGX kernel crash in the page
  fault handler that can trigger when ksgxd races to reclaim the SECS
  special page, by making the SECS page unswappable"

* tag 'x86-urgent-2023-10-01' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/sgx: Resolve SECS reclaim vs. page fault for EAUG race
  x86/srso: Add SRSO mitigation for Hygon processors
  x86/kgdb: Fix a kerneldoc warning when build with W=1

Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit ec8c298121
arch/x86/kernel/cpu/common.c:

@@ -1303,7 +1303,7 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
 	VULNBL_AMD(0x15, RETBLEED),
 	VULNBL_AMD(0x16, RETBLEED),
 	VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO),
-	VULNBL_HYGON(0x18, RETBLEED | SMT_RSB),
+	VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO),
 	VULNBL_AMD(0x19, SRSO),
 	{}
 };
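Note: the table change above only marks Hygon family 0x18 parts as SRSO-affected; selecting the actual mitigation happens later during bug setup. As a minimal sketch of how a family-keyed blacklist lookup like this works — with invented struct and function names standing in for the kernel's real x86_cpu_id/cpu_matches() machinery — consider:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's vulnerability flag bits. */
#define RETBLEED (1u << 0)
#define SMT_RSB  (1u << 1)
#define SRSO     (1u << 2)

enum vendor { VENDOR_AMD, VENDOR_HYGON };

/* Simplified stand-in for struct x86_cpu_id: match on vendor + family,
 * with the CPU model effectively wildcarded. */
struct vuln_entry {
	enum vendor vendor;
	uint8_t     family;
	uint32_t    vulns;   /* bitmask of vulnerabilities for this family */
};

static const struct vuln_entry blacklist[] = {
	{ VENDOR_AMD,   0x17, RETBLEED | SMT_RSB | SRSO },
	{ VENDOR_HYGON, 0x18, RETBLEED | SMT_RSB | SRSO }, /* the new entry */
	{ VENDOR_AMD,   0x19, SRSO },
};

/* Rough model of cpu_matches(): does this vendor/family hit an entry
 * with the given vulnerability bit set? */
static int cpu_matches(enum vendor v, uint8_t family, uint32_t which)
{
	for (size_t i = 0; i < sizeof(blacklist) / sizeof(blacklist[0]); i++)
		if (blacklist[i].vendor == v && blacklist[i].family == family &&
		    (blacklist[i].vulns & which))
			return 1;
	return 0;
}

int main(void)
{
	/* Hygon family 0x18 (Dhyana, derived from AMD Zen) now matches SRSO. */
	printf("Hygon 0x18 SRSO-affected: %d\n",
	       cpu_matches(VENDOR_HYGON, 0x18, SRSO));
	return 0;
}

Keying on vendor plus family with the model wildcarded is what lets a single table line cover every Hygon family-0x18 part.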
arch/x86/kernel/cpu/sgx/encl.c:

@@ -235,6 +235,21 @@ static struct sgx_epc_page *sgx_encl_eldu(struct sgx_encl_page *encl_page,
 	return epc_page;
 }
 
+/*
+ * Ensure the SECS page is not swapped out. Must be called with encl->lock
+ * to protect the enclave states including SECS and ensure the SECS page is
+ * not swapped out again while being used.
+ */
+static struct sgx_epc_page *sgx_encl_load_secs(struct sgx_encl *encl)
+{
+	struct sgx_epc_page *epc_page = encl->secs.epc_page;
+
+	if (!epc_page)
+		epc_page = sgx_encl_eldu(&encl->secs, NULL);
+
+	return epc_page;
+}
+
 static struct sgx_encl_page *__sgx_encl_load_page(struct sgx_encl *encl,
 						  struct sgx_encl_page *entry)
 {
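Note that sgx_encl_load_secs() reports failure through the returned pointer itself, per the kernel's ERR_PTR/IS_ERR/PTR_ERR convention — hence the ERR_CAST() and PTR_ERR() checks in the callers below. A minimal userspace model of that contract, with lowercase stand-in names for the real kernel macros:

#include <stdint.h>
#include <stdio.h>

/* Userspace model of the kernel's ERR_PTR/IS_ERR/PTR_ERR idiom: small
 * negative errno values are encoded into the very top of the address
 * space, where no valid object can live. */
#define MAX_ERRNO 4095

static void *err_ptr(long error)    /* models ERR_PTR() */
{
	return (void *)(uintptr_t)error;
}

static int is_err(const void *p)    /* models IS_ERR() */
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static long ptr_err(const void *p)  /* models PTR_ERR() */
{
	return (long)(intptr_t)p;
}

int main(void)
{
	void *page = err_ptr(-16);  /* e.g. -EBUSY */

	if (is_err(page))
		printf("load failed: errno %ld\n", -ptr_err(page));
	return 0;
}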
@@ -248,11 +263,9 @@ static struct sgx_encl_page *__sgx_encl_load_page(struct sgx_encl *encl,
 		return entry;
 	}
 
-	if (!(encl->secs.epc_page)) {
-		epc_page = sgx_encl_eldu(&encl->secs, NULL);
-		if (IS_ERR(epc_page))
-			return ERR_CAST(epc_page);
-	}
+	epc_page = sgx_encl_load_secs(encl);
+	if (IS_ERR(epc_page))
+		return ERR_CAST(epc_page);
 
 	epc_page = sgx_encl_eldu(entry, encl->secs.epc_page);
 	if (IS_ERR(epc_page))
@@ -339,6 +352,13 @@ static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,
 
 	mutex_lock(&encl->lock);
 
+	epc_page = sgx_encl_load_secs(encl);
+	if (IS_ERR(epc_page)) {
+		if (PTR_ERR(epc_page) == -EBUSY)
+			vmret = VM_FAULT_NOPAGE;
+		goto err_out_unlock;
+	}
+
 	epc_page = sgx_alloc_epc_page(encl_page, false);
 	if (IS_ERR(epc_page)) {
 		if (PTR_ERR(epc_page) == -EBUSY)
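This hunk is the heart of the fix: the EAUG fault path now (re)loads the SECS page under encl->lock before relying on it, so a concurrent ksgxd reclaim can no longer pull it out from underneath. A rough userspace sketch of the pattern — all names invented for illustration, with a mutex standing in for encl->lock and malloc/free for EPC load/reclaim:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative model: "secs_page" is a shared resource that a reclaimer
 * may free at any time.  The fault path must not assume it is resident. */
static pthread_mutex_t encl_lock = PTHREAD_MUTEX_INITIALIZER;
static int *secs_page;                  /* NULL == reclaimed/swapped out */

static int *load_secs_locked(void)      /* models sgx_encl_load_secs() */
{
	if (!secs_page) {
		secs_page = malloc(sizeof(*secs_page)); /* models ELDU reload */
		if (secs_page)
			*secs_page = 42;
	}
	return secs_page;
}

static void *reclaimer(void *arg)       /* models ksgxd */
{
	(void)arg;
	pthread_mutex_lock(&encl_lock);
	free(secs_page);                /* swap the SECS page out */
	secs_page = NULL;
	pthread_mutex_unlock(&encl_lock);
	return NULL;
}

static void fault_path(void)            /* models sgx_encl_eaug_page() */
{
	pthread_mutex_lock(&encl_lock);
	/* The fix: re-establish SECS under the lock instead of assuming it
	 * is still resident; the reclaimer cannot run in between. */
	if (!load_secs_locked()) {
		pthread_mutex_unlock(&encl_lock);
		return;
	}
	printf("EAUG sees SECS value %d\n", *secs_page);
	pthread_mutex_unlock(&encl_lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, reclaimer, NULL);
	pthread_join(t, NULL);
	fault_path();                   /* safe even after a reclaim */
	return 0;
}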
arch/x86/kernel/kgdb.c:

@@ -695,7 +695,6 @@ void kgdb_arch_exit(void)
 }
 
 /**
- *
  * kgdb_skipexception - Bail out of KGDB when we've been triggered.
  * @exception: Exception vector number
  * @regs: Current &struct pt_regs.
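For context: scripts/kernel-doc expects the "name - summary" line to come immediately after the /** opener, so the stray bare " *" line removed above is, to my understanding, what tripped the warning under W=1. A well-formed kerneldoc block on a trivial example function (not from the patch):

/**
 * add_two - Add two integers.
 * @a: First addend.
 * @b: Second addend.
 *
 * The summary line must immediately follow the opening marker; a blank
 * " *" line in that position is what the W=1 kerneldoc check flags.
 *
 * Return: The sum of @a and @b.
 */
static int add_two(int a, int b)
{
	return a + b;
}

int main(void)
{
	return add_two(2, 2) == 4 ? 0 : 1;
}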