Merge tag 'x86_urgent_for_v6.2_rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:

 - Make sure the poking PGD is pinned for Xen PV, as it requires it this way

 - Fix two resctrl races when moving a task or creating a new monitoring group

 - Fix SEV-SNP guests running under Hyper-V with MTRRs disabled so that memremap() does not return a UC- mapping type and cause a serious slowdown

 - Fix insn mnemonics in bioscall.S now that binutils is starting to fix ambiguous insn suffixes

* tag 'x86_urgent_for_v6.2_rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm: fix poking_init() for Xen PV guests
  x86/resctrl: Fix event counts regression in reused RMIDs
  x86/resctrl: Fix task CLOSID/RMID update race
  x86/pat: Fix pat_x_mtrr_type() for MTRR disabled case
  x86/boot: Avoid using Intel mnemonics in AT&T syntax asm
commit f0f70ddb8f
@@ -32,7 +32,7 @@ intcall:
 	movw	%dx, %si
 	movw	%sp, %di
 	movw	$11, %cx
-	rep; movsd
+	rep; movsl
 
 	/* Pop full state from the stack */
 	popal
@@ -67,7 +67,7 @@ intcall:
 	jz	4f
 	movw	%sp, %si
 	movw	$11, %cx
-	rep; movsd
+	rep; movsl
 4:	addw	$44, %sp
 
 	/* Restore state and return */
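The two hunks above are from arch/x86/boot/bioscall.S. In AT&T syntax, `movsd` collides with the SSE2 scalar-double mnemonic, so newer binutils prefer the explicit 32-bit string-move suffix `movsl`; the generated instruction is unchanged. Below is a minimal, standalone C sketch (not kernel code, names local to the example) of what `rep; movsl` with %cx = 11 does: copy eleven 32-bit words, i.e. the 44 bytes that the `addw $44, %sp` afterwards discards from the stack.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* One "rep; movsl": copy count 32-bit words from src to dst. */
static void rep_movsl(uint32_t *dst, const uint32_t *src, uint32_t count)
{
	while (count--)
		*dst++ = *src++;	/* one movsl step */
}

int main(void)
{
	uint32_t regs[11] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };
	uint32_t copy[11] = { 0 };

	rep_movsl(copy, regs, 11);
	printf("copied %zu bytes\n", sizeof(copy));	/* 44, matching addw $44, %sp */
	return memcmp(copy, regs, sizeof(regs)) == 0 ? 0 : 1;
}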
@@ -146,6 +146,30 @@ static inline struct rmid_entry *__rmid_entry(u32 rmid)
 	return entry;
 }
 
+static int __rmid_read(u32 rmid, enum resctrl_event_id eventid, u64 *val)
+{
+	u64 msr_val;
+
+	/*
+	 * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
+	 * with a valid event code for supported resource type and the bits
+	 * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID,
+	 * IA32_QM_CTR.data (bits 61:0) reports the monitored data.
+	 * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
+	 * are error bits.
+	 */
+	wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
+	rdmsrl(MSR_IA32_QM_CTR, msr_val);
+
+	if (msr_val & RMID_VAL_ERROR)
+		return -EIO;
+	if (msr_val & RMID_VAL_UNAVAIL)
+		return -EINVAL;
+
+	*val = msr_val;
+	return 0;
+}
+
 static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_domain *hw_dom,
 						 u32 rmid,
 						 enum resctrl_event_id eventid)
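This hunk, from resctrl's monitor.c, introduces __rmid_read() as the one place that programs IA32_QM_EVTSEL and decodes IA32_QM_CTR. The following is a standalone C sketch of the bit layout described in the comment, with names local to the example; note that the kernel helper returns the raw MSR value, so the explicit data mask here is only for illustration.

#include <stdint.h>
#include <errno.h>
#include <stdio.h>

#define QM_CTR_ERROR	(1ULL << 63)		/* IA32_QM_CTR.Error       */
#define QM_CTR_UNAVAIL	(1ULL << 62)		/* IA32_QM_CTR.Unavailable */
#define QM_CTR_DATA	((1ULL << 62) - 1)	/* bits 61:0               */

static int decode_qm_ctr(uint64_t msr_val, uint64_t *val)
{
	if (msr_val & QM_CTR_ERROR)
		return -EIO;		/* bad event code or RMID */
	if (msr_val & QM_CTR_UNAVAIL)
		return -EINVAL;		/* count not available */

	*val = msr_val & QM_CTR_DATA;	/* example-only mask of the data field */
	return 0;
}

int main(void)
{
	uint64_t val = 0;
	int ret = decode_qm_ctr(0x1234, &val);

	printf("ret=%d val=%llu\n", ret, (unsigned long long)val);
	return 0;
}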
@@ -172,8 +196,12 @@ void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
 	struct arch_mbm_state *am;
 
 	am = get_arch_mbm_state(hw_dom, rmid, eventid);
-	if (am)
+	if (am) {
 		memset(am, 0, sizeof(*am));
+
+		/* Record any initial, non-zero count value. */
+		__rmid_read(rmid, eventid, &am->prev_msr);
+	}
 }
 
 static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
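Resetting the architectural state now also records whatever the hardware counter currently holds, because an RMID handed back out to a new monitoring group still carries the previous user's counts. A toy, purely illustrative simulation (hypothetical names, not kernel code) of the over-reporting this baseline prevents:

#include <stdint.h>
#include <stdio.h>

struct group { uint64_t baseline; };

static uint64_t hw_counter = 1000;	/* residue left by the RMID's previous user */

static void reuse_rmid(struct group *g, int record_baseline)
{
	g->baseline = record_baseline ? hw_counter : 0;
}

static uint64_t read_group(const struct group *g)
{
	return hw_counter - g->baseline;
}

int main(void)
{
	struct group old_behaviour, fixed;

	reuse_rmid(&old_behaviour, 0);	/* no baseline recorded */
	reuse_rmid(&fixed, 1);		/* baseline recorded at reset */
	hw_counter += 50;		/* work actually done by the new group */

	printf("without baseline: %llu, with baseline: %llu\n",
	       (unsigned long long)read_group(&old_behaviour),	/* 1050 */
	       (unsigned long long)read_group(&fixed));		/* 50   */
	return 0;
}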
@@ -191,25 +219,14 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
 	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
 	struct arch_mbm_state *am;
 	u64 msr_val, chunks;
+	int ret;
 
 	if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))
 		return -EINVAL;
 
-	/*
-	 * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
-	 * with a valid event code for supported resource type and the bits
-	 * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID,
-	 * IA32_QM_CTR.data (bits 61:0) reports the monitored data.
-	 * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
-	 * are error bits.
-	 */
-	wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
-	rdmsrl(MSR_IA32_QM_CTR, msr_val);
-
-	if (msr_val & RMID_VAL_ERROR)
-		return -EIO;
-	if (msr_val & RMID_VAL_UNAVAIL)
-		return -EINVAL;
+	ret = __rmid_read(rmid, eventid, &msr_val);
+	if (ret)
+		return ret;
 
 	am = get_arch_mbm_state(hw_dom, rmid, eventid);
 	if (am) {
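With the helper in place, resctrl_arch_rmid_read() keeps only the domain check and the chunk accounting and simply propagates __rmid_read()'s error code. A standalone sketch (hypothetical names, not the kernel implementation) of that shape: read raw, bail out on error, report the width-limited delta against the recorded baseline, in the spirit of the mbm_overflow_count() helper visible in the context above.

#include <stdint.h>
#include <stdio.h>

struct mbm_state {
	uint64_t prev_msr;	/* baseline captured when the RMID was reset */
};

/* Stand-in for __rmid_read(): returns 0 on success, -errno on error bits. */
static int read_counter_raw(uint64_t *val)
{
	*val = 0x123456;	/* fake MSR value for the example */
	return 0;
}

static int read_counter_delta(struct mbm_state *am, unsigned int width,
			      uint64_t *delta)
{
	uint64_t cur, shift = 64 - width;
	int ret = read_counter_raw(&cur);

	if (ret)
		return ret;	/* propagate the error unchanged */

	/* Shift up and back down so wraparound at the counter width is handled. */
	*delta = ((cur << shift) - (am->prev_msr << shift)) >> shift;
	am->prev_msr = cur;
	return 0;
}

int main(void)
{
	struct mbm_state am = { .prev_msr = 0x100000 };
	uint64_t delta;

	if (!read_counter_delta(&am, 24, &delta))
		printf("delta=%llu chunks\n", (unsigned long long)delta);
	return 0;
}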
@@ -580,8 +580,10 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
 	/*
 	 * Ensure the task's closid and rmid are written before determining if
 	 * the task is current that will decide if it will be interrupted.
+	 * This pairs with the full barrier between the rq->curr update and
+	 * resctrl_sched_in() during context switch.
 	 */
-	barrier();
+	smp_mb();
 
 	/*
 	 * By now, the task's closid and rmid are set. If the task is current
@@ -2401,6 +2403,14 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
 			WRITE_ONCE(t->closid, to->closid);
 			WRITE_ONCE(t->rmid, to->mon.rmid);
 
+			/*
+			 * Order the closid/rmid stores above before the loads
+			 * in task_curr(). This pairs with the full barrier
+			 * between the rq->curr update and resctrl_sched_in()
+			 * during context switch.
+			 */
+			smp_mb();
+
 			/*
 			 * If the task is on a CPU, set the CPU in the mask.
 			 * The detection is inaccurate as tasks might move or
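Both rdtgroup.c hunks above upgrade compiler-only ordering to a full smp_mb(), so the CLOSID/RMID stores are ordered before the subsequent task_curr() check and pair with the full barrier on the context-switch side; otherwise a task being scheduled in concurrently could keep running with stale IDs without ever being interrupted. The following is a standalone C11 sketch of that store/fence/load pairing (illustrative only, not the kernel code; names are local to the example). With a full fence on each side, at least one side is guaranteed to observe the other's store, so an update cannot be missed.

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int task_closid;		/* stands in for t->closid          */
static _Atomic int task_on_cpu;		/* stands in for "task is current"  */

/* resctrl side: move the task to a new CLOSID, then check if it runs. */
static int move_task(int new_closid)
{
	atomic_store_explicit(&task_closid, new_closid, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* the added smp_mb() */
	return atomic_load_explicit(&task_on_cpu, memory_order_relaxed);
}

/* scheduler side: the task becomes current, then loads its CLOSID. */
static int schedule_in(void)
{
	atomic_store_explicit(&task_on_cpu, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* barrier on the switch path */
	return atomic_load_explicit(&task_closid, memory_order_relaxed);
}

int main(void)
{
	printf("needs interrupt: %d, closid seen: %d\n",
	       move_task(1), schedule_in());
	return 0;
}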
@@ -26,6 +26,7 @@
 #include <asm/pti.h>
 #include <asm/text-patching.h>
 #include <asm/memtype.h>
+#include <asm/paravirt.h>
 
 /*
  * We need to define the tracepoints somewhere, and tlb.c
@@ -804,6 +805,9 @@ void __init poking_init(void)
 	poking_mm = mm_alloc();
 	BUG_ON(!poking_mm);
 
+	/* Xen PV guests need the PGD to be pinned. */
+	paravirt_arch_dup_mmap(NULL, poking_mm);
+
 	/*
 	 * Randomize the poking address, but make sure that the following page
 	 * will be mapped at the same PMD. We need 2 pages, so find space for 3,
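The added paravirt_arch_dup_mmap() call in arch/x86/mm/init.c runs the paravirt MMU hook on the freshly allocated poking_mm: on bare metal it does nothing, while a Xen PV guest uses it to pin the new PGD, which Xen requires before the page table can be loaded. A simplified, assumed sketch of that hook-dispatch pattern follows; all names are local to the example and not the real paravirt interface.

#include <stdio.h>

struct mm { void *pgd; };

struct pv_mmu_ops {
	void (*dup_mmap)(struct mm *oldmm, struct mm *mm);
};

/* Bare metal: duplicating an mm needs no hypervisor involvement. */
static void noop_dup_mmap(struct mm *oldmm, struct mm *mm) { }

/* Xen PV stand-in: a real guest would pin mm->pgd with the hypervisor here. */
static void xen_pv_dup_mmap(struct mm *oldmm, struct mm *mm)
{
	printf("pinning PGD %p\n", mm->pgd);
}

static struct pv_mmu_ops pv_ops = { .dup_mmap = noop_dup_mmap };

int main(void)
{
	struct mm poking_mm = { .pgd = &poking_mm };

	pv_ops.dup_mmap = xen_pv_dup_mmap;	/* as if booted as a Xen PV guest */
	pv_ops.dup_mmap(NULL, &poking_mm);	/* mirrors the added call above   */
	return 0;
}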
@@ -387,7 +387,8 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end,
 	u8 mtrr_type, uniform;
 
 	mtrr_type = mtrr_type_lookup(start, end, &uniform);
-	if (mtrr_type != MTRR_TYPE_WRBACK)
+	if (mtrr_type != MTRR_TYPE_WRBACK &&
+	    mtrr_type != MTRR_TYPE_INVALID)
 		return _PAGE_CACHE_MODE_UC_MINUS;
 
 	return _PAGE_CACHE_MODE_WB;
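In arch/x86/mm/pat/memtype.c, mtrr_type_lookup() returns MTRR_TYPE_INVALID when MTRRs are disabled (as on SEV-SNP guests under Hyper-V); before this fix that case fell into the UC- branch, degrading every WB memremap() to effectively uncached. A small standalone sketch (local enum names, not the kernel types) of the corrected decision:

#include <stdio.h>

enum mtrr_type { MTRR_UC, MTRR_WB, MTRR_INVALID };
enum cache_mode { CM_WB, CM_UC_MINUS };

static enum cache_mode pat_x_mtrr(enum mtrr_type t)
{
	if (t != MTRR_WB && t != MTRR_INVALID)
		return CM_UC_MINUS;	/* honour a conflicting MTRR type */
	return CM_WB;			/* WB, or MTRRs disabled: keep WB */
}

int main(void)
{
	printf("disabled MTRRs -> %s\n",
	       pat_x_mtrr(MTRR_INVALID) == CM_WB ? "WB" : "UC-");
	return 0;
}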