kaiser: asm/tlbflush.h handle noPGE at lower level
I found asm/tlbflush.h too twisty, and think it safer not to avoid __native_flush_tlb_global_irq_disabled() in the kaiser_enabled case, but instead let it handle kaiser_enabled along with cr3: it can just use __native_flush_tlb() for that, no harm in re-disabling preemption.

(This is not the same change as Kirill and Dave have suggested for upstream, flipping PGE in cr4: that's neat, but needs a cpu_has_pge check; cr3 is enough for kaiser, and thought to be cheaper than cr4.)

Also delete the X86_FEATURE_INVPCID invpcid_flush_all_nonglobals() preference from __native_flush_tlb(): unlike the invpcid_flush_all() preference in __native_flush_tlb_global(), it's not seen in upstream 4.14, and was recently reported to be surprisingly slow.

Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Jiri Kosina <jkosina@suse.cz>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
Parent: 28c6de5441
Commit: 0651b3ad99
@@ -151,14 +151,6 @@ static inline void kaiser_flush_tlb_on_return_to_user(void)
 
 static inline void __native_flush_tlb(void)
 {
-	if (this_cpu_has(X86_FEATURE_INVPCID)) {
-		/*
-		 * Note, this works with CR4.PCIDE=0 or 1.
-		 */
-		invpcid_flush_all_nonglobals();
-		return;
-	}
-
 	/*
 	 * If current->mm == NULL then we borrow a mm which may change during a
 	 * task switch and therefore we must not be preempted while we write CR3
@@ -182,11 +174,8 @@ static inline void __native_flush_tlb_global_irq_disabled(void)
 		/* restore PGE as it was before */
 		native_write_cr4(cr4);
 	} else {
-		/*
-		 * x86_64 microcode update comes this way when CR4.PGE is not
-		 * enabled, and it's safer for all callers to allow this case.
-		 */
-		native_write_cr3(native_read_cr3());
+		/* do it with cr3, letting kaiser flush user PCID */
+		__native_flush_tlb();
 	}
 }
 
@@ -194,12 +183,6 @@ static inline void __native_flush_tlb_global(void)
 {
 	unsigned long flags;
 
-	if (kaiser_enabled) {
-		/* Globals are not used at all */
-		__native_flush_tlb();
-		return;
-	}
-
 	if (this_cpu_has(X86_FEATURE_INVPCID)) {
 		/*
 		 * Using INVPCID is considerably faster than a pair of writes
@@ -255,11 +238,7 @@ static inline void __native_flush_tlb_single(unsigned long addr)
 
 static inline void __flush_tlb_all(void)
 {
-	if (cpu_has_pge)
-		__flush_tlb_global();
-	else
-		__flush_tlb();
-
+	__flush_tlb_global();
 	/*
 	 * Note: if we somehow had PCID but not PGE, then this wouldn't work --
 	 * we'd end up flushing kernel translations for the current ASID but
Loading…
x
Reference in New Issue
Block a user