/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SYNC_CORE_H
#define _ASM_X86_SYNC_CORE_H

#include <linux/preempt.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>

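/*
 * IRET-to-self: build an IRET frame that simply returns to the next
 * instruction.  IRET is architecturally serializing, so this forces the
 * CPU to discard any prefetched or speculated instruction state;
 * sync_core() below relies on it when SERIALIZE is not available.
 */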
#ifdef CONFIG_X86_32
static inline void iret_to_self(void)
{
	asm volatile (
		"pushfl\n\t"
		"pushl %%cs\n\t"
		"pushl $1f\n\t"
		"iret\n\t"
		"1:"
		: ASM_CALL_CONSTRAINT : : "memory");
}
#else
static inline void iret_to_self(void)
{
	unsigned int tmp;

	asm volatile (
		"mov %%ss, %0\n\t"
		"pushq %q0\n\t"			/* SS */
		"pushq %%rsp\n\t"		/* RSP */
		"addq $8, (%%rsp)\n\t"		/* adjust for the SS push so IRETQ restores the original RSP */
		"pushfq\n\t"			/* RFLAGS */
		"mov %%cs, %0\n\t"
		"pushq %q0\n\t"			/* CS */
		"pushq $1f\n\t"			/* RIP = the 1: label below */
		"iretq\n\t"
		"1:"
		: "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
}
#endif /* CONFIG_X86_32 */

/*
 * This function forces the icache and prefetched instruction stream to
 * catch up with reality in two very specific cases:
 *
 *  a) Text was modified using one virtual address and is about to be executed
 *     from the same physical page at a different virtual address.
 *
 *  b) Text was modified on a different CPU, may subsequently be
 *     executed on this CPU, and you want to make sure the new version
 *     gets executed.  This generally means you're calling this in an IPI.
 *
 * If you're calling this for a different reason, you're probably doing
 * it wrong.
 *
 * Like all of Linux's memory ordering operations, this is a
 * compiler barrier as well.
 */
static inline void sync_core(void)
{
	/*
	 * The SERIALIZE instruction is the most straightforward way to
	 * do this, but it is not universally available.
	 */
	if (static_cpu_has(X86_FEATURE_SERIALIZE)) {
		serialize();
		return;
	}

	/*
	 * For all other processors, there are quite a few ways to do this.
	 * IRET-to-self is nice because it works on every CPU, at any CPL
	 * (so it's compatible with paravirtualization), and it never exits
	 * to a hypervisor.  The only downsides are that it's a bit slow
	 * (it seems to be a bit more than 2x slower than the fastest
	 * options) and that it unmasks NMIs.  The "push %cs" is needed
	 * because, in paravirtual environments, __KERNEL_CS may not be a
	 * valid CS value when we do IRET directly.
	 *
	 * In case NMI unmasking or performance ever becomes a problem,
	 * the next best option appears to be MOV-to-CR2 and an
	 * unconditional jump.  That sequence also works on all CPUs,
	 * but it will fault at CPL3 (i.e. Xen PV).
	 *
	 * CPUID is the conventional way, but it's nasty: it doesn't
	 * exist on some 486-like CPUs, and it usually exits to a
	 * hypervisor.
	 */
	iret_to_self();
}
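
/*
 * Usage sketch for case (b) above (illustrative only: finish_text_poke() is
 * a made-up name, though the do_sync_core()/on_each_cpu() pattern mirrors
 * how the kernel's text-poking code uses sync_core()).  After patching
 * instructions on one CPU, force every CPU to serialize before the modified
 * code may run:
 *
 *	static void do_sync_core(void *info)
 *	{
 *		sync_core();
 *	}
 *
 *	static void finish_text_poke(void)
 *	{
 *		on_each_cpu(do_sync_core, NULL, 1);
 *	}
 */
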
/*
 * Ensure that a core serializing instruction is issued before returning
 * to user-mode.  x86 implements return to user-space through sysexit,
 * sysretl, and sysretq, which are not core serializing.
 */
static inline void sync_core_before_usermode(void)
{
	/* With PTI, we unconditionally serialize before running user code. */
	if (static_cpu_has(X86_FEATURE_PTI))
		return;

	/*
	 * Even if we're in an interrupt, we might reschedule before returning,
	 * in which case we could switch to a different thread in the same mm
	 * and return using SYSRET or SYSEXIT.  Instead of trying to keep
	 * track of our need to sync the core, just sync right away.
	 */
	sync_core();
}
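
/*
 * Illustrative caller sketch (hypothetical: ipi_sync_core() is named here
 * only for the example).  The expected user is code that must guarantee a
 * serializing instruction before a possibly-rewritten task returns to
 * user-mode, e.g. the IPI side of
 * membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE):
 *
 *	static void ipi_sync_core(void *info)
 *	{
 *		sync_core_before_usermode();
 *	}
 */
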
#endif /* _ASM_X86_SYNC_CORE_H */