x86/entry/32: Put ESPFIX code into a macro
This makes it easier to split up the shared iret code path.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Pavel Machek <pavel@ucw.cz>
Cc: "H . Peter Anvin" <hpa@zytor.com>
Cc: linux-mm@kvack.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aliguori@amazon.com
Cc: daniel.gruss@iaik.tugraz.at
Cc: hughd@google.com
Cc: keescook@google.com
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Waiman Long <llong@redhat.com>
Cc: "David H . Gutteridge" <dhgutteridge@sympatico.ca>
Cc: joro@8bytes.org
Link: https://lkml.kernel.org/r/1531906876-13451-5-git-send-email-joro@8bytes.org
parent a6b744f3ce
commit 46eabca284
@@ -221,6 +221,54 @@
 	POP_GS_EX
 .endm
 
+.macro CHECK_AND_APPLY_ESPFIX
+#ifdef CONFIG_X86_ESPFIX32
+#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
+
+	ALTERNATIVE	"jmp .Lend_\@", "", X86_BUG_ESPFIX
+
+	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
+	/*
+	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
+	 * are returning to the kernel.
+	 * See comments in process.c:copy_thread() for details.
+	 */
+	movb	PT_OLDSS(%esp), %ah
+	movb	PT_CS(%esp), %al
+	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
+	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
+	jne	.Lend_\@	# returning to user-space with LDT SS
+
+	/*
+	 * Setup and switch to ESPFIX stack
+	 *
+	 * We're returning to userspace with a 16 bit stack. The CPU will not
+	 * restore the high word of ESP for us on executing iret... This is an
+	 * "official" bug of all the x86-compatible CPUs, which we can work
+	 * around to make dosemu and wine happy. We do this by preloading the
+	 * high word of ESP with the high word of the userspace ESP while
+	 * compensating for the offset by changing to the ESPFIX segment with
+	 * a base address that matches for the difference.
+	 */
+	mov	%esp, %edx			/* load kernel esp */
+	mov	PT_OLDESP(%esp), %eax		/* load userspace esp */
+	mov	%dx, %ax			/* eax: new kernel esp */
+	sub	%eax, %edx			/* offset (low word is 0) */
+	shr	$16, %edx
+	mov	%dl, GDT_ESPFIX_SS + 4		/* bits 16..23 */
+	mov	%dh, GDT_ESPFIX_SS + 7		/* bits 24..31 */
+	pushl	$__ESPFIX_SS
+	pushl	%eax				/* new kernel esp */
+	/*
+	 * Disable interrupts, but do not irqtrace this section: we
+	 * will soon execute iret and the tracer was already set to
+	 * the irqstate after the IRET:
+	 */
+	DISABLE_INTERRUPTS(CLBR_ANY)
+	lss	(%esp), %esp			/* switch to espfix segment */
+.Lend_\@:
+#endif /* CONFIG_X86_ESPFIX32 */
+.endm
 /*
  * %eax: prev task
  * %edx: next task
@@ -547,21 +595,7 @@ ENTRY(entry_INT80_32)
 restore_all:
 	TRACE_IRQS_IRET
 .Lrestore_all_notrace:
-#ifdef CONFIG_X86_ESPFIX32
-	ALTERNATIVE	"jmp .Lrestore_nocheck", "", X86_BUG_ESPFIX
-
-	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
-	/*
-	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
-	 * are returning to the kernel.
-	 * See comments in process.c:copy_thread() for details.
-	 */
-	movb	PT_OLDSS(%esp), %ah
-	movb	PT_CS(%esp), %al
-	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
-	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
-	je	.Lldt_ss			# returning to user-space with LDT SS
-#endif
+	CHECK_AND_APPLY_ESPFIX
 .Lrestore_nocheck:
 	RESTORE_REGS 4				# skip orig_eax/error_code
 .Lirq_return:
@@ -579,39 +613,6 @@ ENTRY(iret_exc )
 	jmp	common_exception
 .previous
 	_ASM_EXTABLE(.Lirq_return, iret_exc)
-
-#ifdef CONFIG_X86_ESPFIX32
-.Lldt_ss:
-/*
- * Setup and switch to ESPFIX stack
- *
- * We're returning to userspace with a 16 bit stack. The CPU will not
- * restore the high word of ESP for us on executing iret... This is an
- * "official" bug of all the x86-compatible CPUs, which we can work
- * around to make dosemu and wine happy. We do this by preloading the
- * high word of ESP with the high word of the userspace ESP while
- * compensating for the offset by changing to the ESPFIX segment with
- * a base address that matches for the difference.
- */
-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
-	mov	%esp, %edx			/* load kernel esp */
-	mov	PT_OLDESP(%esp), %eax		/* load userspace esp */
-	mov	%dx, %ax			/* eax: new kernel esp */
-	sub	%eax, %edx			/* offset (low word is 0) */
-	shr	$16, %edx
-	mov	%dl, GDT_ESPFIX_SS + 4		/* bits 16..23 */
-	mov	%dh, GDT_ESPFIX_SS + 7		/* bits 24..31 */
-	pushl	$__ESPFIX_SS
-	pushl	%eax				/* new kernel esp */
-	/*
-	 * Disable interrupts, but do not irqtrace this section: we
-	 * will soon execute iret and the tracer was already set to
-	 * the irqstate after the IRET:
-	 */
-	DISABLE_INTERRUPTS(CLBR_ANY)
-	lss	(%esp), %esp			/* switch to espfix segment */
-	jmp	.Lrestore_nocheck
-#endif
 ENDPROC(entry_INT80_32)
 
 .macro FIXUP_ESPFIX_STACK
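
For reference, the base-adjustment arithmetic that CHECK_AND_APPLY_ESPFIX performs can be sanity-checked with a small user-space C sketch. This is illustrative only and not part of the commit; the example stack-pointer values are made up, and the sketch assumes the low 16 bits of the ESPFIX GDT base are zero.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Made-up example values: a kernel stack pointer and a 16-bit userspace ESP. */
	uint32_t kernel_esp = 0xc1234f80;
	uint32_t user_esp   = 0x0005ffe0;

	/* mov %dx, %ax: new ESP keeps the kernel low word, takes the userspace high word. */
	uint32_t new_esp = (user_esp & 0xffff0000u) | (kernel_esp & 0x0000ffffu);

	/* sub %eax, %edx; shr $16, %edx: difference between old and new ESP (low word is 0). */
	uint32_t offset = kernel_esp - new_esp;
	uint8_t base_16_23 = (uint8_t)(offset >> 16);	/* would be written to GDT_ESPFIX_SS + 4 */
	uint8_t base_24_31 = (uint8_t)(offset >> 24);	/* would be written to GDT_ESPFIX_SS + 7 */

	/*
	 * With the ESPFIX segment base set to that offset, base + new ESP still
	 * resolves to the original kernel stack, while only the low word of ESP
	 * has to survive the 16-bit iret.
	 */
	uint32_t espfix_base = ((uint32_t)base_24_31 << 24) | ((uint32_t)base_16_23 << 16);
	assert(espfix_base + new_esp == kernel_esp);

	printf("new esp %#010x, espfix base %#010x\n", new_esp, espfix_base);
	return 0;
}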