On 64-bit systems, the restore code is executed from a safe page while pages are being restored, because the page containing the currently-running instructions might otherwise be scribbled over during resume and cause failures. Although on 32-bit we currently only support resuming with the same kernel that performed the suspend, we'd like to remove that restriction in the future. Port the corresponding logic from 64-bit: allocate a safe page, copy the restore code into it, and jump to the safe page to run that code. Signed-off-by: Zhimin Gu <kookoo.gu@intel.com> Acked-by: Pavel Machek <pavel@ucw.cz> Signed-off-by: Chen Yu <yu.c.chen@intel.com> Acked-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This may not use any stack, nor any variable that is not "NoSave":
 *
 * It's rewriting one kernel image with another. What is stack in the "old"
 * image could very well be a data page in the "new" image, and overwriting
 * your own stack under you is a bad idea.
 */

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
#include <asm/frame.h>

	.text
|
|
|
|
/*
 * int swsusp_arch_suspend(void)
 *
 * Save the callee-visible CPU state (general registers, EFLAGS, CR3)
 * into NoSave variables, then call swsusp_save() to snapshot memory.
 * Returns swsusp_save()'s result in %eax.
 */
ENTRY(swsusp_arch_suspend)
	/* Stash registers in NoSave storage; no stack may be used here. */
	movl %esp, saved_context_esp
	movl %ebx, saved_context_ebx
	movl %ebp, saved_context_ebp
	movl %esi, saved_context_esi
	movl %edi, saved_context_edi

	/* EFLAGS has no direct store form; go through the stack. */
	pushfl
	popl saved_context_eflags

	/* save cr3 */
	movl %cr3, %eax
	movl %eax, restore_cr3

	/* Frame markers keep the unwinder happy around the C call. */
	FRAME_BEGIN
	call swsusp_save
	FRAME_END
	ret
ENDPROC(swsusp_arch_suspend)
|
|
|
|
/*
 * restore_image - entry point for restoring the hibernation image.
 *
 * Loads the values needed by the relocated restore code into registers
 * (%ebp = image kernel's CR3, %ecx = saved CR4 features), then jumps to
 * the copy of core_restore_code that was placed in a safe page, so that
 * the running text cannot be overwritten while pages are restored.
 */
ENTRY(restore_image)
	/* Carry the image kernel's CR3 in %ebp until restore_registers. */
	movl restore_cr3, %ebp

	/* %ecx = CR4 feature bits; consumed by the relocated code. */
	movl mmu_cr4_features, %ecx

	/* jump to relocated restore code */
	movl relocated_restore_code, %eax
	jmpl *%eax
|
|
|
|
/* code below has been relocated to a safe page */
/*
 * core_restore_code - copy saved pages back to their original locations.
 *
 * Runs from the safe page. Switches to the temporary page tables,
 * disables global pages (PGE) and flushes the TLB when CR4 exists,
 * then walks the restore_pblist of pbe entries, copying each saved
 * page (pbe_address) back over its original page (pbe_orig_address).
 * On entry: %ebp = image CR3, %ecx = saved CR4 features (from
 * restore_image). Falls through to restore_registers when done.
 */
ENTRY(core_restore_code)
	/* Switch to temporary page tables that also map the safe pages. */
	movl temp_pgt, %eax
	movl %eax, %cr3

	jecxz 1f	# cr4 Pentium and higher, skip if zero
	andl $~(X86_CR4_PGE), %ecx
	movl %ecx, %cr4;	# turn off PGE
	movl %cr3, %eax;	# flush TLB
	movl %eax, %cr3
1:
	/* %edx = head of the list of pages to restore. */
	movl restore_pblist, %edx
	.p2align 4,,7

copy_loop:
	/* NULL next pointer terminates the list. */
	testl %edx, %edx
	jz done

	/* %esi = saved copy, %edi = original destination. */
	movl pbe_address(%edx), %esi
	movl pbe_orig_address(%edx), %edi

	/* Copy one page, one 32-bit word at a time. */
	movl $(PAGE_SIZE >> 2), %ecx
	rep
	movsl

	/* Advance to the next pbe entry. */
	movl pbe_next(%edx), %edx
	jmp copy_loop
	.p2align 4,,7

done:
|
|
|
|
/* code below belongs to the image kernel */
	.align PAGE_SIZE
/*
 * restore_registers - final stage of resume, running image-kernel code.
 *
 * Reached by fall-through from the relocated core_restore_code once all
 * pages are back in place. Switches to the image kernel's page tables
 * (%ebp, set up in restore_image), re-enables PGE, restores the saved
 * general registers, EFLAGS, and GDT, and returns 0 to the image
 * kernel's caller. Page-aligned so its address is stable across the
 * relocation of the preceding code.
 */
ENTRY(restore_registers)
	/* go back to the original page tables */
	movl %ebp, %cr3
	movl mmu_cr4_features, %ecx
	jecxz 1f	# cr4 Pentium and higher, skip if zero
	movl %ecx, %cr4;	# turn PGE back on
1:

	/* Restore registers saved by swsusp_arch_suspend. */
	movl saved_context_esp, %esp
	movl saved_context_ebp, %ebp
	movl saved_context_ebx, %ebx
	movl saved_context_esi, %esi
	movl saved_context_edi, %edi

	/* EFLAGS goes back through the (now valid) stack. */
	pushl saved_context_eflags
	popfl

	/* Saved in save_processor_state. */
	movl $saved_context, %eax
	lgdt saved_context_gdt_desc(%eax)

	/* Return value: 0 == resume succeeded. */
	xorl %eax, %eax

	ret
ENDPROC(restore_registers)
|