x86: unify power/cpu_(32|64) regarding restoring processor state
In this step we do unify cpu_32.c and cpu_64.c functions that work on restoring the saved processor state. Also, we do eliminate the forward declaration of fix_processor_context() for X86_64, as it's not needed anymore. Signed-off-by: Sergio Luis <sergio@larces.uece.br> Signed-off-by: Lauro Salmito <laurosalmito@gmail.com> Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
This commit is contained in:
parent
f9ebbe53e7
commit
3134d04b77
@ -27,8 +27,6 @@ unsigned long saved_context_esi, saved_context_edi;
|
|||||||
unsigned long saved_context_eflags;
|
unsigned long saved_context_eflags;
|
||||||
#else
|
#else
|
||||||
/* CONFIG_X86_64 */
|
/* CONFIG_X86_64 */
|
||||||
static void fix_processor_context(void);
|
|
||||||
|
|
||||||
struct saved_context saved_context;
|
struct saved_context saved_context;
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
@ -136,6 +134,11 @@ static void fix_processor_context(void)
|
|||||||
* similar stupidity.
|
* similar stupidity.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
#ifdef CONFIG_X86_64
|
||||||
|
get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
|
||||||
|
|
||||||
|
syscall_init(); /* This sets MSR_*STAR and related */
|
||||||
|
#endif
|
||||||
load_TR_desc(); /* This does ltr */
|
load_TR_desc(); /* This does ltr */
|
||||||
load_LDT(&current->active_mm->context); /* This does lldt */
|
load_LDT(&current->active_mm->context); /* This does lldt */
|
||||||
|
|
||||||
@ -143,6 +146,7 @@ static void fix_processor_context(void)
|
|||||||
* Now maybe reload the debug registers
|
* Now maybe reload the debug registers
|
||||||
*/
|
*/
|
||||||
if (current->thread.debugreg7) {
|
if (current->thread.debugreg7) {
|
||||||
|
#ifdef CONFIG_X86_32
|
||||||
set_debugreg(current->thread.debugreg0, 0);
|
set_debugreg(current->thread.debugreg0, 0);
|
||||||
set_debugreg(current->thread.debugreg1, 1);
|
set_debugreg(current->thread.debugreg1, 1);
|
||||||
set_debugreg(current->thread.debugreg2, 2);
|
set_debugreg(current->thread.debugreg2, 2);
|
||||||
@ -150,18 +154,40 @@ static void fix_processor_context(void)
|
|||||||
/* no 4 and 5 */
|
/* no 4 and 5 */
|
||||||
set_debugreg(current->thread.debugreg6, 6);
|
set_debugreg(current->thread.debugreg6, 6);
|
||||||
set_debugreg(current->thread.debugreg7, 7);
|
set_debugreg(current->thread.debugreg7, 7);
|
||||||
|
#else
|
||||||
|
/* CONFIG_X86_64 */
|
||||||
|
loaddebug(&current->thread, 0);
|
||||||
|
loaddebug(&current->thread, 1);
|
||||||
|
loaddebug(&current->thread, 2);
|
||||||
|
loaddebug(&current->thread, 3);
|
||||||
|
/* no 4 and 5 */
|
||||||
|
loaddebug(&current->thread, 6);
|
||||||
|
loaddebug(&current->thread, 7);
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* __restore_processor_state - restore the contents of CPU registers saved
|
||||||
|
* by __save_processor_state()
|
||||||
|
* @ctxt - structure to load the registers contents from
|
||||||
|
*/
|
||||||
static void __restore_processor_state(struct saved_context *ctxt)
|
static void __restore_processor_state(struct saved_context *ctxt)
|
||||||
{
|
{
|
||||||
/*
|
/*
|
||||||
* control registers
|
* control registers
|
||||||
*/
|
*/
|
||||||
/* cr4 was introduced in the Pentium CPU */
|
/* cr4 was introduced in the Pentium CPU */
|
||||||
|
#ifdef CONFIG_X86_32
|
||||||
if (ctxt->cr4)
|
if (ctxt->cr4)
|
||||||
write_cr4(ctxt->cr4);
|
write_cr4(ctxt->cr4);
|
||||||
|
#else
|
||||||
|
/* CONFIG X86_64 */
|
||||||
|
wrmsrl(MSR_EFER, ctxt->efer);
|
||||||
|
write_cr8(ctxt->cr8);
|
||||||
|
write_cr4(ctxt->cr4);
|
||||||
|
#endif
|
||||||
write_cr3(ctxt->cr3);
|
write_cr3(ctxt->cr3);
|
||||||
write_cr2(ctxt->cr2);
|
write_cr2(ctxt->cr2);
|
||||||
write_cr0(ctxt->cr0);
|
write_cr0(ctxt->cr0);
|
||||||
@ -170,12 +196,19 @@ static void __restore_processor_state(struct saved_context *ctxt)
|
|||||||
* now restore the descriptor tables to their proper values
|
* now restore the descriptor tables to their proper values
|
||||||
* ltr is done i fix_processor_context().
|
* ltr is done i fix_processor_context().
|
||||||
*/
|
*/
|
||||||
|
#ifdef CONFIG_X86_32
|
||||||
load_gdt(&ctxt->gdt);
|
load_gdt(&ctxt->gdt);
|
||||||
load_idt(&ctxt->idt);
|
load_idt(&ctxt->idt);
|
||||||
|
#else
|
||||||
|
/* CONFIG_X86_64 */
|
||||||
|
load_gdt((const struct desc_ptr *)&ctxt->gdt_limit);
|
||||||
|
load_idt((const struct desc_ptr *)&ctxt->idt_limit);
|
||||||
|
#endif
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* segment registers
|
* segment registers
|
||||||
*/
|
*/
|
||||||
|
#ifdef CONFIG_X86_32
|
||||||
loadsegment(es, ctxt->es);
|
loadsegment(es, ctxt->es);
|
||||||
loadsegment(fs, ctxt->fs);
|
loadsegment(fs, ctxt->fs);
|
||||||
loadsegment(gs, ctxt->gs);
|
loadsegment(gs, ctxt->gs);
|
||||||
@ -186,6 +219,18 @@ static void __restore_processor_state(struct saved_context *ctxt)
|
|||||||
*/
|
*/
|
||||||
if (boot_cpu_has(X86_FEATURE_SEP))
|
if (boot_cpu_has(X86_FEATURE_SEP))
|
||||||
enable_sep_cpu();
|
enable_sep_cpu();
|
||||||
|
#else
|
||||||
|
/* CONFIG_X86_64 */
|
||||||
|
asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
|
||||||
|
asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
|
||||||
|
asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
|
||||||
|
load_gs_index(ctxt->gs);
|
||||||
|
asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
|
||||||
|
|
||||||
|
wrmsrl(MSR_FS_BASE, ctxt->fs_base);
|
||||||
|
wrmsrl(MSR_GS_BASE, ctxt->gs_base);
|
||||||
|
wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
|
||||||
|
#endif
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* restore XCR0 for xsave capable cpu's.
|
* restore XCR0 for xsave capable cpu's.
|
||||||
@ -194,9 +239,13 @@ static void __restore_processor_state(struct saved_context *ctxt)
|
|||||||
xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
|
xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
|
||||||
|
|
||||||
fix_processor_context();
|
fix_processor_context();
|
||||||
|
|
||||||
do_fpu_end();
|
do_fpu_end();
|
||||||
mtrr_ap_init();
|
mtrr_ap_init();
|
||||||
|
|
||||||
|
#ifdef CONFIG_X86_32
|
||||||
mcheck_init(&boot_cpu_data);
|
mcheck_init(&boot_cpu_data);
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Needed by apm.c */
|
/* Needed by apm.c */
|
||||||
@ -204,4 +253,6 @@ void restore_processor_state(void)
|
|||||||
{
|
{
|
||||||
__restore_processor_state(&saved_context);
|
__restore_processor_state(&saved_context);
|
||||||
}
|
}
|
||||||
|
#ifdef CONFIG_X86_32
|
||||||
EXPORT_SYMBOL(restore_processor_state);
|
EXPORT_SYMBOL(restore_processor_state);
|
||||||
|
#endif
|
||||||
|
@ -28,8 +28,6 @@ unsigned long saved_context_esi, saved_context_edi;
|
|||||||
unsigned long saved_context_eflags;
|
unsigned long saved_context_eflags;
|
||||||
#else
|
#else
|
||||||
/* CONFIG_X86_64 */
|
/* CONFIG_X86_64 */
|
||||||
static void fix_processor_context(void);
|
|
||||||
|
|
||||||
struct saved_context saved_context;
|
struct saved_context saved_context;
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
@ -120,11 +118,57 @@ EXPORT_SYMBOL(save_processor_state);
|
|||||||
static void do_fpu_end(void)
|
static void do_fpu_end(void)
|
||||||
{
|
{
|
||||||
/*
|
/*
|
||||||
* Restore FPU regs if necessary
|
* Restore FPU regs if necessary.
|
||||||
*/
|
*/
|
||||||
kernel_fpu_end();
|
kernel_fpu_end();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void fix_processor_context(void)
|
||||||
|
{
|
||||||
|
int cpu = smp_processor_id();
|
||||||
|
struct tss_struct *t = &per_cpu(init_tss, cpu);
|
||||||
|
|
||||||
|
set_tss_desc(cpu, t); /*
|
||||||
|
* This just modifies memory; should not be
|
||||||
|
* necessary. But... This is necessary, because
|
||||||
|
* 386 hardware has concept of busy TSS or some
|
||||||
|
* similar stupidity.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifdef CONFIG_X86_64
|
||||||
|
get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
|
||||||
|
|
||||||
|
syscall_init(); /* This sets MSR_*STAR and related */
|
||||||
|
#endif
|
||||||
|
load_TR_desc(); /* This does ltr */
|
||||||
|
load_LDT(&current->active_mm->context); /* This does lldt */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Now maybe reload the debug registers
|
||||||
|
*/
|
||||||
|
if (current->thread.debugreg7) {
|
||||||
|
#ifdef CONFIG_X86_32
|
||||||
|
set_debugreg(current->thread.debugreg0, 0);
|
||||||
|
set_debugreg(current->thread.debugreg1, 1);
|
||||||
|
set_debugreg(current->thread.debugreg2, 2);
|
||||||
|
set_debugreg(current->thread.debugreg3, 3);
|
||||||
|
/* no 4 and 5 */
|
||||||
|
set_debugreg(current->thread.debugreg6, 6);
|
||||||
|
set_debugreg(current->thread.debugreg7, 7);
|
||||||
|
#else
|
||||||
|
/* CONFIG_X86_64 */
|
||||||
|
loaddebug(&current->thread, 0);
|
||||||
|
loaddebug(&current->thread, 1);
|
||||||
|
loaddebug(&current->thread, 2);
|
||||||
|
loaddebug(&current->thread, 3);
|
||||||
|
/* no 4 and 5 */
|
||||||
|
loaddebug(&current->thread, 6);
|
||||||
|
loaddebug(&current->thread, 7);
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* __restore_processor_state - restore the contents of CPU registers saved
|
* __restore_processor_state - restore the contents of CPU registers saved
|
||||||
* by __save_processor_state()
|
* by __save_processor_state()
|
||||||
@ -135,9 +179,16 @@ static void __restore_processor_state(struct saved_context *ctxt)
|
|||||||
/*
|
/*
|
||||||
* control registers
|
* control registers
|
||||||
*/
|
*/
|
||||||
|
/* cr4 was introduced in the Pentium CPU */
|
||||||
|
#ifdef CONFIG_X86_32
|
||||||
|
if (ctxt->cr4)
|
||||||
|
write_cr4(ctxt->cr4);
|
||||||
|
#else
|
||||||
|
/* CONFIG X86_64 */
|
||||||
wrmsrl(MSR_EFER, ctxt->efer);
|
wrmsrl(MSR_EFER, ctxt->efer);
|
||||||
write_cr8(ctxt->cr8);
|
write_cr8(ctxt->cr8);
|
||||||
write_cr4(ctxt->cr4);
|
write_cr4(ctxt->cr4);
|
||||||
|
#endif
|
||||||
write_cr3(ctxt->cr3);
|
write_cr3(ctxt->cr3);
|
||||||
write_cr2(ctxt->cr2);
|
write_cr2(ctxt->cr2);
|
||||||
write_cr0(ctxt->cr0);
|
write_cr0(ctxt->cr0);
|
||||||
@ -146,13 +197,31 @@ static void __restore_processor_state(struct saved_context *ctxt)
|
|||||||
* now restore the descriptor tables to their proper values
|
* now restore the descriptor tables to their proper values
|
||||||
* ltr is done i fix_processor_context().
|
* ltr is done i fix_processor_context().
|
||||||
*/
|
*/
|
||||||
|
#ifdef CONFIG_X86_32
|
||||||
|
load_gdt(&ctxt->gdt);
|
||||||
|
load_idt(&ctxt->idt);
|
||||||
|
#else
|
||||||
|
/* CONFIG_X86_64 */
|
||||||
load_gdt((const struct desc_ptr *)&ctxt->gdt_limit);
|
load_gdt((const struct desc_ptr *)&ctxt->gdt_limit);
|
||||||
load_idt((const struct desc_ptr *)&ctxt->idt_limit);
|
load_idt((const struct desc_ptr *)&ctxt->idt_limit);
|
||||||
|
#endif
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* segment registers
|
* segment registers
|
||||||
*/
|
*/
|
||||||
|
#ifdef CONFIG_X86_32
|
||||||
|
loadsegment(es, ctxt->es);
|
||||||
|
loadsegment(fs, ctxt->fs);
|
||||||
|
loadsegment(gs, ctxt->gs);
|
||||||
|
loadsegment(ss, ctxt->ss);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* sysenter MSRs
|
||||||
|
*/
|
||||||
|
if (boot_cpu_has(X86_FEATURE_SEP))
|
||||||
|
enable_sep_cpu();
|
||||||
|
#else
|
||||||
|
/* CONFIG_X86_64 */
|
||||||
asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
|
asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
|
||||||
asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
|
asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
|
||||||
asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
|
asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
|
||||||
@ -162,6 +231,7 @@ static void __restore_processor_state(struct saved_context *ctxt)
|
|||||||
wrmsrl(MSR_FS_BASE, ctxt->fs_base);
|
wrmsrl(MSR_FS_BASE, ctxt->fs_base);
|
||||||
wrmsrl(MSR_GS_BASE, ctxt->gs_base);
|
wrmsrl(MSR_GS_BASE, ctxt->gs_base);
|
||||||
wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
|
wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
|
||||||
|
#endif
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* restore XCR0 for xsave capable cpu's.
|
* restore XCR0 for xsave capable cpu's.
|
||||||
@ -173,41 +243,17 @@ static void __restore_processor_state(struct saved_context *ctxt)
|
|||||||
|
|
||||||
do_fpu_end();
|
do_fpu_end();
|
||||||
mtrr_ap_init();
|
mtrr_ap_init();
|
||||||
|
|
||||||
|
#ifdef CONFIG_X86_32
|
||||||
|
mcheck_init(&boot_cpu_data);
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Needed by apm.c */
|
||||||
void restore_processor_state(void)
|
void restore_processor_state(void)
|
||||||
{
|
{
|
||||||
__restore_processor_state(&saved_context);
|
__restore_processor_state(&saved_context);
|
||||||
}
|
}
|
||||||
|
#ifdef CONFIG_X86_32
|
||||||
static void fix_processor_context(void)
|
EXPORT_SYMBOL(restore_processor_state);
|
||||||
{
|
#endif
|
||||||
int cpu = smp_processor_id();
|
|
||||||
struct tss_struct *t = &per_cpu(init_tss, cpu);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* This just modifies memory; should not be necessary. But... This
|
|
||||||
* is necessary, because 386 hardware has concept of busy TSS or some
|
|
||||||
* similar stupidity.
|
|
||||||
*/
|
|
||||||
set_tss_desc(cpu, t);
|
|
||||||
|
|
||||||
get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
|
|
||||||
|
|
||||||
syscall_init(); /* This sets MSR_*STAR and related */
|
|
||||||
load_TR_desc(); /* This does ltr */
|
|
||||||
load_LDT(&current->active_mm->context); /* This does lldt */
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Now maybe reload the debug registers
|
|
||||||
*/
|
|
||||||
if (current->thread.debugreg7){
|
|
||||||
loaddebug(&current->thread, 0);
|
|
||||||
loaddebug(&current->thread, 1);
|
|
||||||
loaddebug(&current->thread, 2);
|
|
||||||
loaddebug(&current->thread, 3);
|
|
||||||
/* no 4 and 5 */
|
|
||||||
loaddebug(&current->thread, 6);
|
|
||||||
loaddebug(&current->thread, 7);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
Loading…
Reference in New Issue
Block a user