2012-05-08 22:22:26 +04:00
# include <linux/io.h>
# include <linux/memblock.h>
# include <asm/cacheflush.h>
# include <asm/pgtable.h>
# include <asm/realmode.h>
2016-08-10 12:29:14 +03:00
# include <asm/tlbflush.h>
2012-05-08 22:22:26 +04:00
2012-05-08 22:22:41 +04:00
struct real_mode_header * real_mode_header ;
2012-05-08 22:22:46 +04:00
u32 * trampoline_cr4_features ;
2012-05-08 22:22:26 +04:00
2016-06-22 03:47:01 +03:00
/* Hold the pgd entry used on booting additional CPUs */
pgd_t trampoline_pgd_entry ;
2013-01-25 00:19:51 +04:00
void __init reserve_real_mode ( void )
2012-05-08 22:22:26 +04:00
{
phys_addr_t mem ;
2013-01-25 00:19:51 +04:00
unsigned char * base ;
size_t size = PAGE_ALIGN ( real_mode_blob_end - real_mode_blob ) ;
/* Has to be under 1M so we can execute real-mode AP code. */
mem = memblock_find_in_range ( 0 , 1 < < 20 , size , PAGE_SIZE ) ;
if ( ! mem )
panic ( " Cannot allocate trampoline \n " ) ;
base = __va ( mem ) ;
memblock_reserve ( mem , size ) ;
real_mode_header = ( struct real_mode_header * ) base ;
printk ( KERN_DEBUG " Base memory trampoline at [%p] %llx size %zu \n " ,
base , ( unsigned long long ) mem , size ) ;
}
/*
 * Copy the real-mode blob into the low memory reserved by
 * reserve_real_mode(), apply its relocation tables, and fill in the
 * trampoline header with the values secondary CPUs need to boot.
 */
void __init setup_real_mode(void)
{
	u16 seg16;
	const u32 *reloc;
	u32 nr;
	unsigned char *base;
	unsigned long pa_base;
	struct trampoline_header *th;
	size_t blob_size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
#ifdef CONFIG_X86_64
	u64 *trampoline_pgd;
	u64 efer;
#endif

	/* The blob lands at the address reserve_real_mode() picked. */
	base = (unsigned char *)real_mode_header;
	memcpy(base, real_mode_blob, blob_size);

	pa_base = __pa(base);
	seg16 = pa_base >> 4;

	reloc = (const u32 *)real_mode_relocs;

	/* 16-bit segment relocations. */
	for (nr = *reloc++; nr; nr--) {
		u16 *p = (u16 *)(base + *reloc++);

		*p = seg16;
	}

	/* 32-bit linear relocations. */
	for (nr = *reloc++; nr; nr--) {
		u32 *p = (u32 *)(base + *reloc++);

		*p += pa_base;
	}

	/* Must be performed *after* relocation. */
	th = (struct trampoline_header *)
		__va(real_mode_header->trampoline_header);

#ifdef CONFIG_X86_32
	th->start = __pa_symbol(startup_32_smp);
	th->gdt_limit = __BOOT_DS + 7;
	th->gdt_base = __pa_symbol(boot_gdt);
#else
	/*
	 * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
	 * so we need to mask it out.
	 */
	rdmsrl(MSR_EFER, efer);
	th->efer = efer & ~EFER_LMA;

	th->start = (u64)secondary_startup_64;

	/* Keep a pointer so the CR4 value can be refreshed later. */
	trampoline_cr4_features = &th->cr4;
	*trampoline_cr4_features = mmu_cr4_features;

	trampoline_pgd = (u64 *)__va(real_mode_header->trampoline_pgd);
	trampoline_pgd[0] = trampoline_pgd_entry.pgd;
	trampoline_pgd[511] = init_level4_pgt[511].pgd;
#endif
}
/*
 * reserve_real_mode() gets called very early, to guarantee the
 * availability of low memory. This is before the proper kernel page
 * tables are set up, so we cannot set page permissions in that
 * function. Also trampoline code will be executed by APs so we
 * need to mark it executable at do_pre_smp_initcalls() at least,
 * thus run it as a early_initcall().
 */
static int __init set_real_mode_permissions(void)
{
	unsigned char *base = (unsigned char *)real_mode_header;
	size_t blob_size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
	size_t ro_size = PAGE_ALIGN(real_mode_header->ro_end) - __pa(base);
	size_t text_size = PAGE_ALIGN(real_mode_header->ro_end) -
			   real_mode_header->text_start;
	unsigned long text_start =
		(unsigned long)__va(real_mode_header->text_start);

	/* Everything NX first, data/header read-only, then text executable. */
	set_memory_nx((unsigned long)base, blob_size >> PAGE_SHIFT);
	set_memory_ro((unsigned long)base, ro_size >> PAGE_SHIFT);
	set_memory_x(text_start, text_size >> PAGE_SHIFT);

	return 0;
}
early_initcall(set_real_mode_permissions);