/*
 * Helper routines for building identity mapping page tables. This is
 * included by both the compressed kernel and the regular kernel.
 */

/*
 * Fill in PMD level entries for the [addr, end) range with 2M pages,
 * skipping entries that are already present.
 */
static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
			   unsigned long addr, unsigned long end)
{
	addr &= PMD_MASK;
	for (; addr < end; addr += PMD_SIZE) {
		pmd_t *pmd = pmd_page + pmd_index(addr);

		if (pmd_present(*pmd))
			continue;

		set_pmd(pmd, __pmd((addr - info->offset) | info->page_flag));
	}
}

/*
 * Fill in PUD level entries for the [addr, end) range: either directly
 * with 1G pages when info->direct_gbpages is set, or by populating
 * (and, if necessary, allocating) the PMD page tables below each entry.
 */
static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next) {
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		next = (addr & PUD_MASK) + PUD_SIZE;
		if (next > end)
			next = end;

		if (info->direct_gbpages) {
			pud_t pudval;

			if (pud_present(*pud))
				continue;

			addr &= PUD_MASK;
			pudval = __pud((addr - info->offset) | info->page_flag);
			set_pud(pud, pudval);
			continue;
		}

		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, 0);
			ident_pmd_init(info, pmd, addr, next);
			continue;
		}
		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
		if (!pmd)
			return -ENOMEM;
		ident_pmd_init(info, pmd, addr, next);
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	}

	return 0;
}

/*
 * Fill in P4D level entries for the [addr, end) range, allocating PUD
 * page tables where none are present yet.
 */
static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next) {
		p4d_t *p4d = p4d_page + p4d_index(addr);
		pud_t *pud;

		next = (addr & P4D_MASK) + P4D_SIZE;
		if (next > end)
			next = end;
		if (p4d_present(*p4d)) {
			pud = pud_offset(p4d, 0);
			ident_pud_init(info, pud, addr, next);
			continue;
		}
		pud = (pud_t *)info->alloc_pgt_page(info->context);
		if (!pud)
			return -ENOMEM;
		ident_pud_init(info, pud, addr, next);
		set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
	}

	return 0;
}

/*
 * Build page tables under pgd_page that map the virtual range
 * [pstart + info->offset, pend + info->offset) to the physical range
 * [pstart, pend).  With a zero offset this is a plain identity mapping.
 */
int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
			      unsigned long pstart, unsigned long pend)
{
	unsigned long addr = pstart + info->offset;
	unsigned long end = pend + info->offset;
	unsigned long next;
	int result;

	for (; addr < end; addr = next) {
		pgd_t *pgd = pgd_page + pgd_index(addr);
		p4d_t *p4d;

		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
		if (next > end)
			next = end;

		if (pgd_present(*pgd)) {
			p4d = p4d_offset(pgd, 0);
			result = ident_p4d_init(info, p4d, addr, next);
			if (result)
				return result;
			continue;
		}

		p4d = (p4d_t *)info->alloc_pgt_page(info->context);
		if (!p4d)
			return -ENOMEM;

		result = ident_p4d_init(info, p4d, addr, next);
		if (result)
			return result;
		if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
			set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE));
		} else {
			/*
			 * With p4d folded, pgd is equal to p4d.
			 * The pgd entry has to point to the pud page table
			 * in this case.
			 */
			pud_t *pud = pud_offset(p4d, 0);

			set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
		}
	}

	return 0;
}