// SPDX-License-Identifier: GPL-2.0

#include <linux/pagewalk.h>
#include <linux/ptdump.h>
#include <linux/kasan.h>

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
/*
 * This is an optimization for KASAN=y case. Since all kasan page tables
 * eventually point to the kasan_early_shadow_page we could call note_page()
 * right away without walking through lower level page tables. This saves
 * us dozens of seconds (minutes for 5-level config) while checking for
 * W+X mapping or reading kernel_page_tables debugfs file.
 */
static inline int note_kasan_page_table(struct mm_walk *walk,
					unsigned long addr)
{
	struct ptdump_state *st = walk->private;

	/* Report the shared shadow PTE at leaf (level 4) for this range. */
	st->note_page(st, addr, 4, pte_val(kasan_early_shadow_pte[0]));

	/* Skip descending into the (identical) lower-level shadow tables. */
	walk->action = ACTION_CONTINUE;
	return 0;
}
#endif
/* Page-walk callback for PGD (level 0) entries: report prot and leaf mappings. */
static int ptdump_pgd_entry(pgd_t *pgd, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pgd_t val = READ_ONCE(*pgd);

#if CONFIG_PGTABLE_LEVELS > 4 && \
		(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
	/* Shortcut the walk when this entry points at the shared KASAN shadow. */
	if (pgd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_p4d)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot)
		st->effective_prot(st, 0, pgd_val(val));

	if (pgd_leaf(val))
		st->note_page(st, addr, 0, pgd_val(val));

	return 0;
}
/* Page-walk callback for P4D (level 1) entries: report prot and leaf mappings. */
static int ptdump_p4d_entry(p4d_t *p4d, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	p4d_t val = READ_ONCE(*p4d);

#if CONFIG_PGTABLE_LEVELS > 3 && \
		(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
	/* Shortcut the walk when this entry points at the shared KASAN shadow. */
	if (p4d_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pud)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot)
		st->effective_prot(st, 1, p4d_val(val));

	if (p4d_leaf(val))
		st->note_page(st, addr, 1, p4d_val(val));

	return 0;
}
/* Page-walk callback for PUD (level 2) entries: report prot and leaf mappings. */
static int ptdump_pud_entry(pud_t *pud, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pud_t val = READ_ONCE(*pud);

#if CONFIG_PGTABLE_LEVELS > 2 && \
		(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
	/* Shortcut the walk when this entry points at the shared KASAN shadow. */
	if (pud_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pmd)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot)
		st->effective_prot(st, 2, pud_val(val));

	if (pud_leaf(val))
		st->note_page(st, addr, 2, pud_val(val));

	return 0;
}
/* Page-walk callback for PMD (level 3) entries: report prot and leaf mappings. */
static int ptdump_pmd_entry(pmd_t *pmd, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pmd_t val = READ_ONCE(*pmd);

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
	/* Shortcut the walk when this entry points at the shared KASAN shadow. */
	if (pmd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pte)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot)
		st->effective_prot(st, 3, pmd_val(val));

	if (pmd_leaf(val))
		st->note_page(st, addr, 3, pmd_val(val));

	return 0;
}
/* Page-walk callback for PTE (level 4) entries: always a leaf, always noted. */
static int ptdump_pte_entry(pte_t *pte, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pte_t val = READ_ONCE(*pte);

	if (st->effective_prot)
		st->effective_prot(st, 4, pte_val(val));

	st->note_page(st, addr, 4, pte_val(val));

	return 0;
}
static int ptdump_hole ( unsigned long addr , unsigned long next ,
int depth , struct mm_walk * walk )
{
struct ptdump_state * st = walk - > private ;
2020-02-04 04:36:38 +03:00
st - > note_page ( st , addr , depth , 0 ) ;
2020-02-04 04:36:20 +03:00
return 0 ;
}
/* Callback table wiring every page-table level (and holes) into the walker. */
static const struct mm_walk_ops ptdump_ops = {
	.pgd_entry	= ptdump_pgd_entry,
	.p4d_entry	= ptdump_p4d_entry,
	.pud_entry	= ptdump_pud_entry,
	.pmd_entry	= ptdump_pmd_entry,
	.pte_entry	= ptdump_pte_entry,
	.pte_hole	= ptdump_hole,
};
2020-02-04 04:36:42 +03:00
void ptdump_walk_pgd ( struct ptdump_state * st , struct mm_struct * mm , pgd_t * pgd )
2020-02-04 04:36:20 +03:00
{
const struct ptdump_range * range = st - > range ;
2020-06-09 07:33:25 +03:00
mmap_read_lock ( mm ) ;
2020-02-04 04:36:20 +03:00
while ( range - > start ! = range - > end ) {
walk_page_range_novma ( mm , range - > start , range - > end ,
2020-02-04 04:36:42 +03:00
& ptdump_ops , pgd , st ) ;
2020-02-04 04:36:20 +03:00
range + + ;
}
2020-06-09 07:33:25 +03:00
mmap_read_unlock ( mm ) ;
2020-02-04 04:36:20 +03:00
/* Flush out the last page */
2020-02-04 04:36:38 +03:00
st - > note_page ( st , 0 , - 1 , 0 ) ;
2020-02-04 04:36:20 +03:00
}