// SPDX-License-Identifier: GPL-2.0

#include "mmu_internal.h"
#include "tdp_iter.h"
#include "spte.h"
/*
 * Recalculates the pointer to the SPTE for the current GFN and level and
 * rereads the SPTE.
 */
static void tdp_iter_refresh_sptep(struct tdp_iter *iter)
{
	iter->sptep = iter->pt_path[iter->level - 1] +
		SHADOW_PT_INDEX(iter->gfn << PAGE_SHIFT, iter->level);
	iter->old_spte = READ_ONCE(*rcu_dereference(iter->sptep));
}
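/*
 * Worked example (assuming 4 KiB base pages and 9 index bits per level, as on
 * x86-64): for gfn 0x1005, SHADOW_PT_INDEX() extracts bits 12-20 of the GPA at
 * level 1, giving index 0x5, and bits 21-29 at level 2, giving index 0x8, so
 * sptep ends up pointing at the 6th entry of a level 1 table or the 9th entry
 * of a level 2 table respectively.
 */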
static gfn_t round_gfn_for_level(gfn_t gfn, int level)
{
	return gfn & -KVM_PAGES_PER_HPAGE(level);
}
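/*
 * Worked example (assuming 4 KiB base pages): KVM_PAGES_PER_HPAGE(2) is 512,
 * so at level 2 the mask is -512 == ~0x1ff and round_gfn_for_level(0x1005, 2)
 * returns 0x1000, the first GFN covered by that level 2 entry. At level 1 the
 * mask is -1 and the GFN is returned unchanged.
 */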
/*
 * Return the TDP iterator to the root PT and allow it to continue its
 * traversal over the paging structure from there.
 */
void tdp_iter_restart(struct tdp_iter *iter)
{
	iter->yielded_gfn = iter->next_last_level_gfn;
	iter->level = iter->root_level;

	iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level);
	tdp_iter_refresh_sptep(iter);

	iter->valid = true;
}
/*
 * Sets a TDP iterator to walk a pre-order traversal of the paging structure
 * rooted at root_pt, starting with the walk to translate next_last_level_gfn.
 */
void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,
		    int min_level, gfn_t next_last_level_gfn)
{
	WARN_ON(root_level < 1);
	WARN_ON(root_level > PT64_ROOT_MAX_LEVEL);

	iter->next_last_level_gfn = next_last_level_gfn;
	iter->root_level = root_level;
	iter->min_level = min_level;
	iter->pt_path[iter->root_level - 1] = (tdp_ptep_t)root_pt;
	iter->as_id = kvm_mmu_page_as_id(sptep_to_sp(root_pt));

	tdp_iter_restart(iter);
}
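/*
 * Minimal usage sketch (hypothetical helper, not part of the file): walk every
 * SPTE mapping a GFN in [start, end) under root_pt, down to 4 KiB granularity.
 * Assumes the caller holds the MMU lock and is inside an RCU read-side
 * critical section, as required for dereferencing iter.sptep. In-tree callers
 * typically drive the iterator through the for_each_tdp_pte*() wrappers in
 * tdp_iter.h instead of open-coding the loop.
 */
static void __maybe_unused tdp_iter_walk_example(u64 *root_pt, int root_level,
						 gfn_t start, gfn_t end)
{
	struct tdp_iter iter;

	for (tdp_iter_start(&iter, root_pt, root_level, PG_LEVEL_4K, start);
	     iter.valid && iter.gfn < end;
	     tdp_iter_next(&iter)) {
		/* iter.gfn, iter.level and iter.old_spte describe the SPTE. */
	}
}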
/*
 * Given an SPTE and its level, returns a pointer containing the host virtual
 * address of the child page table referenced by the SPTE. Returns null if
 * there is no such entry.
 */
tdp_ptep_t spte_to_child_pt(u64 spte, int level)
{
	/*
	 * There's no child entry if this entry isn't present or is a
	 * last-level entry.
	 */
	if (!is_shadow_present_pte(spte) || is_last_spte(spte, level))
		return NULL;

	return (tdp_ptep_t)__va(spte_to_pfn(spte) << PAGE_SHIFT);
}
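/*
 * Worked example: if a present, non-leaf SPTE holds PFN 0x1234, the child
 * page table sits at physical address 0x1234 << PAGE_SHIFT == 0x1234000
 * (with 4 KiB pages), and __va() converts that into the corresponding
 * direct-map kernel virtual address returned above.
 */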
/*
 * Steps down one level in the paging structure towards the goal GFN. Returns
 * true if the iterator was able to step down a level, false otherwise.
 */
static bool try_step_down(struct tdp_iter *iter)
{
	tdp_ptep_t child_pt;

	if (iter->level == iter->min_level)
		return false;

	/*
	 * Reread the SPTE before stepping down to avoid traversing into page
	 * tables that are no longer linked from this entry.
	 */
	iter->old_spte = READ_ONCE(*rcu_dereference(iter->sptep));

	child_pt = spte_to_child_pt(iter->old_spte, iter->level);
	if (!child_pt)
		return false;

	iter->level--;
	iter->pt_path[iter->level - 1] = child_pt;
	iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level);
	tdp_iter_refresh_sptep(iter);

	return true;
}
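/*
 * Worked example (continuing the arithmetic above): stepping down from level 2
 * towards next_last_level_gfn 0x1005 decrements level to 1, records the child
 * table in pt_path[0], sets gfn to 0x1005 (rounding is a no-op at level 1),
 * and tdp_iter_refresh_sptep() then points sptep at entry 0x5 of the child
 * table.
 */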
/*
 * Steps to the next entry in the current page table, at the current page table
 * level. The next entry could point to a page backing guest memory or another
 * page table, or it could be non-present. Returns true if the iterator was
 * able to step to the next entry in the page table, false if the iterator was
 * already at the end of the current page table.
 */
static bool try_step_side(struct tdp_iter *iter)
{
	/*
	 * Check if the iterator is already at the end of the current page
	 * table.
	 */
	if (SHADOW_PT_INDEX(iter->gfn << PAGE_SHIFT, iter->level) ==
	    (PT64_ENT_PER_PAGE - 1))
		return false;

	iter->gfn += KVM_PAGES_PER_HPAGE(iter->level);
	iter->next_last_level_gfn = iter->gfn;
	iter->sptep++;
	iter->old_spte = READ_ONCE(*rcu_dereference(iter->sptep));

	return true;
}
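/*
 * Worked example (assuming 512-entry page tables): SHADOW_PT_INDEX() ranges
 * from 0 to 511, so the end-of-table check fails only at index 511, the last
 * entry. A successful side step at level 2 advances gfn by
 * KVM_PAGES_PER_HPAGE(2) == 512 GFNs, matching the 2 MiB region covered by
 * each level 2 entry.
 */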
/*
 * Tries to traverse back up a level in the paging structure so that the walk
 * can continue from the next entry in the parent page table. Returns true on a
 * successful step up, false if already in the root page.
 */
static bool try_step_up(struct tdp_iter *iter)
{
	if (iter->level == iter->root_level)
		return false;

	iter->level++;
	iter->gfn = round_gfn_for_level(iter->gfn, iter->level);
	tdp_iter_refresh_sptep(iter);

	return true;
}
/*
 * Step to the next SPTE in a pre-order traversal of the paging structure.
 * To get to the next SPTE, the iterator either steps down towards the goal
 * GFN, if at a present, non-last-level SPTE, or over to a SPTE mapping a
 * higher GFN.
 *
 * The basic algorithm is as follows:
 * 1. If the current SPTE is a non-last-level SPTE, step down into the page
 *    table it points to.
 * 2. If the iterator cannot step down, it will try to step to the next SPTE
 *    in the current page of the paging structure.
 * 3. If the iterator cannot step to the next entry in the current page, it
 *    will try to step up to the parent paging structure page. In this case,
 *    that SPTE will have already been visited, and so the iterator must also
 *    step to the side again.
 */
void tdp_iter_next(struct tdp_iter *iter)
{
	if (try_step_down(iter))
		return;

	do {
		if (try_step_side(iter))
			return;
	} while (try_step_up(iter));

	iter->valid = false;
}
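/*
 * Example traversal order (illustrative): when visiting the last entry of a
 * level 1 page table, try_step_down() and try_step_side() both fail,
 * try_step_up() returns to level 2, and the loop then steps sideways to the
 * next level 2 entry. Only when the side step fails at the root level, i.e.
 * the walk is back at the last entry of the root page, does iter->valid
 * become false and the traversal end.
 */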