/*
 * GTT virtualization
 *
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Xiao Zheng <xiao.zheng@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"

#if defined(VERBOSE_DEBUG)
#define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args)
#else
#define gvt_vdbg_mm(fmt, args...)
#endif

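/*
 * Out-of-sync (OOS) page optimization controls: when enabled, guest PTE
 * pages can temporarily be left un-tracked and re-synced in batches before
 * a workload is submitted, instead of trapping every single write.
 * preallocated_oos_pages sizes the shared pool of cached page copies.
 */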
static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;

/*
 * validate a gm address and related range size,
 * translate it to host gm address
 */
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{
	if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
			&& !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
		gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
				addr, size);
		return false;
	}
	return true;
}

/* translate a guest gmadr to host gmadr */
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
{
	if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
		 "invalid guest gmadr %llx\n", g_addr))
		return -EACCES;

	if (vgpu_gmadr_is_aperture(vgpu, g_addr))
		*h_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (g_addr - vgpu_aperture_offset(vgpu));
	else
		*h_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (g_addr - vgpu_hidden_offset(vgpu));
	return 0;
}

/* translate a host gmadr to guest gmadr */
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
{
	if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
		 "invalid host gmadr %llx\n", h_addr))
		return -EACCES;

	if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
		*g_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
	else
		*g_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
	return 0;
}

int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index)
{
	u64 h_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
				       &h_addr);
	if (ret)
		return ret;

	*h_index = h_addr >> I915_GTT_PAGE_SHIFT;
	return 0;
}

int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index)
{
	u64 g_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
				       &g_addr);
	if (ret)
		return ret;

	*g_index = g_addr >> I915_GTT_PAGE_SHIFT;
	return 0;
}

#define gtt_type_is_entry(type) \
	(type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
	 && type != GTT_TYPE_PPGTT_PTE_ENTRY \
	 && type != GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_type_is_pt(type) \
	(type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)

#define gtt_type_is_pte_pt(type) \
	(type == GTT_TYPE_PPGTT_PTE_PT)

#define gtt_type_is_root_pointer(type) \
	(gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_init_entry(e, t, p, v) do { \
	(e)->type = t; \
	(e)->pdev = p; \
	memcpy(&(e)->val64, &v, sizeof(v)); \
} while (0)

/*
 * Mappings between GTT_TYPE* enumerations.
 * For a given type the table provides:
 * - the type of the next level page table
 * - the type of entry inside this level of page table
 * - the type of the entry when the PSE bit is set
 *
 * If the given type doesn't carry such information, GTT_TYPE_INVALID is
 * returned. For example, an l4 root entry has no PSE bit, so asking for
 * its PSE type returns GTT_TYPE_INVALID, and a PTE page table has no next
 * level, so asking for its next page table type does the same. This is
 * useful when traversing a page table.
 */
struct gtt_type_table_entry {
	int entry_type;
	int pt_type;
	int next_pt_type;
	int pse_entry_type;
};

#define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \
	[type] = { \
		.entry_type = e_type, \
		.pt_type = cpt_type, \
		.next_pt_type = npt_type, \
		.pse_entry_type = pse_type, \
	}

static struct gtt_type_table_entry gtt_type_table[] = {
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
			GTT_TYPE_GGTT_PTE,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
};

static inline int get_next_pt_type(int type)
{
	return gtt_type_table[type].next_pt_type;
}

static inline int get_pt_type(int type)
{
	return gtt_type_table[type].pt_type;
}

static inline int get_entry_type(int type)
{
	return gtt_type_table[type].entry_type;
}

static inline int get_pse_type(int type)
{
	return gtt_type_table[type].pse_entry_type;
}

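/*
 * Low-level accessors for the physical GGTT: entries live in the GSM
 * aperture and are accessed with readq()/writeq(); ggtt_invalidate()
 * pokes the hardware flush register (GFX_FLSH_CNTL) so that updated
 * entries take effect.
 */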
static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
{
	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

	return readq(addr);
}

static void ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	mmio_hw_access_pre(dev_priv);
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	mmio_hw_access_post(dev_priv);
}

static void write_pte64(struct drm_i915_private *dev_priv,
		unsigned long index, u64 pte)
{
	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

	writeq(pte, addr);
}

static inline int gtt_get_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
	} else {
		e->val64 = *((u64 *)pt + index);
	}
	return 0;
}

static inline int gtt_set_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		write_pte64(vgpu->gvt->dev_priv, index, e->val64);
	} else {
		*((u64 *)pt + index) = e->val64;
	}
	return 0;
}

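/*
 * Physical address masks for 1G/2M/4K entries, limited by the 46-bit
 * hardware address width (GTT_HAW) of the gen8 page table format.
 */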
#define GTT_HAW 46

#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30)
#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21)
#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12)

static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
	unsigned long pfn;

	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
		pfn = (e->val64 & ADDR_1G_MASK) >> 12;
	else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
		pfn = (e->val64 & ADDR_2M_MASK) >> 12;
	else
		pfn = (e->val64 & ADDR_4K_MASK) >> 12;
	return pfn;
}

static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
{
	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
		e->val64 &= ~ADDR_1G_MASK;
		pfn &= (ADDR_1G_MASK >> 12);
	} else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
		e->val64 &= ~ADDR_2M_MASK;
		pfn &= (ADDR_2M_MASK >> 12);
	} else {
		e->val64 &= ~ADDR_4K_MASK;
		pfn &= (ADDR_4K_MASK >> 12);
	}
	e->val64 |= (pfn << 12);
}

static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
{
	/* Entry doesn't have PSE bit. */
	if (get_pse_type(e->type) == GTT_TYPE_INVALID)
		return false;

	e->type = get_entry_type(e->type);
	if (!(e->val64 & BIT(7)))
		return false;

	e->type = get_pse_type(e->type);
	return true;
}

static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
{
	/*
	 * i915 writes PDP root pointer registers without present bit,
	 * it also works, so we need to treat root pointer entry
	 * specifically.
	 */
	if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
			|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
		return (e->val64 != 0);
	else
		return (e->val64 & BIT(0));
}

static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 &= ~BIT(0);
}

static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 |= BIT(0);
}

/*
 * Per-platform GMA routines.
 */
static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
{
	unsigned long x = (gma >> I915_GTT_PAGE_SHIFT);

	trace_gma_index(__func__, gma, x);
	return x;
}

#define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
{ \
	unsigned long x = (exp); \
	trace_gma_index(__func__, gma, x); \
	return x; \
}

DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));

static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
	.get_entry = gtt_get_entry64,
	.set_entry = gtt_set_entry64,
	.clear_present = gtt_entry_clear_present,
	.set_present = gtt_entry_set_present,
	.test_present = gen8_gtt_test_present,
	.test_pse = gen8_gtt_test_pse,
	.get_pfn = gen8_gtt_get_pfn,
	.set_pfn = gen8_gtt_set_pfn,
};

static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
	.gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
	.gma_to_pte_index = gen8_gma_to_pte_index,
	.gma_to_pde_index = gen8_gma_to_pde_index,
	.gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
	.gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
	.gma_to_pml4_index = gen8_gma_to_pml4_index,
};

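/*
 * Translate a guest entry into a machine entry for the shadow page table:
 * copy the entry and replace its guest page frame number (gfn) with the
 * machine frame number (mfn) returned by the hypervisor.
 */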
static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
		struct intel_gvt_gtt_entry *m)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	unsigned long gfn, mfn;

	*m = *p;

	if (!ops->test_present(p))
		return 0;

	gfn = ops->get_pfn(p);

	mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
	if (mfn == INTEL_GVT_INVALID_ADDR) {
		gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn);
		return -ENXIO;
	}

	ops->set_pfn(m, mfn);
	return 0;
}

/*
 * MM helpers.
 */
static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index,
		bool guest)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT);

	entry->type = mm->ppgtt_mm.root_entry_type;
	pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
			   mm->ppgtt_mm.shadow_pdps,
			   entry, index, false, 0, mm->vgpu);

	pte_ops->test_pse(entry);
}

static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_get_root_entry(mm, entry, index, true);
}

static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_get_root_entry(mm, entry, index, false);
}

static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index,
		bool guest)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	pte_ops->set_entry(guest ? mm->ppgtt_mm.guest_pdps :
			   mm->ppgtt_mm.shadow_pdps,
			   entry, index, false, 0, mm->vgpu);
}

static inline void ppgtt_set_guest_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_set_root_entry(mm, entry, index, true);
}

static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_set_root_entry(mm, entry, index, false);
}

static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	entry->type = GTT_TYPE_GGTT_PTE;
	pte_ops->get_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
			   false, 0, mm->vgpu);
}

static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	pte_ops->set_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
			   false, 0, mm->vgpu);
}

static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
}

/*
 * PPGTT shadow page table helpers.
 */
static inline int ppgtt_spt_get_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	int ret;

	e->type = get_entry_type(type);

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return -EINVAL;

	ret = ops->get_entry(page_table, e, index, guest,
			spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT,
			spt->vgpu);
	if (ret)
		return ret;

	ops->test_pse(e);

	gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
		    type, e->type, index, e->val64);
	return 0;
}

static inline int ppgtt_spt_set_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return -EINVAL;

	gvt_vdbg_mm("set ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
		    type, e->type, index, e->val64);

	return ops->set_entry(page_table, e, index, guest,
			spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT,
			spt->vgpu);
}

#define ppgtt_get_guest_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, NULL, \
		spt->guest_page_type, e, index, true)

#define ppgtt_set_guest_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, NULL, \
		spt->guest_page_type, e, index, true)

#define ppgtt_get_shadow_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)

#define ppgtt_set_shadow_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)

/**
 * intel_vgpu_init_page_track - init a page track data structure
 * @vgpu: a vGPU
 * @t: a page track data structure
 * @gfn: guest memory page frame number
 * @handler: the function to be called when the target guest memory page has
 * been modified.
 *
 * This function is called when a user wants to prepare a page track data
 * structure to track a guest memory page.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_init_page_track(struct intel_vgpu *vgpu,
		struct intel_vgpu_page_track *t,
		unsigned long gfn,
		int (*handler)(void *, u64, void *, int),
		void *data)
{
	INIT_HLIST_NODE(&t->node);

	t->tracked = false;
	t->gfn = gfn;
	t->handler = handler;
	t->data = data;

	hash_add(vgpu->gtt.tracked_guest_page_hash_table, &t->node, t->gfn);
	return 0;
}

/**
 * intel_vgpu_clean_page_track - release a page track data structure
 * @vgpu: a vGPU
 * @t: a page track data structure
 *
 * This function is called before a user frees a page track data structure.
 */
void intel_vgpu_clean_page_track(struct intel_vgpu *vgpu,
		struct intel_vgpu_page_track *t)
{
	if (!hlist_unhashed(&t->node))
		hash_del(&t->node);

	if (t->tracked)
		intel_gvt_hypervisor_disable_page_track(vgpu, t);
}

/**
 * intel_vgpu_find_tracked_page - find a tracked guest page
 * @vgpu: a vGPU
 * @gfn: guest memory page frame number
 *
 * This function is called when the emulation layer wants to figure out if a
 * trapped GFN is a tracked guest page.
 *
 * Returns:
 * Pointer to page track data structure, NULL if not found.
 */
struct intel_vgpu_page_track *intel_vgpu_find_tracked_page(
		struct intel_vgpu *vgpu, unsigned long gfn)
{
	struct intel_vgpu_page_track *t;

	hash_for_each_possible(vgpu->gtt.tracked_guest_page_hash_table,
			t, node, gfn) {
		if (t->gfn == gfn)
			return t;
	}
	return NULL;
}

static int init_guest_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *p,
		unsigned long gfn,
		int (*handler)(void *, u64, void *, int),
		void *data)
{
	p->oos_page = NULL;
	p->write_cnt = 0;

	return intel_vgpu_init_page_track(vgpu, &p->track, gfn, handler, data);
}

static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page);

static void clean_guest_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *p)
{
	if (p->oos_page)
		detach_oos_page(vgpu, p->oos_page);

	intel_vgpu_clean_page_track(vgpu, &p->track);
}

static inline int init_shadow_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_shadow_page *p, int type, bool hash)
{
	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(kdev, daddr)) {
		gvt_vgpu_err("fail to map dma addr\n");
		return -EINVAL;
	}

	p->vaddr = page_address(p->page);
	p->type = type;

	INIT_HLIST_NODE(&p->node);

	p->mfn = daddr >> I915_GTT_PAGE_SHIFT;
	if (hash)
		hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
	return 0;
}

static inline void clean_shadow_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_shadow_page *p)
{
	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;

	dma_unmap_page(kdev, p->mfn << I915_GTT_PAGE_SHIFT, 4096,
			PCI_DMA_BIDIRECTIONAL);

	if (!hlist_unhashed(&p->node))
		hash_del(&p->node);
}

static inline struct intel_vgpu_shadow_page *find_shadow_page(
		struct intel_vgpu *vgpu, unsigned long mfn)
{
	struct intel_vgpu_shadow_page *p;

	hash_for_each_possible(vgpu->gtt.shadow_page_hash_table,
			p, node, mfn) {
		if (p->mfn == mfn)
			return p;
	}
	return NULL;
}

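/*
 * Conversions from the embedded page-track, guest-page and shadow-page
 * structures back to the intel_vgpu_ppgtt_spt that contains them.
 */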
#define page_track_to_guest_page(ptr) \
	container_of(ptr, struct intel_vgpu_guest_page, track)

#define guest_page_to_ppgtt_spt(ptr) \
	container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page)

#define shadow_page_to_ppgtt_spt(ptr) \
	container_of(ptr, struct intel_vgpu_ppgtt_spt, shadow_page)

static void *alloc_spt(gfp_t gfp_mask)
{
	struct intel_vgpu_ppgtt_spt *spt;

	spt = kzalloc(sizeof(*spt), gfp_mask);
	if (!spt)
		return NULL;

	spt->shadow_page.page = alloc_page(gfp_mask);
	if (!spt->shadow_page.page) {
		kfree(spt);
		return NULL;
	}
	return spt;
}

static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	__free_page(spt->shadow_page.page);
	kfree(spt);
}

static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);

	clean_shadow_page(spt->vgpu, &spt->shadow_page);
	clean_guest_page(spt->vgpu, &spt->guest_page);
	list_del_init(&spt->post_shadow_list);

	free_spt(spt);
}

static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu)
{
	struct hlist_node *n;
	struct intel_vgpu_shadow_page *sp;
	int i;

	hash_for_each_safe(vgpu->gtt.shadow_page_hash_table, i, n, sp, node)
		ppgtt_free_shadow_page(shadow_page_to_ppgtt_spt(sp));
}

static int ppgtt_handle_guest_write_page_table_bytes(
		struct intel_vgpu_guest_page *gpt,
		u64 pa, void *p_data, int bytes);

static int ppgtt_write_protection_handler(void *data, u64 pa,
		void *p_data, int bytes)
{
	struct intel_vgpu_page_track *t = data;
	struct intel_vgpu_guest_page *p = page_track_to_guest_page(t);
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	if (!t->tracked)
		return -EINVAL;

	ret = ppgtt_handle_guest_write_page_table_bytes(p,
		pa, p_data, bytes);
	if (ret)
		return ret;
	return ret;
}

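/*
 * Allocate and initialize a shadow page table page (spt) for the given
 * guest page table page. When allocation fails, LRU shadowed PPGTT mms
 * are reclaimed and the allocation is retried.
 */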
static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);

static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page(
		struct intel_vgpu *vgpu, int type, unsigned long gfn)
{
	struct intel_vgpu_ppgtt_spt *spt = NULL;
	int ret;

retry:
	spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
	if (!spt) {
		if (reclaim_one_ppgtt_mm(vgpu->gvt))
			goto retry;

		gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
		return ERR_PTR(-ENOMEM);
	}

	spt->vgpu = vgpu;
	spt->guest_page_type = type;
	atomic_set(&spt->refcount, 1);
	INIT_LIST_HEAD(&spt->post_shadow_list);

	/*
	 * TODO: guest page type may be different with shadow page type,
	 *	 when we support PSE page in future.
	 */
	ret = init_shadow_page(vgpu, &spt->shadow_page, type, true);
	if (ret) {
		gvt_vgpu_err("fail to initialize shadow page for spt\n");
		goto err;
	}

	ret = init_guest_page(vgpu, &spt->guest_page,
			gfn, ppgtt_write_protection_handler, NULL);
	if (ret) {
		gvt_vgpu_err("fail to initialize guest page for spt\n");
		goto err;
	}

	trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
	return spt;
err:
	ppgtt_free_shadow_page(spt);
	return ERR_PTR(ret);
}

static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
		struct intel_vgpu *vgpu, unsigned long mfn)
{
	struct intel_vgpu_shadow_page *p = find_shadow_page(vgpu, mfn);

	if (p)
		return shadow_page_to_ppgtt_spt(p);

	gvt_vgpu_err("fail to find ppgtt shadow page: 0x%lx\n", mfn);
	return NULL;
}

#define pt_entry_size_shift(spt) \
	((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)

#define pt_entries(spt) \
	(I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))

#define for_each_present_guest_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); i++) \
		if (!ppgtt_get_guest_entry(spt, e, i) && \
		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))

#define for_each_present_shadow_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); i++) \
		if (!ppgtt_get_shadow_entry(spt, e, i) && \
		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))

static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	int v = atomic_read(&spt->refcount);

	trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));

	atomic_inc(&spt->refcount);
}

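/*
 * Drop a reference on a shadow page table page. When the refcount hits
 * zero, recursively invalidate any child shadow pages it points to and
 * free the page.
 */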
static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);

static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
		struct intel_gvt_gtt_entry *e)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;
	intel_gvt_gtt_type_t cur_pt_type;

	if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(e->type))))
		return -EINVAL;

	if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
		&& e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
		cur_pt_type = get_next_pt_type(e->type) + 1;
		if (ops->get_pfn(e) ==
			vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
			return 0;
	}
	s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
	if (!s) {
		gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
				ops->get_pfn(e));
		return -ENXIO;
	}
	return ppgtt_invalidate_shadow_page(s);
}

static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry e;
	unsigned long index;
	int ret;
	int v = atomic_read(&spt->refcount);

	trace_spt_change(spt->vgpu->id, "die", spt,
			spt->guest_page.track.gfn, spt->shadow_page.type);

	trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));

	if (atomic_dec_return(&spt->refcount) > 0)
		return 0;

	if (gtt_type_is_pte_pt(spt->shadow_page.type))
		goto release;

	for_each_present_shadow_entry(spt, &e, index) {
		if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
			gvt_vgpu_err("GVT doesn't support pse bit for now\n");
			return -EINVAL;
		}
		ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
				spt->vgpu, &e);
		if (ret)
			goto fail;
	}
release:
	trace_spt_change(spt->vgpu->id, "release", spt,
			spt->guest_page.track.gfn, spt->shadow_page.type);

	ppgtt_free_shadow_page(spt);
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
			spt, e.val64, e.type);
	return ret;
}

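/*
 * Find the shadow page backing the page table referenced by a guest
 * entry, or allocate, write-protect and populate a new one if the guest
 * page is not tracked yet.
 */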
static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);

static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
		struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s = NULL;
	struct intel_vgpu_guest_page *g;
	struct intel_vgpu_page_track *t;
	int ret;

	if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(we->type)))) {
		ret = -EINVAL;
		goto fail;
	}

	t = intel_vgpu_find_tracked_page(vgpu, ops->get_pfn(we));
	if (t) {
		g = page_track_to_guest_page(t);
		s = guest_page_to_ppgtt_spt(g);
		ppgtt_get_shadow_page(s);
	} else {
		int type = get_next_pt_type(we->type);

		s = ppgtt_alloc_shadow_page(vgpu, type, ops->get_pfn(we));
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}

		ret = intel_gvt_hypervisor_enable_page_track(vgpu,
				&s->guest_page.track);
		if (ret)
			goto fail;

		ret = ppgtt_populate_shadow_page(s);
		if (ret)
			goto fail;

		trace_spt_change(vgpu->id, "new", s, s->guest_page.track.gfn,
			s->shadow_page.type);
	}
	return s;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
			s, we->val64, we->type);
	return ERR_PTR(ret);
}

static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
		struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
{
	struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;

	se->type = ge->type;
	se->val64 = ge->val64;

	ops->set_pfn(se, s->shadow_page.mfn);
}

static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;
	struct intel_gvt_gtt_entry se, ge;
	unsigned long gfn, i;
	int ret;

	trace_spt_change(spt->vgpu->id, "born", spt,
			spt->guest_page.track.gfn, spt->shadow_page.type);

	if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
		for_each_present_guest_entry(spt, &ge, i) {
			gfn = ops->get_pfn(&ge);
			if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn) ||
				gtt_entry_p2m(vgpu, &ge, &se))
				ops->set_pfn(&se, gvt->gtt.scratch_mfn);
			ppgtt_set_shadow_entry(spt, &se, i);
		}
		return 0;
	}

	for_each_present_guest_entry(spt, &ge, i) {
		if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
			gvt_vgpu_err("GVT doesn't support pse bit now\n");
			ret = -EINVAL;
			goto fail;
		}

		s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}
		ppgtt_get_shadow_entry(spt, &se, i);
		ppgtt_generate_shadow_entry(&se, s, &ge);
		ppgtt_set_shadow_entry(spt, &se, i);
	}
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
			spt, ge.val64, ge.type);
	return ret;
}

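/*
 * Handle removal of a previously present guest entry: if the old shadow
 * entry pointed at a child page table, drop the reference on that shadow
 * page. Entries pointing at the scratch page are ignored.
 */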
static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
		struct intel_gvt_gtt_entry *se, unsigned long index)
{
	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
	struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	int ret;

	trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, se->val64,
			 index);

	gvt_vdbg_mm("destroy old shadow entry, type %d, index %lu, value %llx\n",
		    se->type, index, se->val64);

	if (!ops->test_present(se))
		return 0;

	if (ops->get_pfn(se) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
		return 0;

	if (gtt_type_is_pt(get_next_pt_type(se->type))) {
		struct intel_vgpu_ppgtt_spt *s =
			ppgtt_find_shadow_page(vgpu, ops->get_pfn(se));
		if (!s) {
			gvt_vgpu_err("fail to find guest page\n");
			ret = -ENXIO;
			goto fail;
		}
		ret = ppgtt_invalidate_shadow_page(s);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
			spt, se->val64, se->type);
	return ret;
}

static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
	struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry m;
	struct intel_vgpu_ppgtt_spt *s;
	int ret;

	trace_gpt_change(spt->vgpu->id, "add", spt, sp->type,
		we->val64, index);

	gvt_vdbg_mm("add shadow entry: type %d, index %lu, value %llx\n",
		    we->type, index, we->val64);

	if (gtt_type_is_pt(get_next_pt_type(we->type))) {
		s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, we);
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}
		ppgtt_get_shadow_entry(spt, &m, index);
		ppgtt_generate_shadow_entry(&m, s, we);
		ppgtt_set_shadow_entry(spt, &m, index);
	} else {
		ret = gtt_entry_p2m(vgpu, we, &m);
		if (ret)
			goto fail;
		ppgtt_set_shadow_entry(spt, &m, index);
	}
	return 0;
fail:
	gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
		spt, we->val64, we->type);
	return ret;
}

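/*
 * Out-of-sync (OOS) support: an OOS page caches a copy of a guest PTE
 * page so its writes need not be trapped one by one; sync_oos_page()
 * diffs the cached copy against guest memory and updates only the shadow
 * entries that actually changed.
 */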
static int sync_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *spt =
		guest_page_to_ppgtt_spt(oos_page->guest_page);
	struct intel_gvt_gtt_entry old, new, m;
	int index;
	int ret;

	trace_oos_change(vgpu->id, "sync", oos_page->id,
			oos_page->guest_page, spt->guest_page_type);

	old.type = new.type = get_entry_type(spt->guest_page_type);
	old.val64 = new.val64 = 0;

	for (index = 0; index < (I915_GTT_PAGE_SIZE >>
				info->gtt_entry_size_shift); index++) {
		ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
		ops->get_entry(NULL, &new, index, true,
			oos_page->guest_page->track.gfn << PAGE_SHIFT, vgpu);

		if (old.val64 == new.val64
			&& !test_and_clear_bit(index, spt->post_shadow_bitmap))
			continue;

		trace_oos_sync(vgpu->id, oos_page->id,
				oos_page->guest_page, spt->guest_page_type,
				new.val64, index);

		ret = gtt_entry_p2m(vgpu, &new, &m);
		if (ret)
			return ret;

		ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
		ppgtt_set_shadow_entry(spt, &m, index);
	}

	oos_page->guest_page->write_cnt = 0;
	list_del_init(&spt->post_shadow_list);
	return 0;
}

static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_vgpu_ppgtt_spt *spt =
		guest_page_to_ppgtt_spt(oos_page->guest_page);

	trace_oos_change(vgpu->id, "detach", oos_page->id,
			oos_page->guest_page, spt->guest_page_type);

	oos_page->guest_page->write_cnt = 0;
	oos_page->guest_page->oos_page = NULL;
	oos_page->guest_page = NULL;

	list_del_init(&oos_page->vm_list);
	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);

	return 0;
}

static int attach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page,
		struct intel_vgpu_guest_page *gpt)
{
	struct intel_gvt *gvt = vgpu->gvt;
	int ret;

	ret = intel_gvt_hypervisor_read_gpa(vgpu,
			gpt->track.gfn << I915_GTT_PAGE_SHIFT,
			oos_page->mem, I915_GTT_PAGE_SIZE);
	if (ret)
		return ret;

	oos_page->guest_page = gpt;
	gpt->oos_page = oos_page;

	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);

	trace_oos_change(vgpu->id, "attach", gpt->oos_page->id,
			gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
	return 0;
}

static int ppgtt_set_guest_page_sync(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *gpt)
{
	int ret;

	ret = intel_gvt_hypervisor_enable_page_track(vgpu, &gpt->track);
	if (ret)
		return ret;

	trace_oos_change(vgpu->id, "set page sync", gpt->oos_page->id,
			gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);

	list_del_init(&gpt->oos_page->vm_list);
	return sync_oos_page(vgpu, gpt->oos_page);
}

static int ppgtt_allocate_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *gpt)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
	int ret;

	WARN(oos_page, "shadow PPGTT page already has an oos page\n");

	if (list_empty(&gtt->oos_page_free_list_head)) {
		oos_page = container_of(gtt->oos_page_use_list_head.next,
			struct intel_vgpu_oos_page, list);
		ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
		if (ret)
			return ret;
		ret = detach_oos_page(vgpu, oos_page);
		if (ret)
			return ret;
	} else
		oos_page = container_of(gtt->oos_page_free_list_head.next,
			struct intel_vgpu_oos_page, list);
	return attach_oos_page(vgpu, oos_page, gpt);
}

static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *gpt)
{
	struct intel_vgpu_oos_page *oos_page = gpt->oos_page;

	if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
		return -EINVAL;

	trace_oos_change(vgpu->id, "set page out of sync", gpt->oos_page->id,
			gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);

	list_add_tail(&oos_page->vm_list, &vgpu->gtt.oos_page_list_head);
	return intel_gvt_hypervisor_disable_page_track(vgpu, &gpt->track);
}

/**
 * intel_vgpu_sync_oos_pages - sync all the out-of-synced shadow for vGPU
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to host,
 * to sync all the out-of-synced shadow for vGPU
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_oos_page *oos_page;
	int ret;

	if (!enable_out_of_sync)
		return 0;

	list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
		oos_page = container_of(pos,
				struct intel_vgpu_oos_page, vm_list);
		ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * The heart of PPGTT shadow page table.
 */
static int ppgtt_handle_guest_write_page_table(
		struct intel_vgpu_guest_page *gpt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
	struct intel_vgpu *vgpu = spt->vgpu;
	int type = spt->shadow_page.type;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry se;

	int ret;
	int new_present;

	new_present = ops->test_present(we);

	/*
	 * Add the new entry first and then remove the old one, so that the
	 * ppgtt table stays valid in the window between the add and the
	 * removal.
	 */
	ppgtt_get_shadow_entry(spt, &se, index);

	if (new_present) {
		ret = ppgtt_handle_guest_entry_add(gpt, we, index);
		if (ret)
			goto fail;
	}

	ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
	if (ret)
		goto fail;

	if (!new_present) {
		ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
		ppgtt_set_shadow_entry(spt, &se, index);
	}

	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
			spt, we->val64, we->type);
	return ret;
}

static inline bool can_do_out_of_sync(struct intel_vgpu_guest_page *gpt)
{
	return enable_out_of_sync
		&& gtt_type_is_pte_pt(
			guest_page_to_ppgtt_spt(gpt)->guest_page_type)
		&& gpt->write_cnt >= 2;
}

static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
		unsigned long index)
{
	set_bit(index, spt->post_shadow_bitmap);
	if (!list_empty(&spt->post_shadow_list))
		return;

	list_add_tail(&spt->post_shadow_list,
			&spt->vgpu->gtt.post_shadow_list_head);
}

/**
 * intel_vgpu_flush_post_shadow - flush the post shadow transactions
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to host,
 * to flush all the post shadows for a vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_ppgtt_spt *spt;
	struct intel_gvt_gtt_entry ge;
	unsigned long index;
	int ret;

	list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
		spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
				post_shadow_list);

		for_each_set_bit(index, spt->post_shadow_bitmap,
				GTT_ENTRY_NUM_IN_ONE_PAGE) {
			ppgtt_get_guest_entry(spt, &ge, index);

			ret = ppgtt_handle_guest_write_page_table(
					&spt->guest_page, &ge, index);
			if (ret)
				return ret;
			clear_bit(index, spt->post_shadow_bitmap);
		}
		list_del_init(&spt->post_shadow_list);
	}
	return 0;
}

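/*
 * Handle a trapped guest write of 'bytes' bytes at guest physical address
 * 'pa' inside a write-protected page table page, either applying it
 * immediately or deferring it through the post-shadow / out-of-sync paths.
 */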
static int ppgtt_handle_guest_write_page_table_bytes(
		struct intel_vgpu_guest_page *gpt,
		u64 pa, void *p_data, int bytes)
{
	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	struct intel_gvt_gtt_entry we, se;
	unsigned long index;
	int ret;

	index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;

	ppgtt_get_guest_entry(spt, &we, index);

	ops->test_pse(&we);

	if (bytes == info->gtt_entry_size) {
		ret = ppgtt_handle_guest_write_page_table(gpt, &we, index);
		if (ret)
			return ret;
	} else {
		if (!test_bit(index, spt->post_shadow_bitmap)) {
			int type = spt->shadow_page.type;

			ppgtt_get_shadow_entry(spt, &se, index);
			ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
			if (ret)
				return ret;
			ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
			ppgtt_set_shadow_entry(spt, &se, index);
		}
		ppgtt_set_post_shadow(spt, index);
	}

	if (!enable_out_of_sync)
		return 0;

	gpt->write_cnt++;

	if (gpt->oos_page)
		ops->set_entry(gpt->oos_page->mem, &we, index,
				false, 0, vgpu);

	if (can_do_out_of_sync(gpt)) {
		if (!gpt->oos_page)
			ppgtt_allocate_oos_page(vgpu, gpt);

		ret = ppgtt_set_guest_page_oos(vgpu, gpt);
		if (ret < 0)
			return ret;
	}
	return 0;
}

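/*
 * Shadowing of a whole PPGTT mm: walk the guest root entries (PDPs),
 * populate matching shadow root entries, and tear them down again when
 * the mm is invalidated.
 */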
static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
	struct intel_gvt_gtt_entry se;
	int index;

	if (!mm->ppgtt_mm.shadowed)
		return;

	for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.shadow_pdps); index++) {
		ppgtt_get_shadow_root_entry(mm, &se, index);

		if (!ops->test_present(&se))
			continue;

		ppgtt_invalidate_shadow_page_by_shadow_entry(vgpu, &se);
		se.val64 = 0;
		ppgtt_set_shadow_root_entry(mm, &se, index);

		trace_gpt_change(vgpu->id, "destroy root pointer",
				 NULL, se.type, se.val64, index);
	}

	mm->ppgtt_mm.shadowed = false;
}

static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
	struct intel_vgpu_ppgtt_spt *spt;
	struct intel_gvt_gtt_entry ge, se;
	int index, ret;

	if (mm->ppgtt_mm.shadowed)
		return 0;

	mm->ppgtt_mm.shadowed = true;

	for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) {
		ppgtt_get_guest_root_entry(mm, &ge, index);

		if (!ops->test_present(&ge))
			continue;

		trace_gpt_change(vgpu->id, __func__, NULL,
				 ge.type, ge.val64, index);

		spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
		if (IS_ERR(spt)) {
			gvt_vgpu_err("fail to populate guest root pointer\n");
			ret = PTR_ERR(spt);
			goto fail;
		}
		ppgtt_generate_shadow_entry(&se, spt, &ge);
		ppgtt_set_shadow_root_entry(mm, &se, index);

		trace_gpt_change(vgpu->id, "populate root pointer",
				 NULL, se.type, se.val64, index);
	}

	return 0;
fail:
	invalidate_ppgtt_mm(mm);
	return ret;
}

static struct intel_vgpu_mm *vgpu_alloc_mm(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_mm *mm;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (!mm)
		return NULL;

	mm->vgpu = vgpu;
	kref_init(&mm->ref);
	atomic_set(&mm->pincount, 0);

	return mm;
}

static void vgpu_free_mm(struct intel_vgpu_mm *mm)
{
	kfree(mm);
}

2016-03-28 23:23:16 +08:00
/**
2018-01-30 19:19:40 +08:00
* intel_vgpu_create_ppgtt_mm - create a ppgtt mm object for a vGPU
2016-03-28 23:23:16 +08:00
* @ vgpu : a vGPU
2018-01-30 19:19:40 +08:00
* @ root_entry_type : ppgtt root entry type
* @ pdps : guest pdps .
2016-03-28 23:23:16 +08:00
*
2018-01-30 19:19:40 +08:00
* This function is used to create a ppgtt mm object for a vGPU .
2016-03-28 23:23:16 +08:00
*
* Returns :
* Zero on success , negative error code in pointer if failed .
*/
2018-01-30 19:19:40 +08:00
struct intel_vgpu_mm * intel_vgpu_create_ppgtt_mm ( struct intel_vgpu * vgpu ,
intel_gvt_gtt_type_t root_entry_type , u64 pdps [ ] )
2016-03-28 23:23:16 +08:00
{
struct intel_gvt * gvt = vgpu - > gvt ;
struct intel_vgpu_mm * mm ;
int ret ;
2018-01-30 19:19:40 +08:00
mm = vgpu_alloc_mm ( vgpu ) ;
if ( ! mm )
return ERR_PTR ( - ENOMEM ) ;
2016-03-28 23:23:16 +08:00
2018-01-30 19:19:40 +08:00
mm - > type = INTEL_GVT_MM_PPGTT ;
2016-03-28 23:23:16 +08:00
2018-01-30 19:19:40 +08:00
GEM_BUG_ON ( root_entry_type ! = GTT_TYPE_PPGTT_ROOT_L3_ENTRY & &
root_entry_type ! = GTT_TYPE_PPGTT_ROOT_L4_ENTRY ) ;
mm - > ppgtt_mm . root_entry_type = root_entry_type ;
2016-03-28 23:23:16 +08:00
2018-01-30 19:19:40 +08:00
INIT_LIST_HEAD ( & mm - > ppgtt_mm . list ) ;
INIT_LIST_HEAD ( & mm - > ppgtt_mm . lru_list ) ;
2016-03-28 23:23:16 +08:00
2018-01-30 19:19:40 +08:00
if ( root_entry_type = = GTT_TYPE_PPGTT_ROOT_L4_ENTRY )
mm - > ppgtt_mm . guest_pdps [ 0 ] = pdps [ 0 ] ;
else
memcpy ( mm - > ppgtt_mm . guest_pdps , pdps ,
sizeof ( mm - > ppgtt_mm . guest_pdps ) ) ;
2016-03-28 23:23:16 +08:00
2018-01-30 19:19:40 +08:00
ret = shadow_ppgtt_mm ( mm ) ;
2016-03-28 23:23:16 +08:00
if ( ret ) {
2018-01-30 19:19:40 +08:00
gvt_vgpu_err ( " failed to shadow ppgtt mm \n " ) ;
vgpu_free_mm ( mm ) ;
return ERR_PTR ( ret ) ;
2016-03-28 23:23:16 +08:00
}
2018-01-30 19:19:40 +08:00
list_add_tail ( & mm - > ppgtt_mm . list , & vgpu - > gtt . ppgtt_mm_list_head ) ;
list_add_tail ( & mm - > ppgtt_mm . lru_list , & gvt - > gtt . ppgtt_mm_lru_list_head ) ;
return mm ;
}
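
/*
 * Illustrative usage (a sketch, not driver code): a caller holding a set
 * of guest pdps would normally look for an existing mm first and only
 * create one on a miss, taking an extra reference when reusing it:
 *
 *	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
 *	if (mm)
 *		intel_vgpu_mm_get(mm);
 *	else
 *		mm = intel_vgpu_create_ppgtt_mm(vgpu,
 *				GTT_TYPE_PPGTT_ROOT_L4_ENTRY, pdps);
 *
 * intel_vgpu_g2v_create_ppgtt_mm() later in this file follows this
 * pattern.
 */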

static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
{
        struct intel_vgpu_mm *mm;
        unsigned long nr_entries;

        mm = vgpu_alloc_mm(vgpu);
        if (!mm)
                return ERR_PTR(-ENOMEM);

        mm->type = INTEL_GVT_MM_GGTT;

        nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
        mm->ggtt_mm.virtual_ggtt = vzalloc(nr_entries *
                                vgpu->gvt->device_info.gtt_entry_size);
        if (!mm->ggtt_mm.virtual_ggtt) {
                vgpu_free_mm(mm);
                return ERR_PTR(-ENOMEM);
        }

        return mm;
}

/**
 * _intel_vgpu_mm_release - destroy a mm object
 * @mm_ref: a kref object
 *
 * This function is used to destroy a mm object for a vGPU.
 *
 */
void _intel_vgpu_mm_release(struct kref *mm_ref)
{
        struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);

        if (GEM_WARN_ON(atomic_read(&mm->pincount)))
                gvt_err("vgpu mm pin count bug detected\n");

        if (mm->type == INTEL_GVT_MM_PPGTT) {
                list_del(&mm->ppgtt_mm.list);
                list_del(&mm->ppgtt_mm.lru_list);
                invalidate_ppgtt_mm(mm);
        } else {
                vfree(mm->ggtt_mm.virtual_ggtt);
        }

        vgpu_free_mm(mm);
}
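
/*
 * _intel_vgpu_mm_release() is the kref release callback and is not meant
 * to be called directly. Reference counting is expected to go through
 * the intel_vgpu_mm_get()/intel_vgpu_mm_put() helpers (defined in the
 * corresponding header), which are presumably thin wrappers along the
 * lines of:
 *
 *	kref_get(&mm->ref);
 *	kref_put(&mm->ref, _intel_vgpu_mm_release);
 */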

/**
 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
 * @mm: a vGPU mm object
 *
 * This function is called when a user no longer needs to use a vGPU mm object.
 */
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
{
        atomic_dec(&mm->pincount);
}

/**
 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
 * @mm: a vGPU mm object
 *
 * This function is called when a user wants to use a vGPU mm object. If this
 * mm object hasn't been shadowed yet, the shadow will be populated at this
 * time.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
{
        int ret;

        atomic_inc(&mm->pincount);

        if (mm->type == INTEL_GVT_MM_PPGTT) {
                ret = shadow_ppgtt_mm(mm);
                if (ret)
                        return ret;

                list_move_tail(&mm->ppgtt_mm.lru_list,
                               &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
        }

        return 0;
}
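
/*
 * Typical pairing (illustration only): a shadowed PPGTT mm is pinned for
 * the lifetime of a submitted workload and unpinned on completion, which
 * keeps it off the reclaim path in reclaim_one_ppgtt_mm():
 *
 *	ret = intel_vgpu_pin_mm(workload->shadow_mm);
 *	if (ret)
 *		return ret;
 *	... dispatch and wait for the workload ...
 *	intel_vgpu_unpin_mm(workload->shadow_mm);
 *
 * The workload->shadow_mm field belongs to the GVT-g scheduler code and
 * is shown here only as an assumed example.
 */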

static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
{
        struct intel_vgpu_mm *mm;
        struct list_head *pos, *n;

        list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
                mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);

                if (atomic_read(&mm->pincount))
                        continue;

                list_del_init(&mm->ppgtt_mm.lru_list);
                invalidate_ppgtt_mm(mm);
                return 1;
        }
        return 0;
}

/*
 * GMA translation APIs.
 */
static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
                struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
{
        struct intel_vgpu *vgpu = mm->vgpu;
        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
        struct intel_vgpu_ppgtt_spt *s;

        s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
        if (!s)
                return -ENXIO;

        if (!guest)
                ppgtt_get_shadow_entry(s, e, index);
        else
                ppgtt_get_guest_entry(s, e, index);

        return 0;
}

/**
 * intel_vgpu_gma_to_gpa - translate a gma to GPA
 * @mm: mm object, which can be a PPGTT or GGTT mm object
 * @gma: graphics memory address in this mm object
 *
 * This function is used to translate a graphics memory address in a specific
 * graphics memory space to a guest physical address.
 *
 * Returns:
 * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
 */
unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
{
        struct intel_vgpu *vgpu = mm->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
        struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
        unsigned long gpa = INTEL_GVT_INVALID_ADDR;
        unsigned long gma_index[4];
        struct intel_gvt_gtt_entry e;
        int i, levels = 0;
        int ret;

        GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT &&
                   mm->type != INTEL_GVT_MM_PPGTT);

        if (mm->type == INTEL_GVT_MM_GGTT) {
                if (!vgpu_gmadr_is_valid(vgpu, gma))
                        goto err;

                ggtt_get_guest_entry(mm, &e,
                        gma_ops->gma_to_ggtt_pte_index(gma));

                gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
                        + (gma & ~I915_GTT_PAGE_MASK);

                trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
        } else {
                switch (mm->ppgtt_mm.root_entry_type) {
                case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
                        ppgtt_get_shadow_root_entry(mm, &e, 0);

                        gma_index[0] = gma_ops->gma_to_pml4_index(gma);
                        gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
                        gma_index[2] = gma_ops->gma_to_pde_index(gma);
                        gma_index[3] = gma_ops->gma_to_pte_index(gma);
                        levels = 4;
                        break;
                case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
                        ppgtt_get_shadow_root_entry(mm, &e,
                                        gma_ops->gma_to_l3_pdp_index(gma));

                        gma_index[0] = gma_ops->gma_to_pde_index(gma);
                        gma_index[1] = gma_ops->gma_to_pte_index(gma);
                        levels = 2;
                        break;
                default:
                        GEM_BUG_ON(1);
                }

                /* walk the shadow page table and get gpa from guest entry */
                for (i = 0; i < levels; i++) {
                        ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
                                (i == levels - 1));
                        if (ret)
                                goto err;

                        if (!pte_ops->test_present(&e)) {
                                gvt_dbg_core("GMA 0x%lx is not present\n", gma);
                                goto err;
                        }
                }

                gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) +
                        (gma & ~I915_GTT_PAGE_MASK);
                trace_gma_translate(vgpu->id, "ppgtt", 0,
                                    mm->ppgtt_mm.root_entry_type, gma, gpa);
        }

        return gpa;
err:
        gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
        return INTEL_GVT_INVALID_ADDR;
}
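
/*
 * For reference, the 4-level (L4) walk above decodes a gen8-style 48-bit
 * graphics memory address with 4KB pages roughly as:
 *
 *	pml4 index  = (gma >> 39) & 0x1ff
 *	pdp index   = (gma >> 30) & 0x1ff
 *	pde index   = (gma >> 21) & 0x1ff
 *	pte index   = (gma >> 12) & 0x1ff
 *	page offset = gma & 0xfff
 *
 * e.g. gma 0x80_8060_4567 resolves to pml4 index 1, pdp index 2,
 * pde index 3, pte index 4 and page offset 0x567. The authoritative bit
 * layout is owned by gen8_gtt_gma_ops; the numbers here are only a
 * worked example.
 */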

static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
        unsigned int off, void *p_data, unsigned int bytes)
{
        struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
        const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
        unsigned long index = off >> info->gtt_entry_size_shift;
        struct intel_gvt_gtt_entry e;

        if (bytes != 4 && bytes != 8)
                return -EINVAL;

        ggtt_get_guest_entry(ggtt_mm, &e, index);
        memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
                        bytes);
        return 0;
}

/**
 * intel_vgpu_emulate_ggtt_mmio_read - emulate GTT MMIO register read
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data to be returned to the guest
 * @bytes: data length
 *
 * This function is used to emulate the GTT MMIO register read
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
        void *p_data, unsigned int bytes)
{
        const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
        int ret;

        if (bytes != 4 && bytes != 8)
                return -EINVAL;

        off -= info->gtt_start_offset;
        ret = emulate_ggtt_mmio_read(vgpu, off, p_data, bytes);
        return ret;
}
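
/*
 * Worked example (assuming the usual 8-byte gen8 GGTT entry, i.e.
 * gtt_entry_size_shift == 3): after gtt_start_offset has been
 * subtracted, a 4-byte guest read at offset 0x10004 maps to GGTT index
 * 0x10004 >> 3 = 0x2000 and, because off & (gtt_entry_size - 1) == 4,
 * returns the upper dword of that entry.
 */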

static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
        void *p_data, unsigned int bytes)
{
        struct intel_gvt *gvt = vgpu->gvt;
        const struct intel_gvt_device_info *info = &gvt->device_info;
        struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
        struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
        unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
        unsigned long gma, gfn;
        struct intel_gvt_gtt_entry e, m;
        int ret;

        if (bytes != 4 && bytes != 8)
                return -EINVAL;

        gma = g_gtt_index << I915_GTT_PAGE_SHIFT;

        /* the VM may configure the whole GM space when ballooning is used */
        if (!vgpu_gmadr_is_valid(vgpu, gma))
                return 0;

        ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);

        memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
                        bytes);

        if (ops->test_present(&e)) {
                gfn = ops->get_pfn(&e);

                /* one PTE update may be issued in multiple writes and the
                 * first write may not construct a valid gfn
                 */
                if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
                        m = e;
                        ops->set_pfn(&m, gvt->gtt.scratch_mfn);
                        goto out;
                }

                ret = gtt_entry_p2m(vgpu, &e, &m);
                if (ret) {
                        gvt_vgpu_err("fail to translate guest gtt entry\n");
                        /* The guest driver may read/write the entry while it
                         * is only partially updated; p2m will fail in that
                         * case, so point the shadow entry at a scratch page.
                         */
                        ops->set_pfn(&m, gvt->gtt.scratch_mfn);
                }
        } else {
                m = e;
                ops->set_pfn(&m, gvt->gtt.scratch_mfn);
        }

out:
        ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
        ggtt_invalidate(gvt->dev_priv);
        ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
        return 0;
}
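
/*
 * Note on partial updates: a 64-bit GGTT PTE is commonly written by the
 * guest as two 4-byte MMIO writes. Each write is merged into the cached
 * guest entry above; the first one may leave a transiently invalid gfn,
 * in which case the shadow entry is pointed at the scratch page, and the
 * second write completes the PTE so it can be translated normally. For
 * example, writing the low dword 0x12345003 followed by the high dword
 * 0x00000001 builds the guest PTE value 0x0000000112345003.
 */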

/**
 * intel_vgpu_emulate_ggtt_mmio_write - emulate GTT MMIO register write
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data from guest write
 * @bytes: data length
 *
 * This function is used to emulate the GTT MMIO register write
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
        unsigned int off, void *p_data, unsigned int bytes)
{
        const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
        int ret;

        if (bytes != 4 && bytes != 8)
                return -EINVAL;

        off -= info->gtt_start_offset;
        ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes);
        return ret;
}

int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa,
                void *p_data, unsigned int bytes)
{
        struct intel_gvt *gvt = vgpu->gvt;
        int ret = 0;

        if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
                struct intel_vgpu_page_track *t;

                mutex_lock(&gvt->lock);

                t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
                if (t) {
                        if (unlikely(vgpu->failsafe)) {
                                /* remove write protection to prevent future traps */
                                intel_vgpu_clean_page_track(vgpu, t);
                        } else {
                                ret = t->handler(t, pa, p_data, bytes);
                                if (ret) {
                                        gvt_err("guest page write error %d, "
                                                "gfn 0x%lx, pa 0x%llx, "
                                                "var 0x%x, len %d\n",
                                                ret, t->gfn, pa,
                                                *(u32 *)p_data, bytes);
                                }
                        }
                }
                mutex_unlock(&gvt->lock);
        }
        return ret;
}

static int alloc_scratch_pages(struct intel_vgpu *vgpu,
                intel_gvt_gtt_type_t type)
{
        struct intel_vgpu_gtt *gtt = &vgpu->gtt;
        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
        int page_entry_num = I915_GTT_PAGE_SIZE >>
                                vgpu->gvt->device_info.gtt_entry_size_shift;
        void *scratch_pt;
        int i;
        struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
        dma_addr_t daddr;

        if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
                return -EINVAL;

        scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
        if (!scratch_pt) {
                gvt_vgpu_err("fail to allocate scratch page\n");
                return -ENOMEM;
        }

        daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
                        4096, PCI_DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, daddr)) {
                gvt_vgpu_err("fail to dmamap scratch_pt\n");
                __free_page(virt_to_page(scratch_pt));
                return -ENOMEM;
        }
        gtt->scratch_pt[type].page_mfn =
                (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
        gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
        gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
                        vgpu->id, type, gtt->scratch_pt[type].page_mfn);

        /* Build the tree by filling the scratch pt with entries that point
         * to the next-level scratch pt or scratch page. scratch_pt[type]
         * indicates the scratch pt/scratch page used by page tables of
         * type 'type'.
         * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by a
         * GTT_TYPE_PPGTT_PDE_PT level pt, which means this scratch_pt itself
         * is of type GTT_TYPE_PPGTT_PTE_PT and is filled with the scratch
         * page mfn.
         */
        if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
                struct intel_gvt_gtt_entry se;

                memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
                se.type = get_entry_type(type - 1);
                ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);

                /* The entry parameters like present/writeable/cache type
                 * are set to the same values as in i915's scratch page tree.
                 */
                se.val64 |= _PAGE_PRESENT | _PAGE_RW;
                if (type == GTT_TYPE_PPGTT_PDE_PT)
                        se.val64 |= PPAT_CACHED;

                for (i = 0; i < page_entry_num; i++)
                        ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
        }

        return 0;
}
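
/*
 * The resulting per-vGPU scratch hierarchy, built level by level by
 * create_scratch_page_tree() below, looks roughly like:
 *
 *	scratch_pt[PML4_PT] -> scratch_pt[PDP_PT] -> scratch_pt[PDE_PT]
 *		-> scratch_pt[PTE_PT] (a zero-filled scratch data page)
 *
 * i.e. scratch_pt[type] is the object used to back a non-present entry
 * in a page table of level 'type', and each upper-level scratch table is
 * completely filled with entries pointing at the scratch object one
 * level below it.
 */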

static int release_scratch_page_tree(struct intel_vgpu *vgpu)
{
        int i;
        struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
        dma_addr_t daddr;

        for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
                if (vgpu->gtt.scratch_pt[i].page != NULL) {
                        daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
                                        I915_GTT_PAGE_SHIFT);
                        dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
                        __free_page(vgpu->gtt.scratch_pt[i].page);
                        vgpu->gtt.scratch_pt[i].page = NULL;
                        vgpu->gtt.scratch_pt[i].page_mfn = 0;
                }
        }

        return 0;
}

static int create_scratch_page_tree(struct intel_vgpu *vgpu)
{
        int i, ret;

        for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
                ret = alloc_scratch_pages(vgpu, i);
                if (ret)
                        goto err;
        }

        return 0;

err:
        release_scratch_page_tree(vgpu);
        return ret;
}

/**
 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to initialize per-vGPU graphics memory virtualization
 * components.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
{
        struct intel_vgpu_gtt *gtt = &vgpu->gtt;

        hash_init(gtt->tracked_guest_page_hash_table);
        hash_init(gtt->shadow_page_hash_table);

        INIT_LIST_HEAD(&gtt->ppgtt_mm_list_head);
        INIT_LIST_HEAD(&gtt->oos_page_list_head);
        INIT_LIST_HEAD(&gtt->post_shadow_list_head);

        gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu);
        if (IS_ERR(gtt->ggtt_mm)) {
                gvt_vgpu_err("fail to create mm for ggtt.\n");
                return PTR_ERR(gtt->ggtt_mm);
        }

        intel_vgpu_reset_ggtt(vgpu);

        return create_scratch_page_tree(vgpu);
}

static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
{
        struct list_head *pos, *n;
        struct intel_vgpu_mm *mm;

        list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
                mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
                intel_vgpu_destroy_mm(mm);
        }

        if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
                gvt_err("vgpu ppgtt mm is not fully destroyed\n");

        if (GEM_WARN_ON(!hlist_empty(vgpu->gtt.shadow_page_hash_table))) {
                gvt_err("Why do we still have spt not freed?\n");
                ppgtt_free_all_shadow_page(vgpu);
        }
}

static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
{
        intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
        vgpu->gtt.ggtt_mm = NULL;
}

/**
 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to clean up per-vGPU graphics memory virtualization
 * components.
 */
void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
{
        intel_vgpu_destroy_all_ppgtt_mm(vgpu);
        intel_vgpu_destroy_ggtt_mm(vgpu);
        release_scratch_page_tree(vgpu);
}

static void clean_spt_oos(struct intel_gvt *gvt)
{
        struct intel_gvt_gtt *gtt = &gvt->gtt;
        struct list_head *pos, *n;
        struct intel_vgpu_oos_page *oos_page;

        WARN(!list_empty(&gtt->oos_page_use_list_head),
                "someone is still using oos page\n");

        list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
                oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
                list_del(&oos_page->list);
                kfree(oos_page);
        }
}

static int setup_spt_oos(struct intel_gvt *gvt)
{
        struct intel_gvt_gtt *gtt = &gvt->gtt;
        struct intel_vgpu_oos_page *oos_page;
        int i;
        int ret;

        INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
        INIT_LIST_HEAD(&gtt->oos_page_use_list_head);

        for (i = 0; i < preallocated_oos_pages; i++) {
                oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
                if (!oos_page) {
                        ret = -ENOMEM;
                        goto fail;
                }

                INIT_LIST_HEAD(&oos_page->list);
                INIT_LIST_HEAD(&oos_page->vm_list);
                oos_page->id = i;
                list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
        }

        gvt_dbg_mm("%d oos pages preallocated\n", i);

        return 0;
fail:
        clean_spt_oos(gvt);
        return ret;
}

/**
 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
 * @vgpu: a vGPU
 * @pdps: guest pdps (PPGTT page table root pointers)
 *
 * This function is used to find a PPGTT mm object from the mm object pool.
 *
 * Returns:
 * pointer to mm object on success, NULL if failed.
 */
struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
                u64 pdps[])
{
        struct intel_vgpu_mm *mm;
        struct list_head *pos;

        list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) {
                mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);

                switch (mm->ppgtt_mm.root_entry_type) {
                case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
                        if (pdps[0] == mm->ppgtt_mm.guest_pdps[0])
                                return mm;
                        break;
                case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
                        if (!memcmp(pdps, mm->ppgtt_mm.guest_pdps,
                                    sizeof(mm->ppgtt_mm.guest_pdps)))
                                return mm;
                        break;
                default:
                        GEM_BUG_ON(1);
                }
        }
        return NULL;
}
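
/*
 * Matching note: an L4 (4-level) mm is identified by guest_pdps[0] alone,
 * since a single PML4 root covers the whole address space, while an L3
 * (legacy 3-level) mm is identified by all of its PDP entries, hence the
 * memcmp() over the whole guest_pdps[] array.
 */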

/**
 * intel_vgpu_g2v_create_ppgtt_mm - create a PPGTT mm object from
 * g2v notification
 * @vgpu: a vGPU
 * @root_entry_type: ppgtt root entry type
 * @pdps: guest pdps
 *
 * This function is used to create a PPGTT mm object from a guest to GVT-g
 * notification.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
                intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
{
        struct intel_vgpu_mm *mm;

        mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
        if (mm) {
                intel_vgpu_mm_get(mm);
        } else {
                mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);
                if (IS_ERR(mm)) {
                        gvt_vgpu_err("fail to create mm\n");
                        return PTR_ERR(mm);
                }
        }
        return 0;
}

/**
 * intel_vgpu_g2v_destroy_ppgtt_mm - destroy a PPGTT mm object from
 * g2v notification
 * @vgpu: a vGPU
 * @pdps: guest pdps
 *
 * This function is used to destroy a PPGTT mm object from a guest to GVT-g
 * notification.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
                u64 pdps[])
{
        struct intel_vgpu_mm *mm;

        mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
        if (!mm) {
                gvt_vgpu_err("fail to find ppgtt instance.\n");
                return -EINVAL;
        }
        intel_vgpu_mm_put(mm);
        return 0;
}
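
/*
 * Illustrative only: the PV_INFO g2v notification handler (outside this
 * file) is expected to dispatch to the two helpers above roughly like
 * this, picking the root entry type from the notification value:
 *
 *	case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
 *		return intel_vgpu_g2v_create_ppgtt_mm(vgpu,
 *				GTT_TYPE_PPGTT_ROOT_L4_ENTRY, pdps);
 *	case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY:
 *		return intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, pdps);
 */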

/**
 * intel_gvt_init_gtt - initialize mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the initialization stage, to initialize
 * the mm components of a GVT device.
 *
 * Returns:
 * zero on success, negative error code if failed.
 */
int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
        int ret;
        void *page;
        struct device *dev = &gvt->dev_priv->drm.pdev->dev;
        dma_addr_t daddr;

        gvt_dbg_core("init gtt\n");

        if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
                || IS_KABYLAKE(gvt->dev_priv)) {
                gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
                gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
        } else {
                return -ENODEV;
        }

        page = (void *)get_zeroed_page(GFP_KERNEL);
        if (!page) {
                gvt_err("fail to allocate scratch ggtt page\n");
                return -ENOMEM;
        }

        daddr = dma_map_page(dev, virt_to_page(page), 0,
                        4096, PCI_DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, daddr)) {
                gvt_err("fail to dmamap scratch ggtt page\n");
                __free_page(virt_to_page(page));
                return -ENOMEM;
        }

        gvt->gtt.scratch_page = virt_to_page(page);
        gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);

        if (enable_out_of_sync) {
                ret = setup_spt_oos(gvt);
                if (ret) {
                        gvt_err("fail to initialize SPT oos\n");
                        dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
                        __free_page(gvt->gtt.scratch_page);
                        return ret;
                }
        }

        INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
        return 0;
}

/**
 * intel_gvt_clean_gtt - clean up mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the driver unloading stage, to clean up
 * the mm components of a GVT device.
 *
 */
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
        struct device *dev = &gvt->dev_priv->drm.pdev->dev;
        dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
                                        I915_GTT_PAGE_SHIFT);

        dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);

        __free_page(gvt->gtt.scratch_page);

        if (enable_out_of_sync)
                clean_spt_oos(gvt);
}

/**
 * intel_vgpu_reset_ggtt - reset the GGTT entry
 * @vgpu: a vGPU
 *
 * This function is called at the vGPU create stage
 * to reset all the GGTT entries.
 *
 */
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
{
        struct intel_gvt *gvt = vgpu->gvt;
        struct drm_i915_private *dev_priv = gvt->dev_priv;
        struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
        struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
        u32 index;
        u32 num_entries;

        pte_ops->set_pfn(&entry, gvt->gtt.scratch_mfn);
        pte_ops->set_present(&entry);

        index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
        num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
        while (num_entries--)
                ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);

        index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
        num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
        while (num_entries--)
                ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);

        ggtt_invalidate(dev_priv);
}

/**
 * intel_vgpu_reset_gtt - reset all GTT related status
 * @vgpu: a vGPU
 *
 * This function is called from the vfio core to reset all
 * GTT related status, including GGTT, PPGTT and scratch page.
 *
 */
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
{
        /* Shadow pages are only created when there is no page
         * table tracking data, so remove page tracking data after
         * removing the shadow pages.
         */
        intel_vgpu_destroy_all_ppgtt_mm(vgpu);
        intel_vgpu_reset_ggtt(vgpu);
}