2015-02-10 19:05:47 +08:00
/*
 * Copyright (c) 2011-2015 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
2020-02-27 16:44:06 +02:00
# include "i915_drv.h"
# include "i915_pvinfo.h"
2015-02-10 19:05:47 +08:00
# include "i915_vgpu.h"
/**
 * DOC: Intel GVT-g guest support
 *
 * Intel GVT-g is a graphics virtualization technology which shares the
 * GPU among multiple virtual machines on a time-sharing basis. Each
 * virtual machine is presented a virtual GPU (vGPU), which has equivalent
 * features as the underlying physical GPU (pGPU), so the i915 driver can run
 * seamlessly in a virtual machine. This file provides vGPU specific
 * optimizations when running in a virtual machine, to reduce the complexity
 * of vGPU emulation and to improve the overall performance.
 *
 * A primary function introduced here is the so-called "address space
 * ballooning" technique. Intel GVT-g partitions global graphics memory among
 * multiple VMs, so each VM can directly access a portion of the memory without
 * hypervisor's intervention, e.g. filling textures or queuing commands.
 * However, with the partitioning an unmodified i915 driver would assume a
 * smaller graphics memory starting from address ZERO, which would require the
 * vGPU emulation module to translate the graphics address between 'guest view'
 * and 'host view' for all registers and command opcodes which contain a
 * graphics memory address. To reduce the complexity, Intel GVT-g introduces
 * "address space ballooning": the exact partitioning knowledge is told to each
 * guest i915 driver, which then reserves and prevents non-allocated portions
 * from allocation. Thus the vGPU emulation module only needs to scan and
 * validate graphics addresses, without the complexity of address translation.
 */
/**
2020-02-27 16:44:06 +02:00
* intel_vgpu_detect - detect virtual GPU
2016-06-03 14:02:17 +01:00
* @ dev_priv : i915 device private
2015-02-10 19:05:47 +08:00
*
* This function is called at the initialization stage , to detect whether
* running on a vGPU .
*/
2020-02-27 16:44:06 +02:00
void intel_vgpu_detect ( struct drm_i915_private * dev_priv )
2015-02-10 19:05:47 +08:00
{
2021-01-28 14:31:23 +01:00
struct pci_dev * pdev = to_pci_dev ( dev_priv - > drm . dev ) ;
2017-06-09 15:48:05 +08:00
u64 magic ;
u16 version_major ;
2019-06-19 18:00:21 -07:00
void __iomem * shared_area ;
2015-02-10 19:05:47 +08:00
BUILD_BUG_ON ( sizeof ( struct vgt_if ) ! = VGT_PVINFO_SIZE ) ;
2019-06-19 18:00:21 -07:00
/*
* This is called before we setup the main MMIO BAR mappings used via
* the uncore structure , so we need to access the BAR directly . Since
* we do not support VGT on older gens , return early so we don ' t have
* to consider differently numbered or sized MMIO bars
*/
2021-06-05 21:50:49 -07:00
if ( GRAPHICS_VER ( dev_priv ) < 6 )
2019-06-19 18:00:21 -07:00
return ;
shared_area = pci_iomap_range ( pdev , 0 , VGT_PVINFO_PAGE , VGT_PVINFO_SIZE ) ;
if ( ! shared_area ) {
2020-01-09 12:06:42 +03:00
drm_err ( & dev_priv - > drm ,
" failed to map MMIO bar to check for VGT \n " ) ;
2015-02-10 19:05:47 +08:00
return ;
2019-06-19 18:00:21 -07:00
}
2015-02-10 19:05:47 +08:00
2019-06-19 18:00:21 -07:00
magic = readq ( shared_area + vgtif_offset ( magic ) ) ;
if ( magic ! = VGT_MAGIC )
goto out ;
version_major = readw ( shared_area + vgtif_offset ( version_major ) ) ;
2017-06-09 15:48:05 +08:00
if ( version_major < VGT_VERSION_MAJOR ) {
2020-01-09 12:06:42 +03:00
drm_info ( & dev_priv - > drm , " VGT interface version mismatch! \n " ) ;
2019-06-19 18:00:21 -07:00
goto out ;
2015-02-10 19:05:47 +08:00
}
2019-06-19 18:00:21 -07:00
dev_priv - > vgpu . caps = readl ( shared_area + vgtif_offset ( vgt_caps ) ) ;
2017-08-14 15:20:46 +08:00
2015-02-10 19:05:47 +08:00
dev_priv - > vgpu . active = true ;
2019-08-23 14:57:31 +08:00
mutex_init ( & dev_priv - > vgpu . lock ) ;
2020-01-09 12:06:42 +03:00
drm_info ( & dev_priv - > drm , " Virtual GPU for Intel GVT-g detected. \n " ) ;
2019-06-19 18:00:21 -07:00
out :
pci_iounmap ( pdev , shared_area ) ;
2015-02-10 19:05:47 +08:00
}
2015-02-10 19:05:48 +08:00
2020-02-27 16:44:06 +02:00
void intel_vgpu_register ( struct drm_i915_private * i915 )
{
/*
* Notify a valid surface after modesetting , when running inside a VM .
*/
if ( intel_vgpu_active ( i915 ) )
intel_uncore_write ( & i915 - > uncore , vgtif_reg ( display_ready ) ,
VGT_DRV_DISPLAY_READY ) ;
}
bool intel_vgpu_active ( struct drm_i915_private * dev_priv )
{
return dev_priv - > vgpu . active ;
}
2019-03-14 22:38:35 +00:00
bool intel_vgpu_has_full_ppgtt ( struct drm_i915_private * dev_priv )
2017-08-14 15:20:46 +08:00
{
2019-03-14 22:38:35 +00:00
return dev_priv - > vgpu . caps & VGT_CAPS_FULL_PPGTT ;
2017-08-14 15:20:46 +08:00
}
2020-02-27 16:44:06 +02:00
bool intel_vgpu_has_hwsp_emulation ( struct drm_i915_private * dev_priv )
{
return dev_priv - > vgpu . caps & VGT_CAPS_HWSP_EMULATION ;
}
bool intel_vgpu_has_huge_gtt ( struct drm_i915_private * dev_priv )
{
return dev_priv - > vgpu . caps & VGT_CAPS_HUGE_GTT ;
}
2015-02-10 19:05:48 +08:00
/* Bookkeeping for the GGTT ranges ballooned out on behalf of other VMs. */
struct _balloon_info_ {
	/*
	 * There are up to 2 regions per mappable/unmappable graphic
	 * memory that might be ballooned. Here, index 0/1 is for mappable
	 * graphic memory, 2/3 for unmappable graphic memory.
	 */
	struct drm_mm_node space[4];
};

static struct _balloon_info_ bl_info;
2017-05-31 10:35:52 +08:00
static void vgt_deballoon_space ( struct i915_ggtt * ggtt ,
struct drm_mm_node * node )
{
2020-01-09 12:06:42 +03:00
struct drm_i915_private * dev_priv = ggtt - > vm . i915 ;
2019-08-20 13:46:17 +08:00
if ( ! drm_mm_node_allocated ( node ) )
return ;
2020-01-09 12:06:42 +03:00
drm_dbg ( & dev_priv - > drm ,
" deballoon space: range [0x%llx - 0x%llx] %llu KiB. \n " ,
node - > start ,
node - > start + node - > size ,
node - > size / 1024 ) ;
2017-05-31 10:35:52 +08:00
2018-06-05 16:37:58 +01:00
ggtt - > vm . reserved - = node - > size ;
2017-05-31 10:35:52 +08:00
drm_mm_remove_node ( node ) ;
}
2015-02-10 19:05:48 +08:00
/**
* intel_vgt_deballoon - deballoon reserved graphics address trunks
2019-06-21 14:16:40 +01:00
* @ ggtt : the global GGTT from which we reserved earlier
2015-02-10 19:05:48 +08:00
*
* This function is called to deallocate the ballooned - out graphic memory , when
* driver is unloaded or when ballooning fails .
*/
2019-06-21 08:07:39 +01:00
void intel_vgt_deballoon ( struct i915_ggtt * ggtt )
2015-02-10 19:05:48 +08:00
{
2020-01-09 12:06:42 +03:00
struct drm_i915_private * dev_priv = ggtt - > vm . i915 ;
2015-02-10 19:05:48 +08:00
int i ;
2019-06-21 08:07:39 +01:00
if ( ! intel_vgpu_active ( ggtt - > vm . i915 ) )
2016-06-16 08:06:59 -04:00
return ;
2020-01-09 12:06:42 +03:00
drm_dbg ( & dev_priv - > drm , " VGT deballoon. \n " ) ;
2015-02-10 19:05:48 +08:00
2017-05-31 10:35:52 +08:00
for ( i = 0 ; i < 4 ; i + + )
2019-06-21 08:07:39 +01:00
vgt_deballoon_space ( ggtt , & bl_info . space [ i ] ) ;
2015-02-10 19:05:48 +08:00
}
2017-01-11 11:23:11 +00:00
static int vgt_balloon_space ( struct i915_ggtt * ggtt ,
2015-02-10 19:05:48 +08:00
struct drm_mm_node * node ,
unsigned long start , unsigned long end )
{
2020-01-09 12:06:42 +03:00
struct drm_i915_private * dev_priv = ggtt - > vm . i915 ;
2015-02-10 19:05:48 +08:00
unsigned long size = end - start ;
2017-05-31 10:35:52 +08:00
int ret ;
2015-02-10 19:05:48 +08:00
2017-01-17 22:06:11 +08:00
if ( start > = end )
2015-02-10 19:05:48 +08:00
return - EINVAL ;
2020-01-09 12:06:42 +03:00
drm_info ( & dev_priv - > drm ,
" balloon space: range [ 0x%lx - 0x%lx ] %lu KiB. \n " ,
2015-02-10 19:05:48 +08:00
start , end , size / 1024 ) ;
2022-01-14 14:23:17 +01:00
ret = i915_gem_gtt_reserve ( & ggtt - > vm , NULL , node ,
2017-05-31 10:35:52 +08:00
size , start , I915_COLOR_UNEVICTABLE ,
0 ) ;
if ( ! ret )
2018-06-05 16:37:58 +01:00
ggtt - > vm . reserved + = size ;
2017-05-31 10:35:52 +08:00
return ret ;
2015-02-10 19:05:48 +08:00
}
/**
* intel_vgt_balloon - balloon out reserved graphics address trunks
2019-06-21 14:16:40 +01:00
* @ ggtt : the global GGTT from which to reserve
2015-02-10 19:05:48 +08:00
*
* This function is called at the initialization stage , to balloon out the
* graphic address space allocated to other vGPUs , by marking these spaces as
* reserved . The ballooning related knowledge ( starting address and size of
* the mappable / unmappable graphic memory ) is described in the vgt_if structure
* in a reserved mmio range .
*
* To give an example , the drawing below depicts one typical scenario after
* ballooning . Here the vGPU1 has 2 pieces of graphic address spaces ballooned
* out each for the mappable and the non - mappable part . From the vGPU1 point of
* view , the total size is the same as the physical one , with the start address
* of its graphic space being zero . Yet there are some portions ballooned out (
* the shadow part , which are marked as reserved by drm allocator ) . From the
* host point of view , the graphic address space is partitioned by multiple
2016-05-31 22:55:13 +02:00
* vGPUs in different VMs . : :
2015-02-10 19:05:48 +08:00
*
2016-08-12 22:48:37 +02:00
* vGPU1 view Host view
* 0 - - - - - - > + - - - - - - - - - - - + + - - - - - - - - - - - +
* ^ | # # # # # # # # # # # | | vGPU3 |
* | | # # # # # # # # # # # | + - - - - - - - - - - - +
* | | # # # # # # # # # # # | | vGPU2 |
* | + - - - - - - - - - - - + + - - - - - - - - - - - +
* mappable GM | available | = = > | vGPU1 |
* | + - - - - - - - - - - - + + - - - - - - - - - - - +
* | | # # # # # # # # # # # | | |
* v | # # # # # # # # # # # | | Host |
* + = = = = = = = + = = = = = = = = = = = + + = = = = = = = = = = = +
* ^ | # # # # # # # # # # # | | vGPU3 |
* | | # # # # # # # # # # # | + - - - - - - - - - - - +
* | | # # # # # # # # # # # | | vGPU2 |
* | + - - - - - - - - - - - + + - - - - - - - - - - - +
* unmappable GM | available | = = > | vGPU1 |
* | + - - - - - - - - - - - + + - - - - - - - - - - - +
* | | # # # # # # # # # # # | | |
* | | # # # # # # # # # # # | | Host |
* v | # # # # # # # # # # # | | |
* total GM size - - - - - - > + - - - - - - - - - - - + + - - - - - - - - - - - +
2015-02-10 19:05:48 +08:00
*
* Returns :
* zero on success , non - zero if configuration invalid or ballooning failed
*/
2019-06-21 08:07:39 +01:00
int intel_vgt_balloon ( struct i915_ggtt * ggtt )
2015-02-10 19:05:48 +08:00
{
2020-01-09 12:06:42 +03:00
struct drm_i915_private * dev_priv = ggtt - > vm . i915 ;
struct intel_uncore * uncore = & dev_priv - > uncore ;
2018-06-05 16:37:58 +01:00
unsigned long ggtt_end = ggtt - > vm . total ;
2015-02-10 19:05:48 +08:00
unsigned long mappable_base , mappable_size , mappable_end ;
unsigned long unmappable_base , unmappable_size , unmappable_end ;
int ret ;
2019-06-21 08:07:39 +01:00
if ( ! intel_vgpu_active ( ggtt - > vm . i915 ) )
2016-06-16 08:06:59 -04:00
return 0 ;
2019-06-21 08:07:39 +01:00
mappable_base =
intel_uncore_read ( uncore , vgtif_reg ( avail_rs . mappable_gmadr . base ) ) ;
mappable_size =
intel_uncore_read ( uncore , vgtif_reg ( avail_rs . mappable_gmadr . size ) ) ;
unmappable_base =
intel_uncore_read ( uncore , vgtif_reg ( avail_rs . nonmappable_gmadr . base ) ) ;
unmappable_size =
intel_uncore_read ( uncore , vgtif_reg ( avail_rs . nonmappable_gmadr . size ) ) ;
2015-02-10 19:05:48 +08:00
mappable_end = mappable_base + mappable_size ;
unmappable_end = unmappable_base + unmappable_size ;
2020-01-09 12:06:42 +03:00
drm_info ( & dev_priv - > drm , " VGT ballooning configuration: \n " ) ;
drm_info ( & dev_priv - > drm ,
" Mappable graphic memory: base 0x%lx size %ldKiB \n " ,
2015-02-10 19:05:48 +08:00
mappable_base , mappable_size / 1024 ) ;
2020-01-09 12:06:42 +03:00
drm_info ( & dev_priv - > drm ,
" Unmappable graphic memory: base 0x%lx size %ldKiB \n " ,
2015-02-10 19:05:48 +08:00
unmappable_base , unmappable_size / 1024 ) ;
2017-02-15 08:43:54 +00:00
if ( mappable_end > ggtt - > mappable_end | |
2016-03-30 16:57:10 +03:00
unmappable_base < ggtt - > mappable_end | |
unmappable_end > ggtt_end ) {
2020-01-09 12:06:42 +03:00
drm_err ( & dev_priv - > drm , " Invalid ballooning configuration! \n " ) ;
2015-02-10 19:05:48 +08:00
return - EINVAL ;
}
/* Unmappable graphic memory ballooning */
2016-03-30 16:57:10 +03:00
if ( unmappable_base > ggtt - > mappable_end ) {
2017-01-11 11:23:11 +00:00
ret = vgt_balloon_space ( ggtt , & bl_info . space [ 2 ] ,
ggtt - > mappable_end , unmappable_base ) ;
2015-02-10 19:05:48 +08:00
if ( ret )
goto err ;
}
2017-03-10 10:22:38 +08:00
if ( unmappable_end < ggtt_end ) {
2017-01-11 11:23:11 +00:00
ret = vgt_balloon_space ( ggtt , & bl_info . space [ 3 ] ,
2017-03-10 10:22:38 +08:00
unmappable_end , ggtt_end ) ;
2015-02-10 19:05:48 +08:00
if ( ret )
2017-05-31 10:35:52 +08:00
goto err_upon_mappable ;
2015-02-10 19:05:48 +08:00
}
/* Mappable graphic memory ballooning */
2017-02-15 08:43:54 +00:00
if ( mappable_base ) {
2017-01-11 11:23:11 +00:00
ret = vgt_balloon_space ( ggtt , & bl_info . space [ 0 ] ,
2017-02-15 08:43:54 +00:00
0 , mappable_base ) ;
2015-02-10 19:05:48 +08:00
if ( ret )
2017-05-31 10:35:52 +08:00
goto err_upon_unmappable ;
2015-02-10 19:05:48 +08:00
}
2016-03-30 16:57:10 +03:00
if ( mappable_end < ggtt - > mappable_end ) {
2017-01-11 11:23:11 +00:00
ret = vgt_balloon_space ( ggtt , & bl_info . space [ 1 ] ,
mappable_end , ggtt - > mappable_end ) ;
2015-02-10 19:05:48 +08:00
if ( ret )
2017-05-31 10:35:52 +08:00
goto err_below_mappable ;
2015-02-10 19:05:48 +08:00
}
2020-01-09 12:06:42 +03:00
drm_info ( & dev_priv - > drm , " VGT balloon successfully \n " ) ;
2015-02-10 19:05:48 +08:00
return 0 ;
2017-05-31 10:35:52 +08:00
err_below_mappable :
vgt_deballoon_space ( ggtt , & bl_info . space [ 0 ] ) ;
err_upon_unmappable :
vgt_deballoon_space ( ggtt , & bl_info . space [ 3 ] ) ;
err_upon_mappable :
vgt_deballoon_space ( ggtt , & bl_info . space [ 2 ] ) ;
2015-02-10 19:05:48 +08:00
err :
2020-01-09 12:06:42 +03:00
drm_err ( & dev_priv - > drm , " VGT balloon fail \n " ) ;
2015-02-10 19:05:48 +08:00
return ret ;
}