drm/i915/gvt: devirtualize ->dma_{,un}map_guest_page

Just call the functions directly. Also remove a pointless wrapper.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20220411141403.86980-22-hch@lst.de
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Zhi Wang <zhi.a.wang@intel.com>
parent 4c2baaaf76
commit 8398eee85f
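For context, "devirtualize" here means replacing calls made through the intel_gvt_mpt function-pointer table with direct calls to the only remaining implementation (the KVMGT one), which is renamed from kvmgt_* to intel_gvt_*. The sketch below illustrates the pattern in miniature; it is a self-contained toy with simplified, hypothetical names, not the actual driver code:

/*
 * Minimal sketch of the devirtualization pattern applied by this
 * commit, using simplified, hypothetical names (not the real
 * i915/gvt types or functions).
 */
#include <stdio.h>

struct vgpu { int id; };

/* The indirection layer (cf. struct intel_gvt_mpt). */
struct mpt_ops {
	int (*dma_map_guest_page)(struct vgpu *vgpu, unsigned long gfn);
};

static int kvmgt_dma_map_guest_page(struct vgpu *vgpu, unsigned long gfn)
{
	printf("vgpu %d: map gfn %lu\n", vgpu->id, gfn);
	return 0;
}

/* Only one backend ever populates the table. */
static const struct mpt_ops kvmgt_mpt = {
	.dma_map_guest_page = kvmgt_dma_map_guest_page,
};

/* Before: every call goes through a pointer chase. */
static int map_via_ops(struct vgpu *vgpu, unsigned long gfn)
{
	return kvmgt_mpt.dma_map_guest_page(vgpu, gfn);
}

/* After: the implementation is renamed and called directly. */
int intel_gvt_dma_map_guest_page(struct vgpu *vgpu, unsigned long gfn)
{
	printf("vgpu %d: map gfn %lu\n", vgpu->id, gfn);
	return 0;
}

int main(void)
{
	struct vgpu v = { .id = 1 };

	map_via_ops(&v, 42);			/* old, indirect path */
	intel_gvt_dma_map_guest_page(&v, 42);	/* new, direct call */
	return 0;
}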
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -54,12 +54,6 @@ static int vgpu_pin_dma_address(struct intel_vgpu *vgpu,
 	return ret;
 }
 
-static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu,
-				   dma_addr_t dma_addr)
-{
-	intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr);
-}
-
 static int vgpu_gem_get_pages(
 		struct drm_i915_gem_object *obj)
 {
@@ -114,7 +108,7 @@ out:
 		for_each_sg(st->sgl, sg, i, j) {
 			dma_addr = sg_dma_address(sg);
 			if (dma_addr)
-				vgpu_unpin_dma_address(vgpu, dma_addr);
+				intel_gvt_dma_unmap_guest_page(vgpu, dma_addr);
 		}
 		sg_free_table(st);
 		kfree(st);
@@ -136,7 +130,7 @@ static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
 	int i;
 
 	for_each_sg(pages->sgl, sg, fb_info->size, i)
-		vgpu_unpin_dma_address(vgpu,
+		intel_gvt_dma_unmap_guest_page(vgpu,
 				       sg_dma_address(sg));
 }
 
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1013,7 +1013,7 @@ static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
 	if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
 		return;
 
-	intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
+	intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
 }
 
 static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
@@ -1212,8 +1212,8 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
 		return PTR_ERR(sub_spt);
 
 	for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
-		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
-				start_gfn + sub_index, PAGE_SIZE, &dma_addr);
+		ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + sub_index,
+						   PAGE_SIZE, &dma_addr);
 		if (ret) {
 			ppgtt_invalidate_spt(spt);
 			return ret;
@@ -1258,8 +1258,8 @@ static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
 	ops->set_64k_splited(&entry);
 
 	for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
-		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
-					start_gfn + i, PAGE_SIZE, &dma_addr);
+		ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + i,
+						   PAGE_SIZE, &dma_addr);
 		if (ret)
 			return ret;
 
@@ -1313,8 +1313,7 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
 	}
 
 	/* direct shadow */
-	ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
-						      &dma_addr);
+	ret = intel_gvt_dma_map_guest_page(vgpu, gfn, page_size, &dma_addr);
 	if (ret)
 		return -ENXIO;
 
@@ -2245,8 +2244,7 @@ static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
 
 	pfn = pte_ops->get_pfn(entry);
 	if (pfn != vgpu->gvt->gtt.scratch_mfn)
-		intel_gvt_hypervisor_dma_unmap_guest_page(vgpu,
-						pfn << PAGE_SHIFT);
+		intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
 }
 
 static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
@@ -2337,8 +2335,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 			goto out;
 		}
 
-		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
-				PAGE_SIZE, &dma_addr);
+		ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE,
+						   &dma_addr);
 		if (ret) {
 			gvt_vgpu_err("fail to populate guest ggtt entry\n");
 			/* guest driver may read/write the entry when partial
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -767,6 +767,10 @@ void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
 
 int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn);
 int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn);
+int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
+		unsigned long size, dma_addr_t *dma_addr);
+void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
+		dma_addr_t dma_addr);
 
 #include "trace.h"
 #include "mpt.h"
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -46,11 +46,6 @@ struct intel_gvt_mpt {
 	int (*host_init)(struct device *dev, void *gvt);
 	void (*host_exit)(struct device *dev, void *gvt);
 
-	int (*dma_map_guest_page)(struct intel_vgpu *vgpu, unsigned long gfn,
-				  unsigned long size, dma_addr_t *dma_addr);
-	void (*dma_unmap_guest_page)(struct intel_vgpu *vgpu,
-				     dma_addr_t dma_addr);
-
 	int (*dma_pin_guest_page)(struct intel_vgpu *vgpu, dma_addr_t dma_addr);
 };
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1874,7 +1874,7 @@ void intel_vgpu_detach_regions(struct intel_vgpu *vgpu)
 	vgpu->region = NULL;
 }
 
-static int kvmgt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
+int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
 		unsigned long size, dma_addr_t *dma_addr)
 {
 	struct gvt_dma *entry;
@@ -1950,7 +1950,7 @@ static void __gvt_dma_release(struct kref *ref)
 	__gvt_cache_remove_entry(entry->vgpu, entry);
 }
 
-static void kvmgt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
+void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
 		dma_addr_t dma_addr)
 {
 	struct gvt_dma *entry;
@@ -1968,8 +1968,6 @@ static void kvmgt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
 static const struct intel_gvt_mpt kvmgt_mpt = {
 	.host_init = kvmgt_host_init,
 	.host_exit = kvmgt_host_exit,
-	.dma_map_guest_page = kvmgt_dma_map_guest_page,
-	.dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
 	.dma_pin_guest_page = kvmgt_dma_pin_guest_page,
 };
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -71,35 +71,6 @@ static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt)
 	intel_gvt_host.mpt->host_exit(dev, gvt);
 }
 
-/**
- * intel_gvt_hypervisor_dma_map_guest_page - setup dma map for guest page
- * @vgpu: a vGPU
- * @gfn: guest pfn
- * @size: page size
- * @dma_addr: retrieve allocated dma addr
- *
- * Returns:
- * 0 on success, negative error code if failed.
- */
-static inline int intel_gvt_hypervisor_dma_map_guest_page(
-		struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size,
-		dma_addr_t *dma_addr)
-{
-	return intel_gvt_host.mpt->dma_map_guest_page(vgpu, gfn, size,
-						      dma_addr);
-}
-
-/**
- * intel_gvt_hypervisor_dma_unmap_guest_page - cancel dma map for guest page
- * @vgpu: a vGPU
- * @dma_addr: the mapped dma addr
- */
-static inline void intel_gvt_hypervisor_dma_unmap_guest_page(
-		struct intel_vgpu *vgpu, dma_addr_t dma_addr)
-{
-	intel_gvt_host.mpt->dma_unmap_guest_page(vgpu, dma_addr);
-}
-
 /**
  * intel_gvt_hypervisor_dma_pin_guest_page - pin guest dma buf
  * @vgpu: a vGPU