Merge tag 'gvt-fixes-2019-12-18' of https://github.com/intel/gvt-linux into drm-intel-fixes
gvt-fixes-2019-12-18

- vGPU state setting locking fix (Zhenyu)
- Fix vGPU display dmabuf as read-only (Zhenyu)
- Properly handle vGPU display dmabuf page pin when rendering (Tina)
- Fix one guest boot warning to handle guc reset state (Fred)

Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
From: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191218051657.GA21662@zhen-hp.sh.intel.com
commit 78d75f5739
drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -36,13 +36,32 @@
 
 #define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))
 
+static int vgpu_pin_dma_address(struct intel_vgpu *vgpu,
+				unsigned long size,
+				dma_addr_t dma_addr)
+{
+	int ret = 0;
+
+	if (intel_gvt_hypervisor_dma_pin_guest_page(vgpu, dma_addr))
+		ret = -EINVAL;
+
+	return ret;
+}
+
+static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu,
+				   dma_addr_t dma_addr)
+{
+	intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr);
+}
+
 static int vgpu_gem_get_pages(
 		struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct intel_vgpu *vgpu;
 	struct sg_table *st;
 	struct scatterlist *sg;
-	int i, ret;
+	int i, j, ret;
 	gen8_pte_t __iomem *gtt_entries;
 	struct intel_vgpu_fb_info *fb_info;
 	u32 page_num;
@@ -51,6 +70,10 @@ static int vgpu_gem_get_pages(
 	if (WARN_ON(!fb_info))
 		return -ENODEV;
 
+	vgpu = fb_info->obj->vgpu;
+	if (WARN_ON(!vgpu))
+		return -ENODEV;
+
 	st = kmalloc(sizeof(*st), GFP_KERNEL);
 	if (unlikely(!st))
 		return -ENOMEM;
@@ -64,21 +87,53 @@ static int vgpu_gem_get_pages(
 	gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
 		(fb_info->start >> PAGE_SHIFT);
 	for_each_sg(st->sgl, sg, page_num, i) {
+		dma_addr_t dma_addr =
+			GEN8_DECODE_PTE(readq(&gtt_entries[i]));
+		if (vgpu_pin_dma_address(vgpu, PAGE_SIZE, dma_addr)) {
+			ret = -EINVAL;
+			goto out;
+		}
+
 		sg->offset = 0;
 		sg->length = PAGE_SIZE;
-		sg_dma_address(sg) =
-			GEN8_DECODE_PTE(readq(&gtt_entries[i]));
 		sg_dma_len(sg) = PAGE_SIZE;
+		sg_dma_address(sg) = dma_addr;
 	}
 
 	__i915_gem_object_set_pages(obj, st, PAGE_SIZE);
+out:
+	if (ret) {
+		dma_addr_t dma_addr;
+
+		for_each_sg(st->sgl, sg, i, j) {
+			dma_addr = sg_dma_address(sg);
+			if (dma_addr)
+				vgpu_unpin_dma_address(vgpu, dma_addr);
+		}
+		sg_free_table(st);
+		kfree(st);
+	}
 
-	return 0;
+	return ret;
 }
 
 static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
 		struct sg_table *pages)
 {
+	struct scatterlist *sg;
+
+	if (obj->base.dma_buf) {
+		struct intel_vgpu_fb_info *fb_info = obj->gvt_info;
+		struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
+		struct intel_vgpu *vgpu = obj->vgpu;
+		int i;
+
+		for_each_sg(pages->sgl, sg, fb_info->size, i)
+			vgpu_unpin_dma_address(vgpu,
+					       sg_dma_address(sg));
+	}
+
 	sg_free_table(pages);
 	kfree(pages);
 }
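The hunks above take one extra reference on each framebuffer page while the dmabuf is in use and, on the error path, drop exactly the references taken so far. A minimal user-space sketch of that pin/unpin discipline; the names (pin_entry, pin_page, unpin_page) are illustrative stand-ins, not GVT API:

    /* Sketch of pin-on-get / unpin-on-put with partial-failure unwind. */
    #include <stdio.h>

    struct pin_entry {
    	unsigned long dma_addr;
    	int refcount; /* mirrors the kref on the cached DMA mapping */
    };

    static int pin_page(struct pin_entry *e)
    {
    	if (!e->refcount)
    		return -1; /* not in the cache: cannot pin, like -ENOMEM above */
    	e->refcount++;
    	return 0;
    }

    static void unpin_page(struct pin_entry *e)
    {
    	if (e->refcount)
    		e->refcount--; /* the last put would free the mapping */
    }

    int main(void)
    {
    	struct pin_entry pages[3] = {
    		{ 0x1000, 1 }, { 0x2000, 1 }, { 0x3000, 0 } /* third unmapped */
    	};
    	int i, pinned = 0;

    	for (i = 0; i < 3; i++) {
    		if (pin_page(&pages[i]))
    			break; /* mirrors the "goto out" above */
    		pinned++;
    	}
    	if (pinned < 3)
    		while (pinned--)
    			unpin_page(&pages[pinned]); /* unwind only what was pinned */

    	for (i = 0; i < 3; i++)
    		printf("page %d refcount %d\n", i, pages[i].refcount);
    	return 0;
    }

The unwind loop plays the role of the for_each_sg() cleanup in vgpu_gem_get_pages(): every successful pin is matched by exactly one unpin, leaving the cache refcounts as they were before the failed call.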
@@ -163,6 +218,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
 	drm_gem_private_object_init(dev, &obj->base,
 		roundup(info->size, PAGE_SIZE));
 	i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class);
+	i915_gem_object_set_readonly(obj);
 
 	obj->read_domains = I915_GEM_DOMAIN_GTT;
 	obj->write_domain = 0;
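For context, a hedged sketch of what marking the guest display object read-only buys: a write access can be refused while reads proceed, so the host side cannot scribble on guest framebuffer memory. The names here (gem_obj, BO_READONLY, fault_access) are illustrative, not the i915 API:

    #include <stdio.h>
    #include <errno.h>

    struct gem_obj {
    	unsigned int flags;
    };
    #define BO_READONLY 0x1

    /* A fault handler for a shared buffer would reject write access
     * on a read-only object and let reads through. */
    static int fault_access(const struct gem_obj *obj, int write)
    {
    	if (write && (obj->flags & BO_READONLY))
    		return -EFAULT;
    	return 0;
    }

    int main(void)
    {
    	struct gem_obj fb = { .flags = BO_READONLY };
    	printf("read:  %d\n", fault_access(&fb, 0)); /* 0, allowed   */
    	printf("write: %d\n", fault_access(&fb, 1)); /* -14, refused */
    	return 0;
    }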
drivers/gpu/drm/i915/gvt/handlers.c
@@ -341,6 +341,10 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
 		engine_mask |= BIT(VCS1);
 	}
+	if (data & GEN9_GRDOM_GUC) {
+		gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id);
+		vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
+	}
 	engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask;
 }
@@ -1636,6 +1640,16 @@ static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
 	return 0;
 }
 
+static int guc_status_read(struct intel_vgpu *vgpu,
+			   unsigned int offset, void *p_data,
+			   unsigned int bytes)
+{
+	/* keep MIA_IN_RESET before clearing */
+	read_vreg(vgpu, offset, p_data, bytes);
+	vgpu_vreg(vgpu, offset) &= ~GS_MIA_IN_RESET;
+	return 0;
+}
+
 static int mmio_read_from_hw(struct intel_vgpu *vgpu,
 		unsigned int offset, void *p_data, unsigned int bytes)
 {
@@ -2672,6 +2686,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 
 	MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
 	MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
+	MMIO_DH(GUC_STATUS, D_ALL, guc_status_read, NULL);
 
 	return 0;
 }
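Taken together, the two handlers emulate a latch: a GUC reset request sets GS_MIA_IN_RESET in the virtual GUC_STATUS register, and the guest's next status read observes the bit once before the handler clears it, so the guest's reset poll completes instead of warning. A standalone sketch of that read-clear behavior (the bit position here is illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define GS_MIA_IN_RESET (1u << 0)   /* illustrative bit position */

    static uint32_t vreg_guc_status;    /* stands in for vgpu_vreg(...) */

    static void emulate_guc_reset_write(void)
    {
    	vreg_guc_status |= GS_MIA_IN_RESET;   /* gdrst_mmio_write side  */
    }

    static uint32_t emulate_guc_status_read(void)
    {
    	uint32_t val = vreg_guc_status;       /* keep MIA_IN_RESET ...  */
    	vreg_guc_status &= ~GS_MIA_IN_RESET;  /* ... then clear it      */
    	return val;
    }

    int main(void)
    {
    	emulate_guc_reset_write();
    	printf("first read:  0x%x\n", emulate_guc_status_read()); /* bit set */
    	printf("second read: 0x%x\n", emulate_guc_status_read()); /* cleared */
    	return 0;
    }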
drivers/gpu/drm/i915/gvt/hypercall.h
@@ -62,6 +62,8 @@ struct intel_gvt_mpt {
 			unsigned long size, dma_addr_t *dma_addr);
 	void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr);
 
+	int (*dma_pin_guest_page)(unsigned long handle, dma_addr_t dma_addr);
+
 	int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
 			unsigned long mfn, unsigned int nr, bool map);
 	int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1916,6 +1916,28 @@ err_unlock:
 	return ret;
 }
 
+static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr)
+{
+	struct kvmgt_guest_info *info;
+	struct gvt_dma *entry;
+	int ret = 0;
+
+	if (!handle_valid(handle))
+		return -ENODEV;
+
+	info = (struct kvmgt_guest_info *)handle;
+
+	mutex_lock(&info->vgpu->vdev.cache_lock);
+	entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
+	if (entry)
+		kref_get(&entry->ref);
+	else
+		ret = -ENOMEM;
+	mutex_unlock(&info->vgpu->vdev.cache_lock);
+
+	return ret;
+}
+
 static void __gvt_dma_release(struct kref *ref)
 {
 	struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);
@@ -2027,6 +2049,7 @@ static struct intel_gvt_mpt kvmgt_mpt = {
 	.gfn_to_mfn = kvmgt_gfn_to_pfn,
 	.dma_map_guest_page = kvmgt_dma_map_guest_page,
 	.dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
+	.dma_pin_guest_page = kvmgt_dma_pin_guest_page,
 	.set_opregion = kvmgt_set_opregion,
 	.set_edid = kvmgt_set_edid,
 	.get_vfio_device = kvmgt_get_vfio_device,
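The new hook follows the existing MPT pattern: the mpt.h wrapper dispatches through a per-hypervisor ops table that kvmgt fills in at registration. A self-contained sketch of that dispatch, with stub names standing in for the kvmgt implementation:

    #include <stdio.h>

    typedef unsigned long dma_addr_t;

    struct mpt_ops {
    	int (*dma_pin_guest_page)(unsigned long handle, dma_addr_t dma_addr);
    };

    static int stub_pin(unsigned long handle, dma_addr_t dma_addr)
    {
    	printf("pin handle=%lu addr=0x%lx\n", handle, dma_addr);
    	return 0;
    }

    /* The backend fills the table, like kvmgt_mpt does upstream. */
    static const struct mpt_ops backend = {
    	.dma_pin_guest_page = stub_pin,
    };

    /* Wrapper mirroring intel_gvt_hypervisor_dma_pin_guest_page(). */
    static int hypervisor_dma_pin_guest_page(unsigned long handle,
    					 dma_addr_t dma_addr)
    {
    	return backend.dma_pin_guest_page(handle, dma_addr);
    }

    int main(void)
    {
    	return hypervisor_dma_pin_guest_page(1, 0x1000);
    }

The indirection keeps dmabuf.c hypervisor-agnostic: callers only see the wrapper, and a Xen or KVM backend supplies its own table entry.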
drivers/gpu/drm/i915/gvt/mpt.h
@@ -254,6 +254,21 @@ static inline void intel_gvt_hypervisor_dma_unmap_guest_page(
 	intel_gvt_host.mpt->dma_unmap_guest_page(vgpu->handle, dma_addr);
 }
 
+/**
+ * intel_gvt_hypervisor_dma_pin_guest_page - pin guest dma buf
+ * @vgpu: a vGPU
+ * @dma_addr: guest dma addr
+ *
+ * Returns:
+ * 0 on success, negative error code if failed.
+ */
+static inline int
+intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu *vgpu,
+					dma_addr_t dma_addr)
+{
+	return intel_gvt_host.mpt->dma_pin_guest_page(vgpu->handle, dma_addr);
+}
+
 /**
  * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
  * @vgpu: a vGPU
drivers/gpu/drm/i915/gvt/vgpu.c
@@ -212,9 +212,9 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
  */
 void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
 {
-	mutex_lock(&vgpu->gvt->lock);
+	mutex_lock(&vgpu->vgpu_lock);
 	vgpu->active = true;
-	mutex_unlock(&vgpu->gvt->lock);
+	mutex_unlock(&vgpu->vgpu_lock);
 }
 
 /**
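The locking fix narrows the critical section from the global gvt lock to the per-vGPU lock, so flipping one vGPU's active flag no longer serializes against unrelated vGPUs. A hedged pthreads model of the per-instance locking (compile with -lpthread; names are illustrative):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct vgpu {
    	pthread_mutex_t vgpu_lock; /* per-instance, like vgpu->vgpu_lock */
    	bool active;
    };

    static void activate_vgpu(struct vgpu *v)
    {
    	pthread_mutex_lock(&v->vgpu_lock);   /* was a global lock before the fix */
    	v->active = true;
    	pthread_mutex_unlock(&v->vgpu_lock);
    }

    int main(void)
    {
    	struct vgpu v = { PTHREAD_MUTEX_INITIALIZER, false };
    	activate_vgpu(&v);
    	printf("active=%d\n", v.active);
    	return 0;
    }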