diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index c5b1694b292f..2a49dea231e7 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1347,7 +1347,13 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
 
 	vm->pt_ops = &xelp_pt_ops;
 
-	if (!(flags & XE_VM_FLAG_MIGRATION))
+	/*
+	 * Long-running workloads are not protected by the scheduler references.
+	 * By design, run_job for long-running workloads returns NULL and the
+	 * scheduler drops all the references of it, hence protecting the VM
+	 * for this case is necessary.
+	 */
+	if (flags & XE_VM_FLAG_LR_MODE)
 		xe_pm_runtime_get_noresume(xe);
 
 	vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
@@ -1457,7 +1463,7 @@ err_no_resv:
 	for_each_tile(tile, xe, id)
 		xe_range_fence_tree_fini(&vm->rftree[id]);
 	kfree(vm);
-	if (!(flags & XE_VM_FLAG_MIGRATION))
+	if (flags & XE_VM_FLAG_LR_MODE)
 		xe_pm_runtime_put(xe);
 	return ERR_PTR(err);
 }
@@ -1592,7 +1598,7 @@ static void vm_destroy_work_func(struct work_struct *w)
 
 	mutex_destroy(&vm->snap_mutex);
 
-	if (!(vm->flags & XE_VM_FLAG_MIGRATION))
+	if (vm->flags & XE_VM_FLAG_LR_MODE)
 		xe_pm_runtime_put(xe);
 
 	for_each_tile(tile, xe, id)
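
The sketch below is a minimal, self-contained userspace model of the pattern the patch enforces: take the runtime-PM reference at VM creation only when the LR-mode condition holds, and drop it under the same condition on both the creation error path and at destroy time, so gets and puts stay balanced. The fake_pm_* helpers, toy_vm type, and TOY_VM_FLAG_LR_MODE flag are illustrative stand-ins for this example, not xe driver APIs.

/*
 * Toy model of the balanced PM-reference pattern in the patch above.
 * Not kernel code; compiles and runs as an ordinary C program.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define TOY_VM_FLAG_LR_MODE (1u << 0)

static int fake_pm_refcount;

static void fake_pm_get(void) { fake_pm_refcount++; }
static void fake_pm_put(void) { fake_pm_refcount--; }

struct toy_vm {
	unsigned int flags;
};

/* Mirrors xe_vm_create(): grab a PM ref only for LR-mode VMs. */
static struct toy_vm *toy_vm_create(unsigned int flags, bool fail_alloc)
{
	struct toy_vm *vm;

	if (flags & TOY_VM_FLAG_LR_MODE)
		fake_pm_get();

	vm = malloc(sizeof(*vm));
	if (!vm || fail_alloc) {
		free(vm);
		/* Error path: drop the ref taken above, same condition. */
		if (flags & TOY_VM_FLAG_LR_MODE)
			fake_pm_put();
		return NULL;
	}

	vm->flags = flags;
	return vm;
}

/* Mirrors vm_destroy_work_func(): drop the ref for LR-mode VMs. */
static void toy_vm_destroy(struct toy_vm *vm)
{
	if (vm->flags & TOY_VM_FLAG_LR_MODE)
		fake_pm_put();
	free(vm);
}

int main(void)
{
	struct toy_vm *vm = toy_vm_create(TOY_VM_FLAG_LR_MODE, false);

	if (vm)
		toy_vm_destroy(vm);

	/* Simulated creation failure: the error path must stay balanced too. */
	toy_vm_create(TOY_VM_FLAG_LR_MODE, true);

	printf("pm refcount after create/destroy cycles: %d\n", fake_pm_refcount);
	return fake_pm_refcount == 0 ? 0 : 1;
}

The key design point modeled here is that the condition guarding the put must match the condition that guarded the get (the patch flips it from "not a migration VM" to "LR-mode VM" in all three places), otherwise the PM reference leaks or is dropped twice.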