KVM: x86/mmu: Handle KVM bookkeeping in page-track APIs, not callers

Get/put references to KVM when a page-track notifier is (un)registered
instead of relying on the caller to do so.  Forcing the caller to do the
bookkeeping is unnecessary and adds one more thing for users to get
wrong, e.g. see commit 9ed1fdee9ee3 ("drm/i915/gvt: Get reference to KVM
iff attachment to VM is successful").

Reviewed-by: Yan Zhao <yan.y.zhao@intel.com>
Tested-by: Yongwei Ma <yongwei.ma@intel.com>
Link: https://lore.kernel.org/r/20230729013535.1070024-29-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
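
For readers less familiar with the page-track API, the sketch below shows what an external user looks like after this change: registration validates the target KVM instance and takes the reference itself, and unregistration drops it. The my_track_*() callbacks and my_attach()/my_detach() are hypothetical names used purely for illustration; only the kvm_page_track_register_notifier()/kvm_page_track_unregister_notifier() behavior comes from this patch.

#include <linux/kvm_host.h>
#include <asm/kvm_page_track.h>

/* Illustrative stub: invoked after emulation of a write to a tracked gfn. */
static void my_track_write(gpa_t gpa, const u8 *new, int bytes,
			   struct kvm_page_track_notifier_node *node)
{
}

/* Illustrative stub: invoked when a memory region is removed from the guest. */
static void my_track_remove_region(gfn_t gfn, unsigned long nr_pages,
				   struct kvm_page_track_notifier_node *node)
{
}

static struct kvm_page_track_notifier_node my_track_node = {
	.track_write		= my_track_write,
	.track_remove_region	= my_track_remove_region,
};

static int my_attach(struct kvm *kvm)
{
	/*
	 * No kvm_get_kvm() and no kvm->mm sanity check in the caller anymore:
	 * registration rejects a NULL or foreign-mm @kvm with -ESRCH and
	 * takes the reference on success.
	 */
	return kvm_page_track_register_notifier(kvm, &my_track_node);
}

static void my_detach(struct kvm *kvm)
{
	/* Drops the reference taken by kvm_page_track_register_notifier(). */
	kvm_page_track_unregister_notifier(kvm, &my_track_node);
}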

--- a/arch/x86/include/asm/kvm_page_track.h
+++ b/arch/x86/include/asm/kvm_page_track.h

@@ -44,12 +44,11 @@ struct kvm_page_track_notifier_node {
 			    struct kvm_page_track_notifier_node *node);
 };
 
-void
-kvm_page_track_register_notifier(struct kvm *kvm,
-				 struct kvm_page_track_notifier_node *n);
-void
-kvm_page_track_unregister_notifier(struct kvm *kvm,
-				   struct kvm_page_track_notifier_node *n);
+int kvm_page_track_register_notifier(struct kvm *kvm,
+				     struct kvm_page_track_notifier_node *n);
+void kvm_page_track_unregister_notifier(struct kvm *kvm,
+					struct kvm_page_track_notifier_node *n);
 
 int kvm_write_track_add_gfn(struct kvm *kvm, gfn_t gfn);
 int kvm_write_track_remove_gfn(struct kvm *kvm, gfn_t gfn);
 #else

--- a/arch/x86/kvm/mmu/page_track.c
+++ b/arch/x86/kvm/mmu/page_track.c

@@ -157,17 +157,22 @@ int kvm_page_track_init(struct kvm *kvm)
  * register the notifier so that event interception for the tracked guest
  * pages can be received.
  */
-void
-kvm_page_track_register_notifier(struct kvm *kvm,
-				 struct kvm_page_track_notifier_node *n)
+int kvm_page_track_register_notifier(struct kvm *kvm,
+				     struct kvm_page_track_notifier_node *n)
 {
 	struct kvm_page_track_notifier_head *head;
 
+	if (!kvm || kvm->mm != current->mm)
+		return -ESRCH;
+
+	kvm_get_kvm(kvm);
+
 	head = &kvm->arch.track_notifier_head;
 
 	write_lock(&kvm->mmu_lock);
 	hlist_add_head_rcu(&n->node, &head->track_notifier_list);
 	write_unlock(&kvm->mmu_lock);
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_page_track_register_notifier);
@@ -175,8 +180,7 @@ EXPORT_SYMBOL_GPL(kvm_page_track_register_notifier);
  * stop receiving the event interception. It is the opposed operation of
  * kvm_page_track_register_notifier().
  */
-void
-kvm_page_track_unregister_notifier(struct kvm *kvm,
-				   struct kvm_page_track_notifier_node *n)
+void kvm_page_track_unregister_notifier(struct kvm *kvm,
+					struct kvm_page_track_notifier_node *n)
 {
 	struct kvm_page_track_notifier_head *head;
@@ -187,6 +191,8 @@ kvm_page_track_unregister_notifier(struct kvm *kvm,
 	hlist_del_rcu(&n->node);
 	write_unlock(&kvm->mmu_lock);
 	synchronize_srcu(&head->track_srcu);
+
+	kvm_put_kvm(kvm);
 }
 EXPORT_SYMBOL_GPL(kvm_page_track_unregister_notifier);

--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c

@@ -654,21 +654,19 @@ out:
 static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
 {
 	struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
-
-	if (!vgpu->vfio_device.kvm ||
-	    vgpu->vfio_device.kvm->mm != current->mm) {
-		gvt_vgpu_err("KVM is required to use Intel vGPU\n");
-		return -ESRCH;
-	}
+	int ret;
 
 	if (__kvmgt_vgpu_exist(vgpu))
 		return -EEXIST;
 
 	vgpu->track_node.track_write = kvmgt_page_track_write;
 	vgpu->track_node.track_remove_region = kvmgt_page_track_remove_region;
-	kvm_get_kvm(vgpu->vfio_device.kvm);
-	kvm_page_track_register_notifier(vgpu->vfio_device.kvm,
-					 &vgpu->track_node);
+	ret = kvm_page_track_register_notifier(vgpu->vfio_device.kvm,
+					       &vgpu->track_node);
+	if (ret) {
+		gvt_vgpu_err("KVM is required to use Intel vGPU\n");
+		return ret;
+	}
 
 	set_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);
@@ -703,7 +701,6 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
 
 	kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm,
 					   &vgpu->track_node);
-	kvm_put_kvm(vgpu->vfio_device.kvm);
 
 	kvmgt_protect_table_destroy(vgpu);
 	gvt_cache_destroy(vgpu);