KVM: PPC: Book3S HV: Don't take kvm->lock around kvm_for_each_vcpu
Currently the HV KVM code takes the kvm->lock around calls to
kvm_for_each_vcpu() and kvm_get_vcpu_by_id() (which can call
kvm_for_each_vcpu() internally). However, that leads to a lock order
inversion problem, because these are called in contexts where the vcpu
mutex is held, but the vcpu mutexes nest within kvm->lock according to
Documentation/virtual/kvm/locking.txt. Hence there is a possibility of
deadlock.

To fix this, we simply don't take the kvm->lock mutex around these
calls. This is safe because the implementations of kvm_for_each_vcpu()
and kvm_get_vcpu_by_id() have been designed to be able to be called
locklessly.

Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Reviewed-by: Cédric Le Goater <clg@kaod.org>
parent 1659e27d2b
commit 5a3f49364c
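To make the lock order inversion concrete, here is a minimal user-space C sketch (illustrative only, not kernel code; the thread functions and lock variables are hypothetical stand-ins for kvm->lock and vcpu->mutex). One thread takes the locks in the order documented in locking.txt, the other in the order the old HV lookup path used; run together they can deadlock:

/*
 * Hypothetical illustration of the AB-BA inversion described in the
 * commit message: locking.txt requires kvm->lock to be taken outside
 * (before) a vcpu->mutex, but the old HV code took kvm->lock while a
 * vcpu mutex was already held.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t kvm_lock   = PTHREAD_MUTEX_INITIALIZER; /* stands in for kvm->lock */
static pthread_mutex_t vcpu_mutex = PTHREAD_MUTEX_INITIALIZER; /* stands in for vcpu->mutex */

/* Documented order: kvm->lock first, then vcpu->mutex. */
static void *documented_order(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&kvm_lock);
	pthread_mutex_lock(&vcpu_mutex);
	puts("documented order: got both locks");
	pthread_mutex_unlock(&vcpu_mutex);
	pthread_mutex_unlock(&kvm_lock);
	return NULL;
}

/*
 * Old HV path: the vcpu mutex is already held (e.g. during a vcpu
 * ioctl), then kvm->lock is taken for the lookup -- the reverse order,
 * so the two threads can each end up waiting on the other forever.
 */
static void *inverted_order(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&vcpu_mutex);
	pthread_mutex_lock(&kvm_lock);	/* what the old kvmppc_find_vcpu() did */
	puts("inverted order: got both locks");
	pthread_mutex_unlock(&kvm_lock);
	pthread_mutex_unlock(&vcpu_mutex);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, documented_order, NULL);
	pthread_create(&b, NULL, inverted_order, NULL);
	pthread_join(a, NULL);	/* may hang if the two threads interleave badly */
	pthread_join(b, NULL);
	return 0;
}

Dropping kvm->lock from the lookup path, as the diff below does, removes the second ordering entirely.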
@@ -446,12 +446,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
 
 static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
 {
-	struct kvm_vcpu *ret;
-
-	mutex_lock(&kvm->lock);
-	ret = kvm_get_vcpu_by_id(kvm, id);
-	mutex_unlock(&kvm->lock);
-	return ret;
+	return kvm_get_vcpu_by_id(kvm, id);
 }
 
 static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
@@ -1583,7 +1578,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 	u64 mask;
 
-	mutex_lock(&kvm->lock);
 	spin_lock(&vc->lock);
 	/*
 	 * If ILE (interrupt little-endian) has changed, update the
@@ -1623,7 +1617,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
 	mask &= 0xFFFFFFFF;
 	vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
 	spin_unlock(&vc->lock);
-	mutex_unlock(&kvm->lock);
 }
 
 static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
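The commit message's claim that kvm_for_each_vcpu() and kvm_get_vcpu_by_id() can safely be called without kvm->lock rests on the vcpu array being append-only for the lifetime of the VM, with the online count published only after the new entry is stored. Below is a minimal user-space sketch of that append-and-publish pattern (hypothetical names, not the kernel's implementation):

/*
 * Hypothetical sketch of why iterating the vcpu array without kvm->lock
 * is safe: entries are only ever appended, and each entry is stored
 * before the count is published, so a reader that loads the count first
 * never sees an unfilled slot.
 */
#include <stdatomic.h>
#include <stdio.h>

#define MAX_VCPUS 16

struct vcpu { int id; };

static struct vcpu *vcpus[MAX_VCPUS];
static atomic_int online_vcpus;

/*
 * Writer side (vcpu creation): fill the slot, then publish the count.
 * Writers are assumed to be serialized among themselves (in the kernel
 * that is done by kvm->lock at vcpu creation time) and to stay within
 * MAX_VCPUS.
 */
static void add_vcpu(struct vcpu *v)
{
	int n = atomic_load_explicit(&online_vcpus, memory_order_relaxed);

	vcpus[n] = v;
	/* Release pairs with the acquire load in find_vcpu_by_id(). */
	atomic_store_explicit(&online_vcpus, n + 1, memory_order_release);
}

/* Reader side: no lock needed; mirrors the kvm_for_each_vcpu() idea. */
static struct vcpu *find_vcpu_by_id(int id)
{
	int n = atomic_load_explicit(&online_vcpus, memory_order_acquire);

	for (int i = 0; i < n; i++)
		if (vcpus[i] && vcpus[i]->id == id)
			return vcpus[i];
	return NULL;
}

int main(void)
{
	static struct vcpu v0 = { .id = 0 }, v1 = { .id = 1 };

	add_vcpu(&v0);
	add_vcpu(&v1);
	printf("found vcpu %d\n", find_vcpu_by_id(1)->id);
	return 0;
}

In the kernel only the readers give up kvm->lock; the writer side (vcpu creation) remains serialized as before.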