KVM: arm64: pvtime: Fix potential loss of stolen time
We should only read current->sched_info.run_delay once when updating stolen time; otherwise a change between the two reads can be missed (preemption disabling comes after the vcpu request checks).

Signed-off-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20200804170604.42662-3-drjones@redhat.com
parent 38480df564
commit 2dbd780e34
arch/arm64/kvm/pvtime.c
@@ -13,6 +13,7 @@
 void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = vcpu->kvm;
+	u64 last_steal = vcpu->arch.steal.last_steal;
 	u64 steal;
 	__le64 steal_le;
 	u64 offset;
@@ -24,8 +25,8 @@ void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
 
 	/* Let's do the local bookkeeping */
 	steal = vcpu->arch.steal.steal;
-	steal += current->sched_info.run_delay - vcpu->arch.steal.last_steal;
-	vcpu->arch.steal.last_steal = current->sched_info.run_delay;
+	vcpu->arch.steal.last_steal = READ_ONCE(current->sched_info.run_delay);
+	steal += vcpu->arch.steal.last_steal - last_steal;
 	vcpu->arch.steal.steal = steal;
 
 	steal_le = cpu_to_le64(steal);
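To see why the single snapshot matters, here is a minimal user-space sketch of the race (illustration only: struct steal_state, update_two_reads, update_one_read and the simulated "+= 5" preemption are hypothetical stand-ins for current->sched_info.run_delay and the kernel's steal bookkeeping, not kernel code).

/*
 * Illustration only: a user-space model, not kernel code.
 * "run_delay" stands in for current->sched_info.run_delay; the
 * "+= 5" in the middle of each update models the task being
 * preempted (and accruing more run_delay) between the reads.
 */
#include <inttypes.h>
#include <stdio.h>

struct steal_state {
	uint64_t steal;      /* accumulated stolen time reported to the guest */
	uint64_t last_steal; /* last run_delay value we accounted for */
};

/* Old pattern: run_delay is read twice; anything that accrues between
 * the two reads is recorded in last_steal but never added to steal. */
static void update_two_reads(struct steal_state *s, uint64_t *run_delay)
{
	s->steal += *run_delay - s->last_steal;  /* first read */
	*run_delay += 5;                         /* preempted here: +5 of run_delay */
	s->last_steal = *run_delay;              /* second read: the 5 is skipped over */
}

/* New pattern: snapshot run_delay once and derive both values from it;
 * anything that accrues afterwards is simply picked up next time. */
static void update_one_read(struct steal_state *s, uint64_t *run_delay)
{
	uint64_t last_steal = s->last_steal;

	s->last_steal = *run_delay;              /* single read (READ_ONCE in the kernel) */
	*run_delay += 5;                         /* preempted here: accounted next update */
	s->steal += s->last_steal - last_steal;
}

int main(void)
{
	struct steal_state old = { 0, 0 }, new = { 0, 0 };
	uint64_t rd_old = 100, rd_new = 100;

	update_two_reads(&old, &rd_old);
	update_one_read(&new, &rd_new);

	/* One more quiet update so both variants see the final run_delay. */
	old.steal += rd_old - old.last_steal; old.last_steal = rd_old;
	new.steal += rd_new - new.last_steal; new.last_steal = rd_new;

	printf("two reads: steal=%" PRIu64 " (5 units lost)\n", old.steal); /* 100 */
	printf("one read:  steal=%" PRIu64 "\n", new.steal);                /* 105 */
	return 0;
}

The kernel's one-read scheme additionally wraps the snapshot in READ_ONCE() so the compiler cannot re-read run_delay behind the code's back and reintroduce the two-read window.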