KVM: PPC: Book3S HV P9: Move cede logic out of XIVE escalation rearming
Move the cede abort logic out of xive escalation rearming and into the
caller to prepare for handling a similar case with nested guest entry.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Cédric Le Goater <clg@kaod.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20220303053315.1056880-4-npiggin@gmail.com
commit ad5ace91c5
parent 026728dc5d
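The shape of the change, before the diff: kvmppc_xive_rearm_escalation()
used to clear vcpu->arch.ceded itself when an escalation was pending; it
now only reports that condition, and the H_CEDE handler in the P9 entry
path aborts the cede. Below is a minimal user-space sketch of that calling
pattern, using mock types and a stubbed escalation check in place of the
real XIVE code (only the field and function names echo the diff; everything
else is illustrative).

#include <stdbool.h>
#include <stdio.h>

/* Mock of the few vcpu fields the pattern touches, not the kernel structs. */
struct mock_vcpu {
        bool ceded;        /* set by the cede path, cleared to abort the cede */
        bool xive_esc_on;  /* stands in for a pending XIVE escalation */
};

static void mock_cede(struct mock_vcpu *vcpu)
{
        vcpu->ceded = true;
}

/*
 * After the change: the rearm helper only reports whether the cede may
 * stand (true) or must be aborted (false); it no longer touches ->ceded.
 */
static bool mock_rearm_escalation(struct mock_vcpu *vcpu)
{
        return !vcpu->xive_esc_on;
}

/* Caller-side logic, mirroring the H_CEDE branch in the diff below. */
static void handle_h_cede(struct mock_vcpu *vcpu)
{
        mock_cede(vcpu);
        if (!mock_rearm_escalation(vcpu)) {
                /* Pending escalation, so abort the cede. */
                vcpu->ceded = false;
        }
        printf("ceded=%d\n", vcpu->ceded);
}

int main(void)
{
        struct mock_vcpu quiet = { .ceded = false, .xive_esc_on = false };
        struct mock_vcpu pending = { .ceded = false, .xive_esc_on = true };

        handle_h_cede(&quiet);    /* prints ceded=1: the cede stands */
        handle_h_cede(&pending);  /* prints ceded=0: the caller aborts it */
        return 0;
}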
@@ -685,7 +685,7 @@ extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
                                int level, bool line_status);
 extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
 extern void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu);
-extern void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu);
+extern bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu);
 
 static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
 {
@@ -723,7 +723,7 @@ static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 ir
                                       int level, bool line_status) { return -ENODEV; }
 static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
 static inline void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu) { }
-static inline void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { }
+static inline bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { return true; }
 
 static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
         { return 0; }
@@ -4068,10 +4068,16 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
             !(vcpu->arch.shregs.msr & MSR_PR)) {
                 unsigned long req = kvmppc_get_gpr(vcpu, 3);
 
-                /* H_CEDE has to be handled now, not later */
+                /* H_CEDE has to be handled now */
                 if (req == H_CEDE) {
                         kvmppc_cede(vcpu);
-                        kvmppc_xive_rearm_escalation(vcpu); /* may un-cede */
+                        if (!kvmppc_xive_rearm_escalation(vcpu)) {
+                                /*
+                                 * Pending escalation so abort
+                                 * the cede.
+                                 */
+                                vcpu->arch.ceded = 0;
+                        }
                         kvmppc_set_gpr(vcpu, 3, 0);
                         trap = 0;
                 }
@@ -179,12 +179,13 @@ void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvmppc_xive_pull_vcpu);
 
-void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
+bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
 {
         void __iomem *esc_vaddr = (void __iomem *)vcpu->arch.xive_esc_vaddr;
+        bool ret = true;
 
         if (!esc_vaddr)
-                return;
+                return ret;
 
         /* we are using XIVE with single escalation */
 
@@ -197,7 +198,7 @@ void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
                  * we also don't want to set xive_esc_on to 1 here in
                  * case we race with xive_esc_irq().
                  */
-                vcpu->arch.ceded = 0;
+                ret = false;
                 /*
                  * The escalation interrupts are special as we don't EOI them.
                  * There is no need to use the load-after-store ordering offset
@@ -210,6 +211,8 @@ void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
                 __raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_00);
         }
         mb();
+
+        return ret;
 }
 EXPORT_SYMBOL_GPL(kvmppc_xive_rearm_escalation);