KVM: PPC: Book3S HV P9: Remove subcore HMI handling

On POWER9 and newer, rather than the complex HMI synchronisation and
subcore state, have each thread un-apply the guest TB offset before
calling into the early HMI handler.

This allows the subcore state to be avoided, including subcore enter
/ exit guest, which includes an expensive divide that shows up
slightly in profiles.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211123095231.1036501-54-npiggin@gmail.com
This commit is contained in:
Nicholas Piggin 2021-11-23 19:52:31 +10:00 committed by Michael Ellerman
parent 6398326b9b
commit 9c5a432a55
5 changed files with 67 additions and 9 deletions

View File

@ -759,6 +759,7 @@ void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_p9_realmode_hmi_handler(struct kvm_vcpu *vcpu);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,

View File

@ -4033,8 +4033,6 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
vcpu->arch.ceded = 0;
kvmppc_subcore_enter_guest();
vcpu_vpa_increment_dispatch(vcpu);
if (kvmhv_on_pseries()) {
@ -4087,8 +4085,6 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
vcpu_vpa_increment_dispatch(vcpu);
kvmppc_subcore_exit_guest();
return trap;
}
@ -6102,9 +6098,11 @@ static int kvmppc_book3s_init_hv(void)
if (r)
return r;
r = kvm_init_subcore_bitmap();
if (r)
return r;
if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
r = kvm_init_subcore_bitmap();
if (r)
return r;
}
/*
* We need a way of accessing the XICS interrupt controller,

View File

@ -20,10 +20,15 @@ void wait_for_subcore_guest_exit(void)
/*
* NULL bitmap pointer indicates that KVM module hasn't
* been loaded yet and hence no guests are running.
* been loaded yet and hence no guests are running, or running
* on POWER9 or newer CPU.
*
* If no KVM is in use, no need to co-ordinate among threads
* as all of them will always be in host and no one is going
* to modify TB other than the opal hmi handler.
*
* POWER9 and newer don't need this synchronisation.
*
* Hence, just return from here.
*/
if (!local_paca->sibling_subcore_state)

View File

@ -1013,7 +1013,7 @@ tm_return_to_guest:
kvmppc_realmode_machine_check(vcpu);
} else if (unlikely(trap == BOOK3S_INTERRUPT_HMI)) {
kvmppc_realmode_hmi_handler();
kvmppc_p9_realmode_hmi_handler(vcpu);
} else if (trap == BOOK3S_INTERRUPT_H_EMUL_ASSIST) {
vcpu->arch.emul_inst = mfspr(SPRN_HEIR);

View File

@ -136,6 +136,60 @@ void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
vcpu->arch.mce_evt = mce_evt;
}
/*
 * Real-mode HMI (Hypervisor Maintenance Interrupt) handler for POWER9 and
 * newer CPUs. Unlike the P7/P8 path, no cross-thread subcore synchronisation
 * is needed: each thread independently un-applies the guest timebase offset,
 * runs the early HMI handling, then re-applies the offset.
 *
 * Returns 1 if the HMI was a debug trigger that has been fully handled,
 * 0 otherwise (caller continues normal HMI/exit processing).
 */
long kvmppc_p9_realmode_hmi_handler(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	long ret = 0;

	/*
	 * Unapply and clear the offset first. That way, if the TB was not
	 * resynced then it will remain in host-offset, and if it was resynced
	 * then it is brought into host-offset. Then the tb offset is
	 * re-applied before continuing with the KVM exit.
	 *
	 * This way, we don't need to actually know whether or not OPAL
	 * resynced the timebase or do any of the complicated dance that the
	 * P7/8 path requires.
	 */
	if (vc->tb_offset_applied) {
		/* Move TB back to host time by subtracting the guest offset. */
		u64 new_tb = mftb() - vc->tb_offset_applied;

		/* TBU40 only sets the upper 40 bits of the timebase. */
		mtspr(SPRN_TBU40, new_tb);
		/*
		 * If the low 24 bits wrapped between the mftb() read and the
		 * TBU40 write, the write effectively moved TB backwards by
		 * one low-bits period; bump the upper bits by one to
		 * compensate and rewrite.
		 */
		if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
			new_tb += 0x1000000;
			mtspr(SPRN_TBU40, new_tb);
		}
		vc->tb_offset_applied = 0;
	}

	local_paca->hmi_irqs++;

	/* Debug-trigger HMIs are fully handled here; skip the early handler. */
	if (hmi_handle_debugtrig(NULL) >= 0) {
		ret = 1;
		goto out;
	}

	/* Platform early HMI handling (e.g. OPAL), if provided. */
	if (ppc_md.hmi_exception_early)
		ppc_md.hmi_exception_early(NULL);

out:
	/*
	 * Re-apply the guest timebase offset before returning to the KVM
	 * exit path, using the same wrap-compensation dance as above.
	 */
	if (vc->tb_offset) {
		u64 new_tb = mftb() + vc->tb_offset;
		mtspr(SPRN_TBU40, new_tb);
		if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
			new_tb += 0x1000000;
			mtspr(SPRN_TBU40, new_tb);
		}
		vc->tb_offset_applied = vc->tb_offset;
	}

	return ret;
}
/*
* The following subcore HMI handling is all only for pre-POWER9 CPUs.
*/
/* Check if dynamic split is in force and return subcore size accordingly. */
static inline int kvmppc_cur_subcore_size(void)
{