KVM: PPC: Book3S: Treat VTB as a per-subcore register, not per-thread

POWER8 has one virtual timebase (VTB) register per subcore, not one
per CPU thread.  The HV KVM code currently treats VTB as a per-thread
register, which can lead to spurious soft lockup messages from guests
which use the VTB as the time source for the soft lockup detector.
(CPUs before POWER8 did not have the VTB register.)

For HV KVM, this fixes the problem by making only the primary thread
in each virtual core save and restore the VTB value.  With this,
the VTB state becomes part of the kvmppc_vcore structure.  This
also means that "piggybacking" of multiple virtual cores onto one
subcore is not possible on POWER8, because then the virtual cores
would share a single VTB register.

PR KVM emulates a VTB register, which is per-vcpu because PR KVM
has no notion of CPU threads or SMT.  For PR KVM we move the VTB
state into the kvmppc_vcpu_book3s struct.

Cc: stable@vger.kernel.org # v3.14+
Reported-by: Thomas Huth <thuth@redhat.com>
Tested-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
This commit is contained in:
Paul Mackerras 2016-09-15 13:42:52 +10:00
parent adad0d02a7
commit 88b02cf97b
8 changed files with 29 additions and 20 deletions

View File

@ -101,6 +101,7 @@ struct kvmppc_vcore {
 	u32 arch_compat;
 	ulong pcr;
 	ulong dpdes;		/* doorbell state (POWER8) */
+	ulong vtb;		/* virtual timebase */
 	ulong conferring_threads;
 	unsigned int halt_poll_ns;
 };
@ -119,6 +120,7 @@ struct kvmppc_vcpu_book3s {
 	u64 sdr1;
 	u64 hior;
 	u64 msr_mask;
+	u64 vtb;
 #ifdef CONFIG_PPC_BOOK3S_32
 	u32 vsid_pool[VSID_POOL_SIZE];
 	u32 vsid_next;

View File

@ -475,7 +475,6 @@ struct kvm_vcpu_arch {
 	ulong purr;
 	ulong spurr;
 	ulong ic;
-	ulong vtb;
 	ulong dscr;
 	ulong amr;
 	ulong uamor;

View File

@ -506,7 +506,6 @@ int main(void)
 	DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
 	DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
 	DEFINE(VCPU_IC, offsetof(struct kvm_vcpu, arch.ic));
-	DEFINE(VCPU_VTB, offsetof(struct kvm_vcpu, arch.vtb));
 	DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr));
 	DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr));
 	DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor));
@ -557,6 +556,7 @@ int main(void)
 	DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr));
 	DEFINE(VCORE_PCR, offsetof(struct kvmppc_vcore, pcr));
 	DEFINE(VCORE_DPDES, offsetof(struct kvmppc_vcore, dpdes));
+	DEFINE(VCORE_VTB, offsetof(struct kvmppc_vcore, vtb));
 	DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
 	DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
 	DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));

View File

@ -599,9 +599,6 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_BESCR:
 		*val = get_reg_val(id, vcpu->arch.bescr);
 		break;
-	case KVM_REG_PPC_VTB:
-		*val = get_reg_val(id, vcpu->arch.vtb);
-		break;
 	case KVM_REG_PPC_IC:
 		*val = get_reg_val(id, vcpu->arch.ic);
 		break;
@ -673,9 +670,6 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_BESCR:
 		vcpu->arch.bescr = set_reg_val(id, *val);
 		break;
-	case KVM_REG_PPC_VTB:
-		vcpu->arch.vtb = set_reg_val(id, *val);
-		break;
 	case KVM_REG_PPC_IC:
 		vcpu->arch.ic = set_reg_val(id, *val);
 		break;

View File

@ -579,7 +579,7 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val
 		*spr_val = vcpu->arch.spurr;
 		break;
 	case SPRN_VTB:
-		*spr_val = vcpu->arch.vtb;
+		*spr_val = to_book3s(vcpu)->vtb;
 		break;
 	case SPRN_IC:
 		*spr_val = vcpu->arch.ic;

View File

@ -1199,6 +1199,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_DPDES:
 		*val = get_reg_val(id, vcpu->arch.vcore->dpdes);
 		break;
+	case KVM_REG_PPC_VTB:
+		*val = get_reg_val(id, vcpu->arch.vcore->vtb);
+		break;
 	case KVM_REG_PPC_DAWR:
 		*val = get_reg_val(id, vcpu->arch.dawr);
 		break;
@ -1391,6 +1394,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_DPDES:
 		vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
 		break;
+	case KVM_REG_PPC_VTB:
+		vcpu->arch.vcore->vtb = set_reg_val(id, *val);
+		break;
 	case KVM_REG_PPC_DAWR:
 		vcpu->arch.dawr = set_reg_val(id, *val);
 		break;
@ -2213,9 +2219,11 @@ static bool can_piggyback_subcore(struct kvmppc_vcore *pvc,
 	    pvc->lpcr != vc->lpcr)
 		return false;
-	/* P8 guest with > 1 thread per core would see wrong TIR value */
-	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
-	    (vc->num_threads > 1 || pvc->num_threads > 1))
+	/*
+	 * P8 guests can't do piggybacking, because then the
+	 * VTB would be shared between the vcpus.
+	 */
+	if (cpu_has_feature(CPU_FTR_ARCH_207S))
 		return false;
 	n_thr = cip->subcore_threads[sub] + pvc->num_threads;

View File

@ -644,9 +644,11 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
 38:
 BEGIN_FTR_SECTION
-	/* DPDES is shared between threads */
+	/* DPDES and VTB are shared between threads */
 	ld	r8, VCORE_DPDES(r5)
+	ld	r7, VCORE_VTB(r5)
 	mtspr	SPRN_DPDES, r8
+	mtspr	SPRN_VTB, r7
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

 	/* Mark the subcore state as inside guest */
@ -806,10 +808,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 	mtspr	SPRN_CIABR, r7
 	mtspr	SPRN_TAR, r8
 	ld	r5, VCPU_IC(r4)
-	ld	r6, VCPU_VTB(r4)
-	mtspr	SPRN_IC, r5
-	mtspr	SPRN_VTB, r6
 	ld	r8, VCPU_EBBHR(r4)
+	mtspr	SPRN_IC, r5
 	mtspr	SPRN_EBBHR, r8
 	ld	r5, VCPU_EBBRR(r4)
 	ld	r6, VCPU_BESCR(r4)
@ -1334,10 +1334,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 	stw	r6, VCPU_PSPB(r9)
 	std	r7, VCPU_FSCR(r9)
 	mfspr	r5, SPRN_IC
-	mfspr	r6, SPRN_VTB
 	mfspr	r7, SPRN_TAR
 	std	r5, VCPU_IC(r9)
-	std	r6, VCPU_VTB(r9)
 	std	r7, VCPU_TAR(r9)
 	mfspr	r8, SPRN_EBBHR
 	std	r8, VCPU_EBBHR(r9)
@ -1564,9 +1562,11 @@ kvmhv_switch_to_host:
 	isync
 BEGIN_FTR_SECTION
-	/* DPDES is shared between threads */
+	/* DPDES and VTB are shared between threads */
 	mfspr	r7, SPRN_DPDES
+	mfspr	r8, SPRN_VTB
 	std	r7, VCORE_DPDES(r5)
+	std	r8, VCORE_VTB(r5)
 	/* clear DPDES so we don't get guest doorbells in the host */
 	li	r8, 0
 	mtspr	SPRN_DPDES, r8

View File

@ -226,7 +226,7 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
 	 */
 	vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;
 	vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;
-	vcpu->arch.vtb += get_vtb() - vcpu->arch.entry_vtb;
+	to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb;
 	if (cpu_has_feature(CPU_FTR_ARCH_207S))
 		vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;
 	svcpu->in_use = false;
@ -1361,6 +1361,9 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_HIOR:
 		*val = get_reg_val(id, to_book3s(vcpu)->hior);
 		break;
+	case KVM_REG_PPC_VTB:
+		*val = get_reg_val(id, to_book3s(vcpu)->vtb);
+		break;
 	case KVM_REG_PPC_LPCR:
 	case KVM_REG_PPC_LPCR_64:
 		/*
@ -1397,6 +1400,9 @@ static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
 		to_book3s(vcpu)->hior = set_reg_val(id, *val);
 		to_book3s(vcpu)->hior_explicit = true;
 		break;
+	case KVM_REG_PPC_VTB:
+		to_book3s(vcpu)->vtb = set_reg_val(id, *val);
+		break;
 	case KVM_REG_PPC_LPCR:
 	case KVM_REG_PPC_LPCR_64:
 		kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));