KVM: s390: fixes for multiple epoch facility
We have certain cases where the multiple epoch facility is broken:
- timer wakeup during epoch change
- cpu hotplug
- SCK instruction
- stp sync checks
Fix those.

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.22 (GNU/Linux)

iQIcBAABAgAGBQJajqO9AAoJEBF7vIC1phx8fk4QAIACbrjlCpdqQ/3s8JC6SfNb
2tnyt4gQLw0ztb11kPGkjYXAkG9SA7v4Y3J3oXDtFH8BP/xhf6CO3jVmWCXUNv7E
wfk04Dh0xJwnwBsHYuERFlngB2BTODLoHV/w00fd/ja1c8T5yGULzADie6dJjNNT
B/q/eCIpzHQYZLZnBW7+YO05ciMwssi2luq46uijY/MZkfCYIvO8pf4MNcuLPvWq
CepxzCyXbdy2xw0fWu7lrYk/0VU08eYchGbqjsDbpuz3CdbKJhVLwZhSGx89WebX
/+s2IKXQZEtxKcBWHOZS2k98mB8LNMLumnaoeEJDjDt3T+lu3B/ujGfPURipcvGQ
0ch4iM5Fmhyx3IxYk4lEgrdoRpjHdjnBs1ONyNGIx35NJrfWjAsRRHw6ov6qQ0rH
rcDmBC8bBZmZYTxBXD+R5rTn+noJp2OkNt4Wc5X7SnKj3DIbfR3FKgT3z+mtJyIX
l8+qnaQpj/Pchuko4j7gh0/uzHVt3WtG3HtLqQnqHJTZM+b9nkIeDfUbdp3cLycD
W2wfs9LO2tXXcX1A05KFPPSjNDUypz1ToAfyt6JgPXjE7ZfHkpLJTQPrN+BoJZCk
3P//LQ85yJaDNcEJtH9S7nGjhSTdW1MqeO61mhlkag4A5Qe2Mquqd18H1ngac7aq
0Xna6qvJBvdvPwEmI04H
=umvB
-----END PGP SIGNATURE-----

Merge tag 'kvm-s390-master-4.16-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

KVM: s390: fixes for multiple epoch facility

We have certain cases where the multiple epoch facility is broken:
- timer wakeup during epoch change
- cpu hotplug
- SCK instruction
- stp sync checks
Fix those.
commit ee1a15e3f5
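For orientation, a standalone sketch (not part of the patch) of what the multiple epoch facility adds: the guest TOD is the host TOD plus a per-VM epoch, and an 8-bit epoch index (epdx) absorbs the carry when the 64-bit addition wraps. Only the epdx/epoch naming mirrors the kernel fields; the helper and the values are illustrative.

#include <stdint.h>
#include <stdio.h>

/* Guest TOD = host TOD + epoch; a 64-bit overflow carries into the
 * 8-bit epoch index (illustrative helper, not kernel code). */
static void guest_tod(uint8_t host_idx, uint64_t host_tod,
		      uint8_t epdx, uint64_t epoch,
		      uint8_t *g_idx, uint64_t *g_tod)
{
	*g_tod = host_tod + epoch;
	*g_idx = host_idx + epdx;
	if (*g_tod < host_tod)	/* wrapped: carry into the index */
		*g_idx += 1;
}

int main(void)
{
	uint8_t idx;
	uint64_t tod;

	/* a host TOD near 2^64 pushes the guest into the next epoch */
	guest_tod(0, 0xfff0000000000000ULL, 0, 0x0020000000000000ULL,
		  &idx, &tod);
	printf("guest: idx=%u tod=%#llx\n", idx, (unsigned long long)tod);
	return 0;
}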
arch/s390/kvm/interrupt.c
@@ -169,8 +169,15 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
 
 static int ckc_irq_pending(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm))
+	const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
+	const u64 ckc = vcpu->arch.sie_block->ckc;
+
+	if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
+		if ((s64)ckc >= (s64)now)
+			return 0;
+	} else if (ckc >= now) {
 		return 0;
+	}
 	return ckc_interrupts_enabled(vcpu);
 }
 
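The rewritten ckc_irq_pending() honors the clock-comparator sign control (the 0x0020000000000000 bit in CR0): with the bit set, comparator and clock are compared as signed values. A standalone sketch, purely illustrative, of how the two interpretations diverge once the TOD runs past 2^63:

#include <stdint.h>
#include <stdio.h>

static int pending_unsigned(uint64_t ckc, uint64_t now)
{
	return !(ckc >= now);	/* old behaviour: always unsigned */
}

static int pending_signed(uint64_t ckc, uint64_t now)
{
	return !((int64_t)ckc >= (int64_t)now);	/* sign control set */
}

int main(void)
{
	/* clock just past 2^63, comparator just below it */
	uint64_t now = 0x8000000000000001ULL;
	uint64_t ckc = 0x7fffffffffffffffULL;

	printf("unsigned: %d signed: %d\n",
	       pending_unsigned(ckc, now), pending_signed(ckc, now));
	/* prints "unsigned: 1 signed: 0": pending only when unsigned */
	return 0;
}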
@@ -1047,13 +1054,19 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 
 static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
 {
-	u64 now, cputm, sltime = 0;
+	const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
+	const u64 ckc = vcpu->arch.sie_block->ckc;
+	u64 cputm, sltime = 0;
 
 	if (ckc_interrupts_enabled(vcpu)) {
-		now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
-		sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
-		/* already expired or overflow? */
-		if (!sltime || vcpu->arch.sie_block->ckc <= now)
+		if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
+			if ((s64)now < (s64)ckc)
+				sltime = tod_to_ns((s64)ckc - (s64)now);
+		} else if (now < ckc) {
+			sltime = tod_to_ns(ckc - now);
+		}
+		/* already expired */
+		if (!sltime)
 			return 0;
 		if (cpu_timer_interrupts_enabled(vcpu)) {
 			cputm = kvm_s390_get_cpu_timer(vcpu);
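For reference: tod_to_ns() scales TOD clock units (4096 units per microsecond) to nanoseconds, a factor of 125/512. The split multiply below matches, to the best of my knowledge, the helper in arch/s390/include/asm/timex.h; treat the exact form as an assumption.

#include <stdint.h>
#include <stdio.h>

/* One TOD unit is 1/4096 us = 125/512 ns. Splitting the multiply at
 * bit 9 avoids 64-bit overflow for large TOD deltas. */
static uint64_t tod_to_ns(uint64_t todval)
{
	return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9);
}

int main(void)
{
	/* 4096 TOD units should come out as one microsecond */
	printf("%llu ns\n", (unsigned long long)tod_to_ns(4096));
	return 0;
}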
arch/s390/kvm/kvm-s390.c
@@ -179,6 +179,28 @@ int kvm_arch_hardware_enable(void)
 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
 			      unsigned long end);
 
+static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
+{
+	u8 delta_idx = 0;
+
+	/*
+	 * The TOD jumps by delta, we have to compensate this by adding
+	 * -delta to the epoch.
+	 */
+	delta = -delta;
+
+	/* sign-extension - we're adding to signed values below */
+	if ((s64)delta < 0)
+		delta_idx = -1;
+
+	scb->epoch += delta;
+	if (scb->ecd & ECD_MEF) {
+		scb->epdx += delta_idx;
+		if (scb->epoch < delta)
+			scb->epdx += 1;
+	}
+}
+
 /*
  * This callback is executed during stop_machine(). All CPUs are therefore
  * temporarily stopped. In order not to change guest behavior, we have to
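The carry handling in kvm_clock_sync_scb() can be sanity-checked against native 128-bit arithmetic. A standalone sketch, assuming a compiler with __int128; the helper mirrors the patch but drops the ECD_MEF guard and the kernel types:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the patch's adjustment: add -delta to the (epdx:epoch)
 * pair, handling carry/borrow across the 64-bit boundary by hand. */
static void clock_sync(uint8_t *epdx, uint64_t *epoch, uint64_t delta)
{
	uint8_t delta_idx = 0;

	delta = -delta;		/* TOD jumped by delta; epoch moves by -delta */
	if ((int64_t)delta < 0)
		delta_idx = -1;	/* sign-extend into the index */

	*epoch += delta;
	*epdx += delta_idx;
	if (*epoch < delta)	/* unsigned overflow: carry into the index */
		*epdx += 1;
}

int main(void)
{
	uint8_t epdx = 5;
	uint64_t epoch = 0x10, delta = 0x20;
	unsigned __int128 want =
		(((unsigned __int128)5 << 64) | 0x10) - 0x20;

	clock_sync(&epdx, &epoch, delta);
	printf("epdx=%u epoch=%#llx ok=%d\n", epdx,
	       (unsigned long long)epoch,
	       epdx == (uint8_t)(want >> 64) && epoch == (uint64_t)want);
	return 0;
}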
@@ -194,13 +216,17 @@ static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
 	unsigned long long *delta = v;
 
 	list_for_each_entry(kvm, &vm_list, vm_list) {
-		kvm->arch.epoch -= *delta;
 		kvm_for_each_vcpu(i, vcpu, kvm) {
-			vcpu->arch.sie_block->epoch -= *delta;
+			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
+			if (i == 0) {
+				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
+				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
+			}
 			if (vcpu->arch.cputm_enabled)
 				vcpu->arch.cputm_start += *delta;
 			if (vcpu->arch.vsie_block)
-				vcpu->arch.vsie_block->epoch -= *delta;
+				kvm_clock_sync_scb(vcpu->arch.vsie_block,
+						   *delta);
 		}
 	}
 	return NOTIFY_OK;
@@ -902,12 +928,9 @@ static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
 	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
 		return -EFAULT;
 
-	if (test_kvm_facility(kvm, 139))
-		kvm_s390_set_tod_clock_ext(kvm, &gtod);
-	else if (gtod.epoch_idx == 0)
-		kvm_s390_set_tod_clock(kvm, gtod.tod);
-	else
+	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
 		return -EINVAL;
+	kvm_s390_set_tod_clock(kvm, &gtod);
 
 	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
 		gtod.epoch_idx, gtod.tod);
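For context, userspace reaches this path through KVM_SET_DEVICE_ATTR on the VM file descriptor. A hedged sketch: the KVM_S390_VM_TOD and KVM_S390_VM_TOD_EXT constants and the vm_fd plumbing are assumptions about the uapi headers on an s390 host, while the epoch_idx/tod fields match the gtod usage in the hunk above.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: set the extended guest TOD (epoch index + base) on a VM fd.
 * Assumes an s390 host whose <linux/kvm.h> provides the TOD attribute
 * group and struct kvm_s390_vm_tod_clock. */
static int set_guest_tod_ext(int vm_fd, uint8_t epoch_idx, uint64_t tod)
{
	struct kvm_s390_vm_tod_clock gtod = {
		.epoch_idx = epoch_idx,
		.tod = tod,
	};
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_TOD,
		.attr = KVM_S390_VM_TOD_EXT,
		.addr = (uint64_t)(uintptr_t)&gtod,
	};

	/* fails with -EINVAL if epoch_idx != 0 without facility 139,
	 * as in the hunk above */
	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}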
@@ -932,13 +955,14 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
 
 static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 {
-	u64 gtod;
+	struct kvm_s390_vm_tod_clock gtod = { 0 };
 
-	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
+	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
+			   sizeof(gtod.tod)))
 		return -EFAULT;
 
-	kvm_s390_set_tod_clock(kvm, gtod);
-	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
+	kvm_s390_set_tod_clock(kvm, &gtod);
+	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
 	return 0;
 }
 
@@ -2389,6 +2413,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 	mutex_lock(&vcpu->kvm->lock);
 	preempt_disable();
 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
+	vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
 	preempt_enable();
 	mutex_unlock(&vcpu->kvm->lock);
 	if (!kvm_is_ucontrol(vcpu->kvm)) {
@@ -3021,8 +3046,8 @@ retry:
 	return 0;
 }
 
-void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
-				const struct kvm_s390_vm_tod_clock *gtod)
+void kvm_s390_set_tod_clock(struct kvm *kvm,
+			    const struct kvm_s390_vm_tod_clock *gtod)
 {
 	struct kvm_vcpu *vcpu;
 	struct kvm_s390_tod_clock_ext htod;
@@ -3034,10 +3059,12 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
 	get_tod_clock_ext((char *)&htod);
 
 	kvm->arch.epoch = gtod->tod - htod.tod;
-	kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
-
-	if (kvm->arch.epoch > gtod->tod)
-		kvm->arch.epdx -= 1;
+	kvm->arch.epdx = 0;
+	if (test_kvm_facility(kvm, 139)) {
+		kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
+		if (kvm->arch.epoch > gtod->tod)
+			kvm->arch.epdx -= 1;
+	}
 
 	kvm_s390_vcpu_block_all(kvm);
 	kvm_for_each_vcpu(i, vcpu, kvm) {
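The "kvm->arch.epoch > gtod->tod" check above detects that the unsigned subtraction gtod->tod - htod.tod wrapped (host TOD ahead of the requested guest TOD), so a borrow must be taken from the epoch index. A small standalone check, illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Compute (epdx:epoch) = guest - host over the 72-bit pair, detecting
 * the 64-bit borrow via "result > minuend", as in the hunk above. */
static void compute_epoch(uint8_t g_idx, uint64_t g_tod,
			  uint8_t h_idx, uint64_t h_tod,
			  uint8_t *epdx, uint64_t *epoch)
{
	*epoch = g_tod - h_tod;
	*epdx = g_idx - h_idx;
	if (*epoch > g_tod)	/* subtraction wrapped: take a borrow */
		*epdx -= 1;
}

int main(void)
{
	uint8_t epdx;
	uint64_t epoch;

	/* host TOD ahead of the requested guest TOD */
	compute_epoch(1, 0x100, 0, 0x200, &epdx, &epoch);
	printf("epdx=%u epoch=%#llx\n", epdx, (unsigned long long)epoch);
	/* expect epdx=0, epoch=0xffffffffffffff00 */
	return 0;
}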
@@ -3050,22 +3077,6 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
 	mutex_unlock(&kvm->lock);
 }
 
-void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
-{
-	struct kvm_vcpu *vcpu;
-	int i;
-
-	mutex_lock(&kvm->lock);
-	preempt_disable();
-	kvm->arch.epoch = tod - get_tod_clock();
-	kvm_s390_vcpu_block_all(kvm);
-	kvm_for_each_vcpu(i, vcpu, kvm)
-		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
-	kvm_s390_vcpu_unblock_all(kvm);
-	preempt_enable();
-	mutex_unlock(&kvm->lock);
-}
-
 /**
  * kvm_arch_fault_in_page - fault-in guest page if necessary
  * @vcpu: The corresponding virtual cpu
arch/s390/kvm/kvm-s390.h
@@ -281,9 +281,8 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
 
 /* implemented in kvm-s390.c */
-void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
-				const struct kvm_s390_vm_tod_clock *gtod);
-void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod);
+void kvm_s390_set_tod_clock(struct kvm *kvm,
+			    const struct kvm_s390_vm_tod_clock *gtod);
 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
arch/s390/kvm/priv.c
@@ -85,9 +85,10 @@ int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
 /* Handle SCK (SET CLOCK) interception */
 static int handle_set_clock(struct kvm_vcpu *vcpu)
 {
+	struct kvm_s390_vm_tod_clock gtod = { 0 };
 	int rc;
 	u8 ar;
-	u64 op2, val;
+	u64 op2;
 
 	vcpu->stat.instruction_sck++;
 
@@ -97,12 +98,12 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
 	op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
 	if (op2 & 7)	/* Operand must be on a doubleword boundary */
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-	rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
+	rc = read_guest(vcpu, op2, ar, &gtod.tod, sizeof(gtod.tod));
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 
-	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
-	kvm_s390_set_tod_clock(vcpu->kvm, val);
+	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod);
+	kvm_s390_set_tod_clock(vcpu->kvm, &gtod);
 
 	kvm_s390_set_psw_cc(vcpu, 0);
 	return 0;