// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Arm Ltd.
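/*
 * Paravirtualised (stolen) time support for KVM/arm64: the host reports
 * stolen time to the guest via the SMCCC "Paravirtualized Time" hypercalls
 * (PV_TIME_FEATURES and PV_TIME_ST, see Arm spec DEN 0057A) and a per-vCPU
 * 64-byte shared structure (struct pvclock_vcpu_stolen_time) whose location
 * in guest memory is configured by userspace.
 */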
#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>

#include <asm/kvm_mmu.h>
#include <asm/pvclock-abi.h>

#include <kvm/arm_hypercalls.h>

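/*
 * Accumulate the time this vCPU's host task has spent runnable but not
 * running (current->sched_info.run_delay) since the last update, and publish
 * the new total, little-endian, to the stolen_time field of the shared
 * structure at the IPA configured by userspace. The SRCU read lock protects
 * the memslot lookup done by kvm_put_guest().
 */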
void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u64 steal;
	__le64 steal_le;
	u64 offset;
	int idx;
	u64 base = vcpu->arch.steal.base;

	if (base == GPA_INVALID)
		return;

	/* Let's do the local bookkeeping */
	steal = vcpu->arch.steal.steal;
	steal += current->sched_info.run_delay - vcpu->arch.steal.last_steal;
	vcpu->arch.steal.last_steal = current->sched_info.run_delay;
	vcpu->arch.steal.steal = steal;

	steal_le = cpu_to_le64(steal);
	idx = srcu_read_lock(&kvm->srcu);
	offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
	kvm_put_guest(kvm, base + offset, steal_le, u64);
	srcu_read_unlock(&kvm->srcu, idx);
}

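/*
 * PV_TIME_FEATURES discovery call: report SMCCC_RET_SUCCESS for the two
 * stolen-time functions implemented here and SMCCC_RET_NOT_SUPPORTED for
 * everything else.
 */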
long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
{
	u32 feature = smccc_get_arg1(vcpu);
	long val = SMCCC_RET_NOT_SUPPORTED;

	switch (feature) {
	case ARM_SMCCC_HV_PV_TIME_FEATURES:
	case ARM_SMCCC_HV_PV_TIME_ST:
		val = SMCCC_RET_SUCCESS;
		break;
	}

	return val;
}

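/*
 * (Re)arm stolen-time reporting for this vCPU: reset the local accounting,
 * zero-initialise the structure in guest memory and hand back its IPA (or
 * GPA_INVALID if userspace never configured one). This is expected to be
 * called when the guest issues the PV_TIME_ST hypercall.
 */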
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
{
	struct pvclock_vcpu_stolen_time init_values = {};
	struct kvm *kvm = vcpu->kvm;
	u64 base = vcpu->arch.steal.base;
	int idx;

	if (base == GPA_INVALID)
		return base;

	/*
	 * Start counting stolen time from the time the guest requests
	 * the feature be enabled.
	 */
	vcpu->arch.steal.steal = 0;
	vcpu->arch.steal.last_steal = current->sched_info.run_delay;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_write_guest(kvm, base, &init_values, sizeof(init_values));
	srcu_read_unlock(&kvm->srcu, idx);

	return base;
}

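/*
 * KVM_ARM_VCPU_PVTIME_IPA: userspace programs the guest physical address of
 * this vCPU's stolen-time structure. The IPA must be 64-byte aligned, must be
 * covered by a memslot, and can only be set once per vCPU.
 */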
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr)
{
	u64 __user *user = (u64 __user *)attr->addr;
	struct kvm *kvm = vcpu->kvm;
	u64 ipa;
	int ret = 0;
	int idx;

	if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
		return -ENXIO;

	if (get_user(ipa, user))
		return -EFAULT;
	if (!IS_ALIGNED(ipa, 64))
		return -EINVAL;
	if (vcpu->arch.steal.base != GPA_INVALID)
		return -EEXIST;

	/* Check the address is in a valid memslot */
	idx = srcu_read_lock(&kvm->srcu);
	if (kvm_is_error_hva(gfn_to_hva(kvm, ipa >> PAGE_SHIFT)))
		ret = -EINVAL;
	srcu_read_unlock(&kvm->srcu, idx);

	if (!ret)
		vcpu->arch.steal.base = ipa;

	return ret;
}

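/* Return the currently configured stolen-time IPA (GPA_INVALID if unset). */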
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr)
{
	u64 __user *user = (u64 __user *)attr->addr;
	u64 ipa;

	if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
		return -ENXIO;

	ipa = vcpu->arch.steal.base;

	if (put_user(ipa, user))
		return -EFAULT;
	return 0;
}

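/* Only the stolen-time IPA attribute is currently supported. */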
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PVTIME_IPA:
		return 0;
	}
	return -ENXIO;
}
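
/*
 * Illustrative only (not kernel code): a VMM would typically enable stolen
 * time for a vCPU with something like the following, where STOLEN_TIME_IPA
 * is a placeholder for a 64-byte-aligned guest physical address chosen by
 * the VMM and backed by a memslot:
 *
 *	__u64 ipa = STOLEN_TIME_IPA;
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_ARM_VCPU_PVTIME_CTRL,
 *		.attr	= KVM_ARM_VCPU_PVTIME_IPA,
 *		.addr	= (__u64)&ipa,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
 */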