// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Arm Ltd.
# include <linux/arm-smccc.h>
# include <linux/kvm_host.h>
# include <linux/sched/stat.h>
# include <asm/kvm_mmu.h>
# include <asm/pvclock-abi.h>
# include <kvm/arm_hypercalls.h>
void kvm_update_stolen_time ( struct kvm_vcpu * vcpu )
{
struct kvm * kvm = vcpu - > kvm ;
2020-08-04 20:06:02 +03:00
u64 base = vcpu - > arch . steal . base ;
2020-08-04 20:06:00 +03:00
u64 last_steal = vcpu - > arch . steal . last_steal ;
2020-08-04 20:06:02 +03:00
u64 offset = offsetof ( struct pvclock_vcpu_stolen_time , stolen_time ) ;
u64 steal = 0 ;
2019-10-21 18:28:18 +03:00
int idx ;
if ( base = = GPA_INVALID )
return ;
idx = srcu_read_lock ( & kvm - > srcu ) ;
2020-08-04 20:06:02 +03:00
if ( ! kvm_get_guest ( kvm , base + offset , steal ) ) {
steal = le64_to_cpu ( steal ) ;
vcpu - > arch . steal . last_steal = READ_ONCE ( current - > sched_info . run_delay ) ;
steal + = vcpu - > arch . steal . last_steal - last_steal ;
kvm_put_guest ( kvm , base + offset , cpu_to_le64 ( steal ) ) ;
}
2019-10-21 18:28:18 +03:00
srcu_read_unlock ( & kvm - > srcu , idx ) ;
}
long kvm_hypercall_pv_features ( struct kvm_vcpu * vcpu )
{
u32 feature = smccc_get_arg1 ( vcpu ) ;
long val = SMCCC_RET_NOT_SUPPORTED ;
switch ( feature ) {
case ARM_SMCCC_HV_PV_TIME_FEATURES :
2019-10-21 18:28:18 +03:00
case ARM_SMCCC_HV_PV_TIME_ST :
2020-08-04 20:05:59 +03:00
if ( vcpu - > arch . steal . base ! = GPA_INVALID )
val = SMCCC_RET_SUCCESS ;
2019-10-21 18:28:16 +03:00
break ;
}
return val ;
}
gpa_t kvm_init_stolen_time ( struct kvm_vcpu * vcpu )
{
struct pvclock_vcpu_stolen_time init_values = { } ;
struct kvm * kvm = vcpu - > kvm ;
u64 base = vcpu - > arch . steal . base ;
if ( base = = GPA_INVALID )
return base ;
/*
* Start counting stolen time from the time the guest requests
* the feature enabled .
*/
vcpu - > arch . steal . last_steal = current - > sched_info . run_delay ;
2020-08-17 14:07:28 +03:00
kvm_write_guest_lock ( kvm , base , & init_values , sizeof ( init_values ) ) ;
2019-10-21 18:28:18 +03:00
return base ;
}
/*
 * Stolen-time reporting needs the scheduler's run_delay accounting,
 * so PV time is only supported when schedstats are available.
 */
bool kvm_arm_pvtime_supported(void)
{
	return sched_info_on() != 0;
}
int kvm_arm_pvtime_set_attr ( struct kvm_vcpu * vcpu ,
struct kvm_device_attr * attr )
{
u64 __user * user = ( u64 __user * ) attr - > addr ;
struct kvm * kvm = vcpu - > kvm ;
u64 ipa ;
int ret = 0 ;
int idx ;
2020-06-22 17:27:10 +03:00
if ( ! kvm_arm_pvtime_supported ( ) | |
attr - > attr ! = KVM_ARM_VCPU_PVTIME_IPA )
2019-10-21 18:28:20 +03:00
return - ENXIO ;
if ( get_user ( ipa , user ) )
return - EFAULT ;
if ( ! IS_ALIGNED ( ipa , 64 ) )
return - EINVAL ;
if ( vcpu - > arch . steal . base ! = GPA_INVALID )
return - EEXIST ;
/* Check the address is in a valid memslot */
idx = srcu_read_lock ( & kvm - > srcu ) ;
if ( kvm_is_error_hva ( gfn_to_hva ( kvm , ipa > > PAGE_SHIFT ) ) )
ret = - EINVAL ;
srcu_read_unlock ( & kvm - > srcu , idx ) ;
if ( ! ret )
vcpu - > arch . steal . base = ipa ;
return ret ;
}
int kvm_arm_pvtime_get_attr ( struct kvm_vcpu * vcpu ,
struct kvm_device_attr * attr )
{
u64 __user * user = ( u64 __user * ) attr - > addr ;
u64 ipa ;
2020-06-22 17:27:10 +03:00
if ( ! kvm_arm_pvtime_supported ( ) | |
attr - > attr ! = KVM_ARM_VCPU_PVTIME_IPA )
2019-10-21 18:28:20 +03:00
return - ENXIO ;
ipa = vcpu - > arch . steal . base ;
if ( put_user ( ipa , user ) )
return - EFAULT ;
return 0 ;
}
int kvm_arm_pvtime_has_attr ( struct kvm_vcpu * vcpu ,
struct kvm_device_attr * attr )
{
switch ( attr - > attr ) {
case KVM_ARM_VCPU_PVTIME_IPA :
2020-06-22 17:27:10 +03:00
if ( kvm_arm_pvtime_supported ( ) )
return 0 ;
2019-10-21 18:28:20 +03:00
}
return - ENXIO ;
}