/*
 * Xen stolen ticks accounting.
 */
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/math64.h>
#include <linux/gfp.h>

#include <asm/paravirt.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/events.h>
#include <xen/features.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
#include <xen/xen-ops.h>

/* runstate info updated by Xen */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);

/* return a consistent snapshot of 64-bit time/counter value */
static u64 get64(const u64 *p)
{
        u64 ret;

        if (BITS_PER_LONG < 64) {
                u32 *p32 = (u32 *)p;
                u32 h, l, h2;

                /*
                 * Read high then low, and then make sure high is
                 * still the same; this will only loop if low wraps
                 * and carries into high.
                 * XXX some clean way to make this endian-proof?
                 */
                do {
                        h = READ_ONCE(p32[1]);
                        l = READ_ONCE(p32[0]);
                        h2 = READ_ONCE(p32[1]);
                } while (h2 != h);

                ret = (((u64)h) << 32) | l;
        } else
                ret = READ_ONCE(*p);

        return ret;
}
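
/*
 * Illustrative note (not from the original source): on a 32-bit guest a
 * plain 64-bit load could tear around a carry, e.g. pairing the stale low
 * word of 0x00000001ffffffff with the new high word of 0x0000000200000000.
 * Re-reading the high word and retrying on a mismatch rules that out.
 */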
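
/*
 * Take a tear-free snapshot of @cpu's runstate area. Xen updates the area
 * in place, so retry until state_entry_time is unchanged across the copy
 * and the XEN_RUNSTATE_UPDATE flag is clear.
 */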
static void xen_get_runstate_snapshot_cpu(struct vcpu_runstate_info *res,
                                          unsigned int cpu)
{
        u64 state_time;
        struct vcpu_runstate_info *state;

        BUG_ON(preemptible());

        state = per_cpu_ptr(&xen_runstate, cpu);

        do {
                state_time = get64(&state->state_entry_time);
                rmb();  /* Hypervisor might update data. */
                *res = READ_ONCE(*state);
                rmb();  /* Hypervisor might update data. */
        } while (get64(&state->state_entry_time) != state_time ||
                 (state_time & XEN_RUNSTATE_UPDATE));
}

/*
 * Runstate accounting
 */
void xen_get_runstate_snapshot(struct vcpu_runstate_info *res)
{
        xen_get_runstate_snapshot_cpu(res, smp_processor_id());
}

/* return true when a vcpu could run but has no real cpu to run on */
bool xen_vcpu_stolen(int vcpu)
{
        return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
}
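
/*
 * Steal time is time the vCPU wanted to run but could not: time spent
 * runnable (waiting for a physical CPU) plus time spent offline.
 */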
u64 xen_steal_clock(int cpu)
{
        struct vcpu_runstate_info state;

        xen_get_runstate_snapshot_cpu(&state, cpu);
        return state.time[RUNSTATE_runnable] + state.time[RUNSTATE_offline];
}
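
/*
 * Register this CPU's xen_runstate area with the hypervisor so that Xen
 * keeps it up to date in guest memory.
 */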
void xen_setup_runstate_info(int cpu)
{
        struct vcpu_register_runstate_memory_area area;

        area.addr.v = &per_cpu(xen_runstate, cpu);

        if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
                               xen_vcpu_nr(cpu), &area))
                BUG();
}
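
/*
 * Try to enable the runstate-update-flag assist: when available, Xen sets
 * XEN_RUNSTATE_UPDATE in state_entry_time while it rewrites a runstate
 * area, which makes snapshots of remote CPUs safe. Only then is full
 * runqueue steal accounting (paravirt_steal_rq_enabled) switched on.
 */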
void __init xen_time_setup_guest(void)
{
        bool xen_runstate_remote;

        xen_runstate_remote = !HYPERVISOR_vm_assist(VMASST_CMD_enable,
                                        VMASST_TYPE_runstate_update_flag);

        pv_time_ops.steal_clock = xen_steal_clock;

        static_key_slow_inc(&paravirt_steal_enabled);
        if (xen_runstate_remote)
                static_key_slow_inc(&paravirt_steal_rq_enabled);
}
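
/*
 * Usage sketch (illustrative, not part of this file): once
 * xen_time_setup_guest() has installed the hook, generic code reads
 * steal time through the paravirt interface, e.g.
 *
 *      u64 stolen = paravirt_steal_clock(smp_processor_id());
 *
 * which dispatches to xen_steal_clock() above.
 */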