// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* KVM Xen emulation
*/
#ifndef __ARCH_X86_KVM_XEN_H__
#define __ARCH_X86_KVM_XEN_H__
#ifdef CONFIG_KVM_XEN
#include <linux/jump_label_ratelimit.h>
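
/*
 * Deferred static key, raised while Xen HVM emulation is configured for
 * at least one VM; the inline helpers below test it so that the Xen
 * checks patch down to a single untaken branch when nothing uses Xen.
 */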
extern struct static_key_false_deferred kvm_xen_enabled;

int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu);
int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_hvm_evtchn_send(struct kvm *kvm, struct kvm_irq_routing_xen_evtchn *evt);
int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data);
int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc);
void kvm_xen_init_vm(struct kvm *kvm);
void kvm_xen_destroy_vm(struct kvm *kvm);
void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu);
void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu);
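
/*
 * Fast path for raising an event channel interrupt; used both by the
 * in-kernel IRQ routing code and by KVM_XEN_HVM_EVTCHN_SEND.
 */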
int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe,
                            struct kvm *kvm);
int kvm_xen_setup_evtchn(struct kvm *kvm,
                         struct kvm_kernel_irq_routing_entry *e,
                         const struct kvm_irq_routing_entry *ue);
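
/*
 * True once userspace has chosen a hypercall-page MSR index via
 * KVM_XEN_HVM_CONFIG, so that guest writes to that MSR are intercepted.
 */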
static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
        return static_branch_unlikely(&kvm_xen_enabled.key) &&
                kvm->arch.xen_hvm_config.msr;
}

static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
        return static_branch_unlikely(&kvm_xen_enabled.key) &&
                (kvm->arch.xen_hvm_config.flags &
                 KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
}

static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
        if (static_branch_unlikely(&kvm_xen_enabled.key) &&
            vcpu->arch.xen.vcpu_info_cache.active &&
            vcpu->kvm->arch.xen.upcall_vector)
                return __kvm_xen_has_interrupt(vcpu);

        return 0;
}
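
/*
 * True while event channel bits remain latched in the local
 * evtchn_pending_sel shadow, waiting for kvm_xen_inject_pending_events()
 * to propagate them into the guest's vcpu_info.
 */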
static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu)
{
        return static_branch_unlikely(&kvm_xen_enabled.key) &&
                vcpu->arch.xen.evtchn_pending_sel;
}
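
/* A non-zero timer_virq means the vCPU's Xen timer VIRQ has been set up. */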
static inline bool kvm_xen_timer_enabled(struct kvm_vcpu *vcpu)
{
        return !!vcpu->arch.xen.timer_virq;
}

static inline int kvm_xen_has_pending_timer(struct kvm_vcpu *vcpu)
{
        if (kvm_xen_hypercall_enabled(vcpu->kvm) && kvm_xen_timer_enabled(vcpu))
                return atomic_read(&vcpu->arch.xen.timer_pending);

        return 0;
}

void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu);

#else
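/*
 * Stubs for !CONFIG_KVM_XEN builds: callers stay free of #ifdefs, and
 * the predicates below constant-fold to "no Xen work pending".
 */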
static inline int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
        return 1;
}

static inline void kvm_xen_init_vm(struct kvm *kvm)
{
}

static inline void kvm_xen_destroy_vm(struct kvm *kvm)
{
}

static inline void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)
{
}

static inline void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
        return false;
}

static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
        return false;
}

static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
        return 0;
}

static inline void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu)
{
        return false;
}

static inline int kvm_xen_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return 0;
}

static inline void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_timer_enabled(struct kvm_vcpu *vcpu)
{
        return false;
}
#endif

int kvm_xen_hypercall(struct kvm_vcpu *vcpu);

#include <asm/pvclock-abi.h>
#include <asm/xen/interface.h>
#include <xen/interface/vcpu.h>
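
/*
 * Runstate accounting: via the vcpu_runstate_info area the guest
 * registers, KVM reports RUNSTATE_running, RUNSTATE_runnable,
 * RUNSTATE_blocked or RUNSTATE_offline, plus the time spent in each.
 */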
void kvm_xen_update_runstate_guest(struct kvm_vcpu *vcpu, int state);

static inline void kvm_xen_runstate_set_running(struct kvm_vcpu *vcpu)
{
        kvm_xen_update_runstate_guest(vcpu, RUNSTATE_running);
}

static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu)
{
        /*
         * If the vCPU wasn't preempted but took a normal exit for
         * some reason (hypercalls, I/O, etc.), that is accounted as
         * still RUNSTATE_running, as the VMM is still operating on
         * behalf of the vCPU. Only if the VMM does actually block
         * does it need to enter RUNSTATE_blocked.
         */
        if (WARN_ON_ONCE(!vcpu->preempted))
                return;

        kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
}

/* 32-bit compatibility definitions, also used natively in 32-bit build */
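/*
 * These mirror the Xen public headers, with longs and pointers shrunk
 * to 32 bits, so that a 32-bit guest's shared pages are laid out the
 * way the guest expects.
 */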
struct compat_arch_vcpu_info {
        unsigned int cr2;
        unsigned int pad[5];
};

struct compat_vcpu_info {
        uint8_t evtchn_upcall_pending;
        uint8_t evtchn_upcall_mask;
        uint16_t pad;
        uint32_t evtchn_pending_sel;
        struct compat_arch_vcpu_info arch;
        struct pvclock_vcpu_time_info time;
}; /* 64 bytes (x86) */

struct compat_arch_shared_info {
        unsigned int max_pfn;
        unsigned int pfn_to_mfn_frame_list_list;
        unsigned int nmi_reason;
        unsigned int p2m_cr3;
        unsigned int p2m_vaddr;
        unsigned int p2m_generation;
        uint32_t wc_sec_hi;
};

struct compat_shared_info {
        struct compat_vcpu_info vcpu_info[MAX_VIRT_CPUS];
        uint32_t evtchn_pending[32];
        uint32_t evtchn_mask[32];
        struct pvclock_wall_clock wc;
        struct compat_arch_shared_info arch;
};
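
/*
 * 8 bits per byte times the 128-byte evtchn_pending bitmap gives the
 * 1024 event channels of the 2-level event channel ABI.
 */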
#define COMPAT_EVTCHN_2L_NR_CHANNELS (8 *                               \
                                      sizeof_field(struct compat_shared_info, \
                                                   evtchn_pending))
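
/*
 * Packed because 32-bit x86 aligns uint64_t on a 4-byte boundary:
 * state_entry_time must sit at offset 4, where a compat guest expects it.
 */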
struct compat_vcpu_runstate_info {
        int state;
        uint64_t state_entry_time;
        uint64_t time[4];
} __attribute__((packed));

#endif /* __ARCH_X86_KVM_XEN_H__ */