/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */

#ifndef __RISCV_KVM_HOST_H__
#define __RISCV_KVM_HOST_H__

#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/spinlock.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/kvm_vcpu_fp.h>
#include <asm/kvm_vcpu_insn.h>
#include <asm/kvm_vcpu_timer.h>

#define KVM_MAX_VCPUS			1024

#define KVM_HALT_POLL_NS_DEFAULT	500000

#define KVM_VCPU_MAX_FEATURES		0

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_VCPU_RESET		KVM_ARCH_REQ(1)
#define KVM_REQ_UPDATE_HGATP		KVM_ARCH_REQ(2)
#define KVM_REQ_FENCE_I			\
	KVM_ARCH_REQ_FLAGS(3, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE_GVMA_VMID_ALL	KVM_REQ_TLB_FLUSH
#define KVM_REQ_HFENCE_VVMA_ALL		\
	KVM_ARCH_REQ_FLAGS(4, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE			\
	KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)

enum kvm_riscv_hfence_type {
	KVM_RISCV_HFENCE_UNKNOWN = 0,
	KVM_RISCV_HFENCE_GVMA_VMID_GPA,
	KVM_RISCV_HFENCE_VVMA_ASID_GVA,
	KVM_RISCV_HFENCE_VVMA_ASID_ALL,
	KVM_RISCV_HFENCE_VVMA_GVA,
};

struct kvm_riscv_hfence {
	enum kvm_riscv_hfence_type type;
	unsigned long asid;
	unsigned long order;
	gpa_t addr;
	gpa_t size;
};

#define KVM_RISCV_VCPU_MAX_HFENCE	64
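
/*
 * Illustrative sketch only (hypothetical helper and wrapper type, not part
 * of the KVM API): the HFENCE requests defined above are meant to sit in a
 * fixed-size per-VCPU ring protected by a spinlock (see hfence_lock,
 * hfence_head and hfence_tail further below). This shows roughly how a
 * producer could append a request, assuming an empty slot is marked by
 * KVM_RISCV_HFENCE_UNKNOWN.
 */
struct example_hfence_ring {
	spinlock_t lock;
	unsigned long head;
	unsigned long tail;
	struct kvm_riscv_hfence queue[KVM_RISCV_VCPU_MAX_HFENCE];
};

static inline bool example_hfence_enqueue(struct example_hfence_ring *ring,
					  const struct kvm_riscv_hfence *req)
{
	bool queued = false;

	spin_lock(&ring->lock);
	/* The slot at 'tail' is free only while its type is still UNKNOWN. */
	if (ring->queue[ring->tail].type == KVM_RISCV_HFENCE_UNKNOWN) {
		ring->queue[ring->tail] = *req;
		ring->tail = (ring->tail + 1) % KVM_RISCV_VCPU_MAX_HFENCE;
		queued = true;
	}
	spin_unlock(&ring->lock);

	return queued;
}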

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 ecall_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 csr_exit_user;
	u64 csr_exit_kernel;
	u64 exits;
};

struct kvm_arch_memory_slot {
};

struct kvm_vmid {
	/*
	 * Writes to vmid_version and vmid happen with vmid_lock held
	 * whereas reads happen without any lock held.
	 */
	unsigned long vmid_version;
	unsigned long vmid;
};
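
/*
 * Illustrative sketch only (hypothetical helper): the comment above implies
 * that readers of vmid_version take no lock, so a reader-side staleness
 * check could be a plain READ_ONCE() comparison against whatever generation
 * counter the vmid_lock writer maintains (passed in here as
 * 'current_version'). This assumes READ_ONCE() is visible via the existing
 * includes; the declared API for this is kvm_riscv_gstage_vmid_ver_changed()
 * further below.
 */
static inline bool example_vmid_is_stale(struct kvm_vmid *vmid,
					 unsigned long current_version)
{
	/* Lockless reader: a single READ_ONCE() load of the version. */
	return READ_ONCE(vmid->vmid_version) != current_version;
}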

struct kvm_arch {
	/* G-stage vmid */
	struct kvm_vmid vmid;

	/* G-stage page table */
	pgd_t *pgd;
	phys_addr_t pgd_phys;

	/* Guest Timer */
	struct kvm_guest_timer timer;
};

struct kvm_sbi_context {
	int return_handled;
};

struct kvm_cpu_trap {
	unsigned long sepc;
	unsigned long scause;
	unsigned long stval;
	unsigned long htval;
	unsigned long htinst;
};

struct kvm_cpu_context {
	unsigned long zero;
	unsigned long ra;
	unsigned long sp;
	unsigned long gp;
	unsigned long tp;
	unsigned long t0;
	unsigned long t1;
	unsigned long t2;
	unsigned long s0;
	unsigned long s1;
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
	unsigned long a4;
	unsigned long a5;
	unsigned long a6;
	unsigned long a7;
	unsigned long s2;
	unsigned long s3;
	unsigned long s4;
	unsigned long s5;
	unsigned long s6;
	unsigned long s7;
	unsigned long s8;
	unsigned long s9;
	unsigned long s10;
	unsigned long s11;
	unsigned long t3;
	unsigned long t4;
	unsigned long t5;
	unsigned long t6;
	unsigned long sepc;
	unsigned long sstatus;
	unsigned long hstatus;
	union __riscv_fp_state fp;
};

struct kvm_vcpu_csr {
	unsigned long vsstatus;
	unsigned long vsie;
	unsigned long vstvec;
	unsigned long vsscratch;
	unsigned long vsepc;
	unsigned long vscause;
	unsigned long vstval;
	unsigned long hvip;
	unsigned long vsatp;
	unsigned long scounteren;
};

struct kvm_vcpu_arch {
	/* VCPU ran at least once */
	bool ran_atleast_once;

	/* Last Host CPU on which Guest VCPU exited */
	int last_exit_cpu;

	/* ISA feature bits (similar to MISA) */
	DECLARE_BITMAP(isa, RISCV_ISA_EXT_MAX);

	/* SSCRATCH, STVEC, and SCOUNTEREN of Host */
	unsigned long host_sscratch;
	unsigned long host_stvec;
	unsigned long host_scounteren;

	/* CPU context of Host */
	struct kvm_cpu_context host_context;

	/* CPU context of Guest VCPU */
	struct kvm_cpu_context guest_context;

	/* CPU CSR context of Guest VCPU */
	struct kvm_vcpu_csr guest_csr;

	/* CPU context upon Guest VCPU reset */
	struct kvm_cpu_context guest_reset_context;

	/* CPU CSR context upon Guest VCPU reset */
	struct kvm_vcpu_csr guest_reset_csr;

	/*
	 * VCPU interrupts
	 *
	 * We have a lockless approach for tracking pending VCPU interrupts
	 * implemented using atomic bitops. The irqs_pending bitmap represents
	 * pending interrupts whereas irqs_pending_mask represents the bits
	 * changed in irqs_pending. Our approach is modeled around the multiple
	 * producer and single consumer problem where the consumer is the VCPU
	 * itself.
	 */
	unsigned long irqs_pending;
	unsigned long irqs_pending_mask;

	/* VCPU Timer */
	struct kvm_vcpu_timer timer;

	/* HFENCE request queue */
	spinlock_t hfence_lock;
	unsigned long hfence_head;
	unsigned long hfence_tail;
	struct kvm_riscv_hfence hfence_queue[KVM_RISCV_VCPU_MAX_HFENCE];

	/* MMIO instruction details */
	struct kvm_mmio_decode mmio_decode;

	/* CSR instruction details */
	struct kvm_csr_decode csr_decode;

	/* SBI context */
	struct kvm_sbi_context sbi_context;

	/* Cache pages needed to program page tables with spinlock held */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* VCPU power-off state */
	bool power_off;

	/* Don't run the VCPU (blocked) */
	bool pause;
};
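
/*
 * Illustrative sketch only (hypothetical helpers, not the declared API):
 * the lockless irqs_pending/irqs_pending_mask scheme described in the
 * comment above can be read as "producers publish a pending bit and then
 * flag it as changed; the single consumer (the VCPU) atomically takes
 * ownership of all changed bits". This assumes the usual atomic bitops and
 * xchg() helpers are visible through the existing includes. The declared
 * entry points are kvm_riscv_vcpu_set_interrupt() and
 * kvm_riscv_vcpu_flush_interrupts() further below.
 */
static inline void example_vcpu_queue_irq(struct kvm_vcpu_arch *arch,
					  unsigned int irq)
{
	/* Producer side: mark the interrupt pending ... */
	set_bit(irq, &arch->irqs_pending);
	smp_mb__before_atomic();
	/* ... then record that this bit changed. */
	set_bit(irq, &arch->irqs_pending_mask);
}

static inline unsigned long example_vcpu_take_irq_changes(struct kvm_vcpu_arch *arch)
{
	/* Consumer side: atomically grab and clear the changed-bits mask. */
	return xchg(&arch->irqs_pending_mask, 0);
}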

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}

#define KVM_ARCH_WANT_MMU_NOTIFIER

#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER	12

void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
					  gpa_t gpa, gpa_t gpsz,
					  unsigned long order);
void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
				     unsigned long order);
void kvm_riscv_local_hfence_gvma_all(void);
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
					  unsigned long asid,
					  unsigned long gva,
					  unsigned long gvsz,
					  unsigned long order);
void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
					  unsigned long asid);
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
				     unsigned long gva, unsigned long gvsz,
				     unsigned long order);
void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);

void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu);

void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);

void kvm_riscv_fence_i(struct kvm *kvm,
		       unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    gpa_t gpa, gpa_t gpsz,
				    unsigned long order);
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long gva, unsigned long gvsz,
				    unsigned long order, unsigned long asid);
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long asid);
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask,
			       unsigned long gva, unsigned long gvsz,
			       unsigned long order);
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask);

int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
			     phys_addr_t hpa, unsigned long size,
			     bool writable, bool in_atomic);
void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
			      unsigned long size);
int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
			 struct kvm_memory_slot *memslot,
			 gpa_t gpa, unsigned long hva, bool is_write);
int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
void kvm_riscv_gstage_mode_detect(void);
unsigned long kvm_riscv_gstage_mode(void);
int kvm_riscv_gstage_gpa_bits(void);

void kvm_riscv_gstage_vmid_detect(void);
unsigned long kvm_riscv_gstage_vmid_bits(void);
int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);

void __kvm_riscv_unpriv_trap(void);

unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
					 bool read_insn,
					 unsigned long guest_addr,
					 struct kvm_cpu_trap *trap);
void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
				  struct kvm_cpu_trap *trap);
int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
			struct kvm_cpu_trap *trap);

void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch);

int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu);
bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask);
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);

int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run);

#endif /* __RISCV_KVM_HOST_H__ */