Handles guest faults in KVM by mapping in the corresponding user pages in the 2nd stage page tables.

Whenever we map a page into the guest, we invalidate the instruction cache by MVA if the hardware uses VIPT or PIPT (no, we cannot do it only when we take an iabt, because the guest may happily read/write a page before ever hitting the icache). In the PIPT case we can invalidate only that physical page. In the VIPT case all bets are off and we simply must invalidate the whole affair. Note that VIVT icaches are tagged with vmids, so we are out of the woods on that one. Alexander Graf was nice enough to remind us of this massive pain.

Reviewed-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <c.dall@virtualopensystems.com>
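To make the VIPT/PIPT distinction concrete, here is a minimal sketch of the invalidation policy the message describes, in the style of the ARM KVM code of this era. It assumes the helpers icache_is_pipt() and icache_is_vivt_asid_tagged() from asm/cachetype.h and __cpuc_coherent_user_range()/__flush_icache_all() from asm/cacheflush.h; the function name and its use of gfn_to_hva() are illustrative, not a claim about the exact hunk this patch adds.

#include <linux/kvm_host.h>
#include <asm/cachetype.h>
#include <asm/cacheflush.h>

/*
 * Sketch only (hypothetical helper): make the icache coherent for a page
 * that has just been mapped into the guest's 2nd stage page tables.
 */
static void sketch_coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
{
	if (icache_is_pipt()) {
		/* PIPT: invalidating just this physical page is enough. */
		unsigned long hva = gfn_to_hva(kvm, gfn);
		__cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
	} else if (!icache_is_vivt_asid_tagged()) {
		/* VIPT: aliasing is possible, so invalidate the whole icache. */
		__flush_icache_all();
	}
	/* VIVT icaches are vmid/asid tagged; nothing to do for them. */
}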
83 lines
3.0 KiB
C
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__

/* 0 is reserved as an invalid value. */
#define c0_MPIDR	1	/* MultiProcessor ID Register */
#define c0_CSSELR	2	/* Cache Size Selection Register */
#define c1_SCTLR	3	/* System Control Register */
#define c1_ACTLR	4	/* Auxiliary Control Register */
#define c1_CPACR	5	/* Coprocessor Access Control */
#define c2_TTBR0	6	/* Translation Table Base Register 0 */
#define c2_TTBR0_high	7	/* TTBR0 top 32 bits */
#define c2_TTBR1	8	/* Translation Table Base Register 1 */
#define c2_TTBR1_high	9	/* TTBR1 top 32 bits */
#define c2_TTBCR	10	/* Translation Table Base Control R. */
#define c3_DACR		11	/* Domain Access Control Register */
#define c5_DFSR		12	/* Data Fault Status Register */
#define c5_IFSR		13	/* Instruction Fault Status Register */
#define c5_ADFSR	14	/* Auxiliary Data Fault Status R */
#define c5_AIFSR	15	/* Auxiliary Instruction Fault Status R */
#define c6_DFAR		16	/* Data Fault Address Register */
#define c6_IFAR		17	/* Instruction Fault Address Register */
#define c9_L2CTLR	18	/* Cortex A15 L2 Control Register */
#define c10_PRRR	19	/* Primary Region Remap Register */
#define c10_NMRR	20	/* Normal Memory Remap Register */
#define c12_VBAR	21	/* Vector Base Address Register */
#define c13_CID		22	/* Context ID Register */
#define c13_TID_URW	23	/* Thread ID, User R/W */
#define c13_TID_URO	24	/* Thread ID, User R/O */
#define c13_TID_PRIV	25	/* Thread ID, Privileged */
#define NR_CP15_REGS	26	/* Number of regs (incl. invalid) */

#define ARM_EXCEPTION_RESET	  0
#define ARM_EXCEPTION_UNDEFINED   1
#define ARM_EXCEPTION_SOFTWARE    2
#define ARM_EXCEPTION_PREF_ABORT  3
#define ARM_EXCEPTION_DATA_ABORT  4
#define ARM_EXCEPTION_IRQ	  5
#define ARM_EXCEPTION_FIQ	  6
#define ARM_EXCEPTION_HVC	  7

#ifndef __ASSEMBLY__
struct kvm;
struct kvm_vcpu;

extern char __kvm_hyp_init[];
extern char __kvm_hyp_init_end[];

extern char __kvm_hyp_exit[];
extern char __kvm_hyp_exit_end[];

extern char __kvm_hyp_vector[];

extern char __kvm_hyp_code_start[];
extern char __kvm_hyp_code_end[];

extern void __kvm_tlb_flush_vmid(struct kvm *kvm);

extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);

extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
#endif

#endif /* __ARM_KVM_ASM_H__ */
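As a usage note, the cp15 indices defined above select entries in the vcpu's shadow copy of the guest's coprocessor 15 state. A minimal sketch, assuming the arch-specific vcpu state carries a u32 cp15[NR_CP15_REGS] array (as struct kvm_vcpu_arch does in this series); the helper names here are hypothetical:

#include <linux/kvm_host.h>

/* Hypothetical helpers showing how the indices pick out guest cp15 state. */
static inline u32 guest_sctlr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cp15[c1_SCTLR];	/* guest System Control Register */
}

static inline void set_guest_context_id(struct kvm_vcpu *vcpu, u32 cid)
{
	vcpu->arch.cp15[c13_CID] = cid;		/* guest Context ID Register */
}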