Merge branch 'kvm-ppc-next' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc into HEAD
commit ec594c4710
@@ -16,7 +16,21 @@ Groups:
 
 KVM_DEV_VFIO_GROUP attributes:
   KVM_DEV_VFIO_GROUP_ADD: Add a VFIO group to VFIO-KVM device tracking
+	kvm_device_attr.addr points to an int32_t file descriptor
+	for the VFIO group.
   KVM_DEV_VFIO_GROUP_DEL: Remove a VFIO group from VFIO-KVM device tracking
-
-For each, kvm_device_attr.addr points to an int32_t file descriptor
-for the VFIO group.
+	kvm_device_attr.addr points to an int32_t file descriptor
+	for the VFIO group.
+  KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE: attaches a guest visible TCE table
+	allocated by sPAPR KVM.
+	kvm_device_attr.addr points to a struct:
+
+	struct kvm_vfio_spapr_tce {
+		__s32	groupfd;
+		__s32	tablefd;
+	};
+
+	where
+	@groupfd is a file descriptor for a VFIO group;
+	@tablefd is a file descriptor for a TCE table allocated via
+		KVM_CREATE_SPAPR_TCE.
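As a usage sketch (illustrative only, not part of this commit): userspace attaches an in-kernel TCE table to a VFIO group by setting this attribute on the VFIO KVM device with KVM_SET_DEVICE_ATTR. The kvm_vfio_dev_fd, vfio_group_fd and tce_table_fd names below are assumptions, standing for descriptors obtained from KVM_CREATE_DEVICE, opening the VFIO group, and KVM_CREATE_SPAPR_TCE_64 respectively.

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* Sketch: attach a KVM-allocated TCE table to a VFIO group. */
	static int attach_tce_table(int kvm_vfio_dev_fd, int vfio_group_fd,
				    int tce_table_fd)
	{
		struct kvm_vfio_spapr_tce param = {
			.groupfd = vfio_group_fd,
			.tablefd = tce_table_fd,
		};
		struct kvm_device_attr attr = {
			.group = KVM_DEV_VFIO_GROUP,
			.attr  = KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE,
			.addr  = (__u64)(unsigned long)&param,
		};

		return ioctl(kvm_vfio_dev_fd, KVM_SET_DEVICE_ATTR, &attr);
	}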
@@ -87,6 +87,11 @@ static inline unsigned int get_oc(u32 inst)
 	return (inst >> 11) & 0x7fff;
 }
 
+static inline unsigned int get_tx_or_sx(u32 inst)
+{
+	return (inst) & 0x1;
+}
+
 #define IS_XFORM(inst)	(get_op(inst) == 31)
 #define IS_DSFORM(inst)	(get_op(inst) >= 56)
 
|
@@ -64,6 +64,11 @@ struct iommu_table_ops {
 			long index,
 			unsigned long *hpa,
 			enum dma_data_direction *direction);
+	/* Real mode */
+	int (*exchange_rm)(struct iommu_table *tbl,
+			long index,
+			unsigned long *hpa,
+			enum dma_data_direction *direction);
 #endif
 	void (*clear)(struct iommu_table *tbl,
 			long index, long npages);
||||||
@@ -114,6 +119,7 @@ struct iommu_table {
 	struct list_head it_group_list;/* List of iommu_table_group_link */
 	unsigned long *it_userspace; /* userspace view of the table */
 	struct iommu_table_ops *it_ops;
+	struct kref    it_kref;
 };
 
 #define IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \
||||||
@@ -146,8 +152,8 @@ static inline void *get_iommu_table_base(struct device *dev)
 
 extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
 
-/* Frees table for an individual device node */
-extern void iommu_free_table(struct iommu_table *tbl, const char *node_name);
+extern struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl);
+extern int iommu_tce_table_put(struct iommu_table *tbl);
 
 /* Initializes an iommu_table based in values set in the passed-in
  * structure
||||||
@@ -208,6 +214,8 @@ extern void iommu_del_device(struct device *dev);
 extern int __init tce_iommu_bus_notifier_init(void);
 extern long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
 		unsigned long *hpa, enum dma_data_direction *direction);
+extern long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry,
+		unsigned long *hpa, enum dma_data_direction *direction);
 #else
 static inline void iommu_register_group(struct iommu_table_group *table_group,
 					int pci_domain_number,
||||||
@@ -288,11 +296,21 @@ static inline void iommu_restore(void)
 #endif
 
 /* The API to support IOMMU operations for VFIO */
-extern int iommu_tce_clear_param_check(struct iommu_table *tbl,
-		unsigned long ioba, unsigned long tce_value,
-		unsigned long npages);
-extern int iommu_tce_put_param_check(struct iommu_table *tbl,
-		unsigned long ioba, unsigned long tce);
+extern int iommu_tce_check_ioba(unsigned long page_shift,
+		unsigned long offset, unsigned long size,
+		unsigned long ioba, unsigned long npages);
+extern int iommu_tce_check_gpa(unsigned long page_shift,
+		unsigned long gpa);
+
+#define iommu_tce_clear_param_check(tbl, ioba, tce_value, npages) \
+		(iommu_tce_check_ioba((tbl)->it_page_shift,       \
+				(tbl)->it_offset, (tbl)->it_size, \
+				(ioba), (npages)) || (tce_value))
+#define iommu_tce_put_param_check(tbl, ioba, gpa)                 \
+		(iommu_tce_check_ioba((tbl)->it_page_shift,       \
+				(tbl)->it_offset, (tbl)->it_size, \
+				(ioba), 1) ||                     \
+		iommu_tce_check_gpa((tbl)->it_page_shift, (gpa)))
+
 extern void iommu_flush_tce(struct iommu_table *tbl);
 extern int iommu_take_ownership(struct iommu_table *tbl);
|
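A quick illustration (hypothetical caller, not from this patch): the new helpers take the window geometry as plain parameters instead of a struct iommu_table, so the same checks can be reused by code that has no iommu_table at hand, as KVM does against its own kvmppc_spapr_tce_table fields.

	/* Validate one page-aligned update at 'ioba' backed by guest page 'gpa'. */
	if (iommu_tce_check_ioba(tbl->it_page_shift, tbl->it_offset,
				 tbl->it_size, ioba, 1) ||
	    iommu_tce_check_gpa(tbl->it_page_shift, gpa))
		return -EINVAL;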
@@ -188,6 +188,13 @@ struct kvmppc_pginfo {
 	atomic_t refcnt;
 };
 
+struct kvmppc_spapr_tce_iommu_table {
+	struct rcu_head rcu;
+	struct list_head next;
+	struct iommu_table *tbl;
+	struct kref kref;
+};
+
 struct kvmppc_spapr_tce_table {
 	struct list_head list;
 	struct kvm *kvm;
||||||
@@ -196,6 +203,7 @@ struct kvmppc_spapr_tce_table {
 	u32 page_shift;
 	u64 offset;		/* in pages */
 	u64 size;		/* window size in pages */
+	struct list_head iommu_tables;
 	struct page *pages[0];
 };
 
||||||
@@ -342,6 +350,7 @@ struct kvmppc_pte {
 	bool may_read		: 1;
 	bool may_write		: 1;
 	bool may_execute	: 1;
+	unsigned long wimg;
 	u8 page_size;		/* MMU_PAGE_xxx */
 };
 
||||||
@@ -438,6 +447,11 @@ struct mmio_hpte_cache {
 	unsigned int index;
 };
 
+#define KVMPPC_VSX_COPY_NONE		0
+#define KVMPPC_VSX_COPY_WORD		1
+#define KVMPPC_VSX_COPY_DWORD		2
+#define KVMPPC_VSX_COPY_DWORD_LOAD_DUMP	3
+
 struct openpic;
 
 struct kvm_vcpu_arch {
||||||
@@ -641,6 +655,21 @@ struct kvm_vcpu_arch {
 	u8 io_gpr; /* GPR used as IO source/target */
 	u8 mmio_host_swabbed;
 	u8 mmio_sign_extend;
+	/* conversion between single and double precision */
+	u8 mmio_sp64_extend;
+	/*
+	 * Number of simulations for vsx.
+	 * If we use 2*8bytes to simulate 1*16bytes,
+	 * then the number should be 2 and
+	 * mmio_vsx_copy_type=KVMPPC_VSX_COPY_DWORD.
+	 * If we use 4*4bytes to simulate 1*16bytes,
+	 * the number should be 4 and
+	 * mmio_vsx_copy_type=KVMPPC_VSX_COPY_WORD.
+	 */
+	u8 mmio_vsx_copy_nums;
+	u8 mmio_vsx_offset;
+	u8 mmio_vsx_copy_type;
+	u8 mmio_vsx_tx_sx_enabled;
 	u8 osi_needed;
 	u8 osi_enabled;
 	u8 papr_enabled;
||||||
@@ -729,6 +758,8 @@ struct kvm_vcpu_arch {
 };
 
 #define VCPU_FPR(vcpu, i)	(vcpu)->arch.fp.fpr[i][TS_FPROFFSET]
+#define VCPU_VSX_FPR(vcpu, i, j)	((vcpu)->arch.fp.fpr[i][j])
+#define VCPU_VSX_VR(vcpu, i)		((vcpu)->arch.vr.vr[i])
 
 /* Values for vcpu->arch.state */
 #define KVMPPC_VCPU_NOTREADY	0
||||||
@@ -742,6 +773,7 @@ struct kvm_vcpu_arch {
 #define KVM_MMIO_REG_FPR	0x0020
 #define KVM_MMIO_REG_QPR	0x0040
 #define KVM_MMIO_REG_FQPR	0x0060
+#define KVM_MMIO_REG_VSX	0x0080
 
 #define __KVM_HAVE_ARCH_WQP
 #define __KVM_HAVE_CREATE_DEVICE
|
@@ -78,9 +78,15 @@ extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				unsigned int rt, unsigned int bytes,
 				int is_default_endian);
+extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+				unsigned int rt, unsigned int bytes,
+				int is_default_endian, int mmio_sign_extend);
 extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				u64 val, unsigned int bytes,
 				int is_default_endian);
+extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+				int rs, unsigned int bytes,
+				int is_default_endian);
 
 extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
 				 enum instruction_type type, u32 *inst);
||||||
@@ -132,6 +138,9 @@ extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
+extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
||||||
@@ -164,13 +173,19 @@ extern long kvmppc_prepare_vrma(struct kvm *kvm,
 extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
 			struct kvm_memory_slot *memslot, unsigned long porder);
 extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
+extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
+				struct iommu_group *grp);
+extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
+				struct iommu_group *grp);
 
 extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 				struct kvm_create_spapr_tce_64 *args);
 extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
-		struct kvm_vcpu *vcpu, unsigned long liobn);
-extern long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
-		unsigned long ioba, unsigned long npages);
+		struct kvm *kvm, unsigned long liobn);
+#define kvmppc_ioba_validate(stt, ioba, npages)                         \
+		(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
+				(stt)->size, (ioba), (npages)) ?        \
+				H_PARAMETER : H_SUCCESS)
 extern long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *tt,
 		unsigned long tce);
 extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
||||||
@@ -240,6 +255,7 @@ union kvmppc_one_reg {
 	u64	dval;
 	vector128 vval;
 	u64	vsxval[2];
+	u32	vsx32val[4];
 	struct {
 		u64	addr;
 		u64	length;
||||||
|
@ -29,10 +29,14 @@ extern void mm_iommu_init(struct mm_struct *mm);
|
|||||||
extern void mm_iommu_cleanup(struct mm_struct *mm);
|
extern void mm_iommu_cleanup(struct mm_struct *mm);
|
||||||
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
|
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
|
||||||
unsigned long ua, unsigned long size);
|
unsigned long ua, unsigned long size);
|
||||||
|
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
|
||||||
|
struct mm_struct *mm, unsigned long ua, unsigned long size);
|
||||||
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
|
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
|
||||||
unsigned long ua, unsigned long entries);
|
unsigned long ua, unsigned long entries);
|
||||||
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
|
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
|
||||||
unsigned long ua, unsigned long *hpa);
|
unsigned long ua, unsigned long *hpa);
|
||||||
|
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
|
||||||
|
unsigned long ua, unsigned long *hpa);
|
||||||
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
|
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
|
||||||
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
|
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
|
||||||
#endif
|
#endif
|
||||||
|
@ -86,32 +86,79 @@
|
|||||||
#define OP_TRAP_64 2
|
#define OP_TRAP_64 2
|
||||||
|
|
||||||
#define OP_31_XOP_TRAP 4
|
#define OP_31_XOP_TRAP 4
|
||||||
|
#define OP_31_XOP_LDX 21
|
||||||
#define OP_31_XOP_LWZX 23
|
#define OP_31_XOP_LWZX 23
|
||||||
|
#define OP_31_XOP_LDUX 53
|
||||||
#define OP_31_XOP_DCBST 54
|
#define OP_31_XOP_DCBST 54
|
||||||
#define OP_31_XOP_LWZUX 55
|
#define OP_31_XOP_LWZUX 55
|
||||||
#define OP_31_XOP_TRAP_64 68
|
#define OP_31_XOP_TRAP_64 68
|
||||||
#define OP_31_XOP_DCBF 86
|
#define OP_31_XOP_DCBF 86
|
||||||
#define OP_31_XOP_LBZX 87
|
#define OP_31_XOP_LBZX 87
|
||||||
|
#define OP_31_XOP_STDX 149
|
||||||
#define OP_31_XOP_STWX 151
|
#define OP_31_XOP_STWX 151
|
||||||
|
#define OP_31_XOP_STDUX 181
|
||||||
|
#define OP_31_XOP_STWUX 183
|
||||||
#define OP_31_XOP_STBX 215
|
#define OP_31_XOP_STBX 215
|
||||||
#define OP_31_XOP_LBZUX 119
|
#define OP_31_XOP_LBZUX 119
|
||||||
#define OP_31_XOP_STBUX 247
|
#define OP_31_XOP_STBUX 247
|
||||||
#define OP_31_XOP_LHZX 279
|
#define OP_31_XOP_LHZX 279
|
||||||
#define OP_31_XOP_LHZUX 311
|
#define OP_31_XOP_LHZUX 311
|
||||||
#define OP_31_XOP_MFSPR 339
|
#define OP_31_XOP_MFSPR 339
|
||||||
|
#define OP_31_XOP_LWAX 341
|
||||||
#define OP_31_XOP_LHAX 343
|
#define OP_31_XOP_LHAX 343
|
||||||
|
#define OP_31_XOP_LWAUX 373
|
||||||
#define OP_31_XOP_LHAUX 375
|
#define OP_31_XOP_LHAUX 375
|
||||||
#define OP_31_XOP_STHX 407
|
#define OP_31_XOP_STHX 407
|
||||||
#define OP_31_XOP_STHUX 439
|
#define OP_31_XOP_STHUX 439
|
||||||
#define OP_31_XOP_MTSPR 467
|
#define OP_31_XOP_MTSPR 467
|
||||||
#define OP_31_XOP_DCBI 470
|
#define OP_31_XOP_DCBI 470
|
||||||
|
#define OP_31_XOP_LDBRX 532
|
||||||
#define OP_31_XOP_LWBRX 534
|
#define OP_31_XOP_LWBRX 534
|
||||||
#define OP_31_XOP_TLBSYNC 566
|
#define OP_31_XOP_TLBSYNC 566
|
||||||
|
#define OP_31_XOP_STDBRX 660
|
||||||
#define OP_31_XOP_STWBRX 662
|
#define OP_31_XOP_STWBRX 662
|
||||||
|
#define OP_31_XOP_STFSX 663
|
||||||
|
#define OP_31_XOP_STFSUX 695
|
||||||
|
#define OP_31_XOP_STFDX 727
|
||||||
|
#define OP_31_XOP_STFDUX 759
|
||||||
#define OP_31_XOP_LHBRX 790
|
#define OP_31_XOP_LHBRX 790
|
||||||
|
#define OP_31_XOP_LFIWAX 855
|
||||||
|
#define OP_31_XOP_LFIWZX 887
|
||||||
#define OP_31_XOP_STHBRX 918
|
#define OP_31_XOP_STHBRX 918
|
||||||
|
#define OP_31_XOP_STFIWX 983
|
||||||
|
|
||||||
|
/* VSX Scalar Load Instructions */
|
||||||
|
#define OP_31_XOP_LXSDX 588
|
||||||
|
#define OP_31_XOP_LXSSPX 524
|
||||||
|
#define OP_31_XOP_LXSIWAX 76
|
||||||
|
#define OP_31_XOP_LXSIWZX 12
|
||||||
|
|
||||||
|
/* VSX Scalar Store Instructions */
|
||||||
|
#define OP_31_XOP_STXSDX 716
|
||||||
|
#define OP_31_XOP_STXSSPX 652
|
||||||
|
#define OP_31_XOP_STXSIWX 140
|
||||||
|
|
||||||
|
/* VSX Vector Load Instructions */
|
||||||
|
#define OP_31_XOP_LXVD2X 844
|
||||||
|
#define OP_31_XOP_LXVW4X 780
|
||||||
|
|
||||||
|
/* VSX Vector Load and Splat Instruction */
|
||||||
|
#define OP_31_XOP_LXVDSX 332
|
||||||
|
|
||||||
|
/* VSX Vector Store Instructions */
|
||||||
|
#define OP_31_XOP_STXVD2X 972
|
||||||
|
#define OP_31_XOP_STXVW4X 908
|
||||||
|
|
||||||
|
#define OP_31_XOP_LFSX 535
|
||||||
|
#define OP_31_XOP_LFSUX 567
|
||||||
|
#define OP_31_XOP_LFDX 599
|
||||||
|
#define OP_31_XOP_LFDUX 631
|
||||||
|
|
||||||
#define OP_LWZ 32
|
#define OP_LWZ 32
|
||||||
|
#define OP_STFS 52
|
||||||
|
#define OP_STFSU 53
|
||||||
|
#define OP_STFD 54
|
||||||
|
#define OP_STFDU 55
|
||||||
#define OP_LD 58
|
#define OP_LD 58
|
||||||
#define OP_LWZU 33
|
#define OP_LWZU 33
|
||||||
#define OP_LBZ 34
|
#define OP_LBZ 34
|
||||||
@@ -127,6 +174,17 @@
 #define OP_LHAU 43
 #define OP_STH  44
 #define OP_STHU 45
+#define OP_LMW  46
+#define OP_STMW 47
+#define OP_LFS  48
+#define OP_LFSU 49
+#define OP_LFD  50
+#define OP_LFDU 51
+#define OP_STFS 52
+#define OP_STFSU 53
+#define OP_STFD  54
+#define OP_STFDU 55
+#define OP_LQ    56
 
 /* sorted alphabetically */
 #define PPC_INST_BHRBE			0x7c00025c
||||||
|
@@ -711,13 +711,16 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 	return tbl;
 }
 
-void iommu_free_table(struct iommu_table *tbl, const char *node_name)
+static void iommu_table_free(struct kref *kref)
 {
 	unsigned long bitmap_sz;
 	unsigned int order;
+	struct iommu_table *tbl;
 
-	if (!tbl)
-		return;
+	tbl = container_of(kref, struct iommu_table, it_kref);
+
+	if (tbl->it_ops->free)
+		tbl->it_ops->free(tbl);
 
 	if (!tbl->it_map) {
 		kfree(tbl);
||||||
@@ -733,7 +736,7 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name)
 
 	/* verify that table contains no entries */
 	if (!bitmap_empty(tbl->it_map, tbl->it_size))
-		pr_warn("%s: Unexpected TCEs for %s\n", __func__, node_name);
+		pr_warn("%s: Unexpected TCEs\n", __func__);
 
 	/* calculate bitmap size in bytes */
 	bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
||||||
@@ -746,6 +749,24 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name)
 	kfree(tbl);
 }
 
+struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl)
+{
+	if (kref_get_unless_zero(&tbl->it_kref))
+		return tbl;
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(iommu_tce_table_get);
+
+int iommu_tce_table_put(struct iommu_table *tbl)
+{
+	if (WARN_ON(!tbl))
+		return 0;
+
+	return kref_put(&tbl->it_kref, iommu_table_free);
+}
+EXPORT_SYMBOL_GPL(iommu_tce_table_put);
+
 /* Creates TCEs for a user provided buffer.  The user buffer must be
  * contiguous real kernel storage (not vmalloc).  The address passed here
  * comprises a page address and offset into that page.  The dma_addr_t
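A usage sketch (illustrative only, not part of the patch): with the table now reference-counted, callers take a reference with iommu_tce_table_get() and drop it with iommu_tce_table_put(); the table is freed only when the last reference goes away. The table_group variable below is an assumption standing for whatever iommu_table_group the caller already holds.

	/* Hypothetical caller: pin a table while its TCEs are being updated. */
	struct iommu_table *tbl = iommu_tce_table_get(table_group->tables[0]);

	if (!tbl)
		return -ENODEV;		/* table is already being torn down */

	/* ... update TCEs via tbl->it_ops ... */

	iommu_tce_table_put(tbl);	/* may free the table on the last put */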
|
||||||
@ -942,47 +963,36 @@ void iommu_flush_tce(struct iommu_table *tbl)
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(iommu_flush_tce);
|
EXPORT_SYMBOL_GPL(iommu_flush_tce);
|
||||||
|
|
||||||
int iommu_tce_clear_param_check(struct iommu_table *tbl,
|
int iommu_tce_check_ioba(unsigned long page_shift,
|
||||||
unsigned long ioba, unsigned long tce_value,
|
unsigned long offset, unsigned long size,
|
||||||
unsigned long npages)
|
unsigned long ioba, unsigned long npages)
|
||||||
{
|
{
|
||||||
/* tbl->it_ops->clear() does not support any value but 0 */
|
unsigned long mask = (1UL << page_shift) - 1;
|
||||||
if (tce_value)
|
|
||||||
|
if (ioba & mask)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
if (ioba & ~IOMMU_PAGE_MASK(tbl))
|
ioba >>= page_shift;
|
||||||
|
if (ioba < offset)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
ioba >>= tbl->it_page_shift;
|
if ((ioba + 1) > (offset + size))
|
||||||
if (ioba < tbl->it_offset)
|
|
||||||
return -EINVAL;
|
|
||||||
|
|
||||||
if ((ioba + npages) > (tbl->it_offset + tbl->it_size))
|
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(iommu_tce_clear_param_check);
|
EXPORT_SYMBOL_GPL(iommu_tce_check_ioba);
|
||||||
|
|
||||||
int iommu_tce_put_param_check(struct iommu_table *tbl,
|
int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
|
||||||
unsigned long ioba, unsigned long tce)
|
|
||||||
{
|
{
|
||||||
if (tce & ~IOMMU_PAGE_MASK(tbl))
|
unsigned long mask = (1UL << page_shift) - 1;
|
||||||
return -EINVAL;
|
|
||||||
|
|
||||||
if (ioba & ~IOMMU_PAGE_MASK(tbl))
|
if (gpa & mask)
|
||||||
return -EINVAL;
|
|
||||||
|
|
||||||
ioba >>= tbl->it_page_shift;
|
|
||||||
if (ioba < tbl->it_offset)
|
|
||||||
return -EINVAL;
|
|
||||||
|
|
||||||
if ((ioba + 1) > (tbl->it_offset + tbl->it_size))
|
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(iommu_tce_put_param_check);
|
EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);
|
||||||
|
|
||||||
long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
|
long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
|
||||||
unsigned long *hpa, enum dma_data_direction *direction)
|
unsigned long *hpa, enum dma_data_direction *direction)
|
||||||
@ -1004,6 +1014,31 @@ long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(iommu_tce_xchg);
|
EXPORT_SYMBOL_GPL(iommu_tce_xchg);
|
||||||
|
|
||||||
|
#ifdef CONFIG_PPC_BOOK3S_64
|
||||||
|
long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry,
|
||||||
|
unsigned long *hpa, enum dma_data_direction *direction)
|
||||||
|
{
|
||||||
|
long ret;
|
||||||
|
|
||||||
|
ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
|
||||||
|
|
||||||
|
if (!ret && ((*direction == DMA_FROM_DEVICE) ||
|
||||||
|
(*direction == DMA_BIDIRECTIONAL))) {
|
||||||
|
struct page *pg = realmode_pfn_to_page(*hpa >> PAGE_SHIFT);
|
||||||
|
|
||||||
|
if (likely(pg)) {
|
||||||
|
SetPageDirty(pg);
|
||||||
|
} else {
|
||||||
|
tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
|
||||||
|
ret = -EFAULT;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(iommu_tce_xchg_rm);
|
||||||
|
#endif
|
||||||
|
|
||||||
int iommu_take_ownership(struct iommu_table *tbl)
|
int iommu_take_ownership(struct iommu_table *tbl)
|
||||||
{
|
{
|
||||||
unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
|
unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
|
||||||
|
@@ -67,6 +67,7 @@ config KVM_BOOK3S_64
 	select KVM_BOOK3S_64_HANDLER
 	select KVM
 	select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE
+	select SPAPR_TCE_IOMMU if IOMMU_SUPPORT
 	---help---
 	  Support running unmodified book3s_64 and book3s_32 guest kernels
 	  in virtual machines on book3s_64 host processors.
|
@@ -197,6 +197,24 @@ void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
 }
 EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);
 
+void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
+{
+	/* might as well deliver this straight away */
+	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, 0);
+}
+
+void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
+{
+	/* might as well deliver this straight away */
+	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, 0);
+}
+
+void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu)
+{
+	/* might as well deliver this straight away */
+	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, 0);
+}
+
 void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
 {
 	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
|
@@ -319,6 +319,7 @@ do_second:
 	gpte->may_execute = true;
 	gpte->may_read = false;
 	gpte->may_write = false;
+	gpte->wimg = r & HPTE_R_WIMG;
 
 	switch (pp) {
 	case 0:
|
@@ -145,6 +145,8 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
 	else
 		kvmppc_mmu_flush_icache(pfn);
 
+	rflags = (rflags & ~HPTE_R_WIMG) | orig_pte->wimg;
+
 	/*
 	 * Use 64K pages if possible; otherwise, on 64K page kernels,
 	 * we need to transfer 4 more bits from guest real to host real addr.
||||||
@@ -177,12 +179,15 @@ map_again:
 	ret = mmu_hash_ops.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
 				       hpsize, hpsize, MMU_SEGSIZE_256M);
 
-	if (ret < 0) {
+	if (ret == -1) {
 		/* If we couldn't map a primary PTE, try a secondary */
 		hash = ~hash;
 		vflags ^= HPTE_V_SECONDARY;
 		attempt++;
 		goto map_again;
+	} else if (ret < 0) {
+		r = -EIO;
+		goto out_unlock;
 	} else {
 		trace_kvm_book3s_64_mmu_map(rflags, hpteg,
 					    vpn, hpaddr, orig_pte);
|
@@ -28,6 +28,8 @@
 #include <linux/hugetlb.h>
 #include <linux/list.h>
 #include <linux/anon_inodes.h>
+#include <linux/iommu.h>
+#include <linux/file.h>
 
 #include <asm/tlbflush.h>
 #include <asm/kvm_ppc.h>
||||||
@@ -40,6 +42,7 @@
 #include <asm/udbg.h>
 #include <asm/iommu.h>
 #include <asm/tce.h>
+#include <asm/mmu_context.h>
 
 static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
 {
||||||
@ -91,6 +94,137 @@ static long kvmppc_account_memlimit(unsigned long stt_pages, bool inc)
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
|
||||||
|
{
|
||||||
|
struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
|
||||||
|
struct kvmppc_spapr_tce_iommu_table, rcu);
|
||||||
|
|
||||||
|
iommu_tce_table_put(stit->tbl);
|
||||||
|
|
||||||
|
kfree(stit);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void kvm_spapr_tce_liobn_put(struct kref *kref)
|
||||||
|
{
|
||||||
|
struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref,
|
||||||
|
struct kvmppc_spapr_tce_iommu_table, kref);
|
||||||
|
|
||||||
|
list_del_rcu(&stit->next);
|
||||||
|
|
||||||
|
call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
|
||||||
|
}
|
||||||
|
|
||||||
|
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
|
||||||
|
struct iommu_group *grp)
|
||||||
|
{
|
||||||
|
int i;
|
||||||
|
struct kvmppc_spapr_tce_table *stt;
|
||||||
|
struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
|
||||||
|
struct iommu_table_group *table_group = NULL;
|
||||||
|
|
||||||
|
list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
|
||||||
|
|
||||||
|
table_group = iommu_group_get_iommudata(grp);
|
||||||
|
if (WARN_ON(!table_group))
|
||||||
|
continue;
|
||||||
|
|
||||||
|
list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
|
||||||
|
for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
|
||||||
|
if (table_group->tables[i] != stit->tbl)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
|
||||||
|
struct iommu_group *grp)
|
||||||
|
{
|
||||||
|
struct kvmppc_spapr_tce_table *stt = NULL;
|
||||||
|
bool found = false;
|
||||||
|
struct iommu_table *tbl = NULL;
|
||||||
|
struct iommu_table_group *table_group;
|
||||||
|
long i;
|
||||||
|
struct kvmppc_spapr_tce_iommu_table *stit;
|
||||||
|
struct fd f;
|
||||||
|
|
||||||
|
f = fdget(tablefd);
|
||||||
|
if (!f.file)
|
||||||
|
return -EBADF;
|
||||||
|
|
||||||
|
list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
|
||||||
|
if (stt == f.file->private_data) {
|
||||||
|
found = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fdput(f);
|
||||||
|
|
||||||
|
if (!found)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
table_group = iommu_group_get_iommudata(grp);
|
||||||
|
if (WARN_ON(!table_group))
|
||||||
|
return -EFAULT;
|
||||||
|
|
||||||
|
for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
|
||||||
|
struct iommu_table *tbltmp = table_group->tables[i];
|
||||||
|
|
||||||
|
if (!tbltmp)
|
||||||
|
continue;
|
||||||
|
/*
|
||||||
|
* Make sure hardware table parameters are exactly the same;
|
||||||
|
* this is used in the TCE handlers where boundary checks
|
||||||
|
* use only the first attached table.
|
||||||
|
*/
|
||||||
|
if ((tbltmp->it_page_shift == stt->page_shift) &&
|
||||||
|
(tbltmp->it_offset == stt->offset) &&
|
||||||
|
(tbltmp->it_size == stt->size)) {
|
||||||
|
/*
|
||||||
|
* Reference the table to avoid races with
|
||||||
|
* add/remove DMA windows.
|
||||||
|
*/
|
||||||
|
tbl = iommu_tce_table_get(tbltmp);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (!tbl)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
|
||||||
|
if (tbl != stit->tbl)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
if (!kref_get_unless_zero(&stit->kref)) {
|
||||||
|
/* stit is being destroyed */
|
||||||
|
iommu_tce_table_put(tbl);
|
||||||
|
return -ENOTTY;
|
||||||
|
}
|
||||||
|
/*
|
||||||
|
* The table is already known to this KVM, we just increased
|
||||||
|
* its KVM reference counter and can return.
|
||||||
|
*/
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
stit = kzalloc(sizeof(*stit), GFP_KERNEL);
|
||||||
|
if (!stit) {
|
||||||
|
iommu_tce_table_put(tbl);
|
||||||
|
return -ENOMEM;
|
||||||
|
}
|
||||||
|
|
||||||
|
stit->tbl = tbl;
|
||||||
|
kref_init(&stit->kref);
|
||||||
|
|
||||||
|
list_add_rcu(&stit->next, &stt->iommu_tables);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
static void release_spapr_tce_table(struct rcu_head *head)
|
static void release_spapr_tce_table(struct rcu_head *head)
|
||||||
{
|
{
|
||||||
struct kvmppc_spapr_tce_table *stt = container_of(head,
|
struct kvmppc_spapr_tce_table *stt = container_of(head,
|
||||||
@ -130,9 +264,18 @@ static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
|
|||||||
static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
|
static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
|
||||||
{
|
{
|
||||||
struct kvmppc_spapr_tce_table *stt = filp->private_data;
|
struct kvmppc_spapr_tce_table *stt = filp->private_data;
|
||||||
|
struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
|
||||||
|
|
||||||
list_del_rcu(&stt->list);
|
list_del_rcu(&stt->list);
|
||||||
|
|
||||||
|
list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
|
||||||
|
WARN_ON(!kref_read(&stit->kref));
|
||||||
|
while (1) {
|
||||||
|
if (kref_put(&stit->kref, kvm_spapr_tce_liobn_put))
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
kvm_put_kvm(stt->kvm);
|
kvm_put_kvm(stt->kvm);
|
||||||
|
|
||||||
kvmppc_account_memlimit(
|
kvmppc_account_memlimit(
|
||||||
@ -164,7 +307,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
|
|||||||
return -EBUSY;
|
return -EBUSY;
|
||||||
}
|
}
|
||||||
|
|
||||||
size = args->size;
|
size = _ALIGN_UP(args->size, PAGE_SIZE >> 3);
|
||||||
npages = kvmppc_tce_pages(size);
|
npages = kvmppc_tce_pages(size);
|
||||||
ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
|
ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
@ -183,6 +326,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
|
|||||||
stt->offset = args->offset;
|
stt->offset = args->offset;
|
||||||
stt->size = size;
|
stt->size = size;
|
||||||
stt->kvm = kvm;
|
stt->kvm = kvm;
|
||||||
|
INIT_LIST_HEAD_RCU(&stt->iommu_tables);
|
||||||
|
|
||||||
for (i = 0; i < npages; i++) {
|
for (i = 0; i < npages; i++) {
|
||||||
stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
|
stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
|
||||||
@ -211,15 +355,106 @@ fail:
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void kvmppc_clear_tce(struct iommu_table *tbl, unsigned long entry)
|
||||||
|
{
|
||||||
|
unsigned long hpa = 0;
|
||||||
|
enum dma_data_direction dir = DMA_NONE;
|
||||||
|
|
||||||
|
iommu_tce_xchg(tbl, entry, &hpa, &dir);
|
||||||
|
}
|
||||||
|
|
||||||
|
static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
|
||||||
|
struct iommu_table *tbl, unsigned long entry)
|
||||||
|
{
|
||||||
|
struct mm_iommu_table_group_mem_t *mem = NULL;
|
||||||
|
const unsigned long pgsize = 1ULL << tbl->it_page_shift;
|
||||||
|
unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
|
||||||
|
|
||||||
|
if (!pua)
|
||||||
|
/* it_userspace allocation might be delayed */
|
||||||
|
return H_TOO_HARD;
|
||||||
|
|
||||||
|
mem = mm_iommu_lookup(kvm->mm, *pua, pgsize);
|
||||||
|
if (!mem)
|
||||||
|
return H_TOO_HARD;
|
||||||
|
|
||||||
|
mm_iommu_mapped_dec(mem);
|
||||||
|
|
||||||
|
*pua = 0;
|
||||||
|
|
||||||
|
return H_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
|
||||||
|
struct iommu_table *tbl, unsigned long entry)
|
||||||
|
{
|
||||||
|
enum dma_data_direction dir = DMA_NONE;
|
||||||
|
unsigned long hpa = 0;
|
||||||
|
long ret;
|
||||||
|
|
||||||
|
if (WARN_ON_ONCE(iommu_tce_xchg(tbl, entry, &hpa, &dir)))
|
||||||
|
return H_HARDWARE;
|
||||||
|
|
||||||
|
if (dir == DMA_NONE)
|
||||||
|
return H_SUCCESS;
|
||||||
|
|
||||||
|
ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
|
||||||
|
if (ret != H_SUCCESS)
|
||||||
|
iommu_tce_xchg(tbl, entry, &hpa, &dir);
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
long kvmppc_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
|
||||||
|
unsigned long entry, unsigned long ua,
|
||||||
|
enum dma_data_direction dir)
|
||||||
|
{
|
||||||
|
long ret;
|
||||||
|
unsigned long hpa, *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
|
||||||
|
struct mm_iommu_table_group_mem_t *mem;
|
||||||
|
|
||||||
|
if (!pua)
|
||||||
|
/* it_userspace allocation might be delayed */
|
||||||
|
return H_TOO_HARD;
|
||||||
|
|
||||||
|
mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
|
||||||
|
if (!mem)
|
||||||
|
/* This only handles v2 IOMMU type, v1 is handled via ioctl() */
|
||||||
|
return H_TOO_HARD;
|
||||||
|
|
||||||
|
if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, &hpa)))
|
||||||
|
return H_HARDWARE;
|
||||||
|
|
||||||
|
if (mm_iommu_mapped_inc(mem))
|
||||||
|
return H_CLOSED;
|
||||||
|
|
||||||
|
ret = iommu_tce_xchg(tbl, entry, &hpa, &dir);
|
||||||
|
if (WARN_ON_ONCE(ret)) {
|
||||||
|
mm_iommu_mapped_dec(mem);
|
||||||
|
return H_HARDWARE;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (dir != DMA_NONE)
|
||||||
|
kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
|
||||||
|
|
||||||
|
*pua = ua;
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
|
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
|
||||||
unsigned long ioba, unsigned long tce)
|
unsigned long ioba, unsigned long tce)
|
||||||
{
|
{
|
||||||
struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
|
struct kvmppc_spapr_tce_table *stt;
|
||||||
long ret;
|
long ret, idx;
|
||||||
|
struct kvmppc_spapr_tce_iommu_table *stit;
|
||||||
|
unsigned long entry, ua = 0;
|
||||||
|
enum dma_data_direction dir;
|
||||||
|
|
||||||
/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
|
/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
|
||||||
/* liobn, ioba, tce); */
|
/* liobn, ioba, tce); */
|
||||||
|
|
||||||
|
stt = kvmppc_find_table(vcpu->kvm, liobn);
|
||||||
if (!stt)
|
if (!stt)
|
||||||
return H_TOO_HARD;
|
return H_TOO_HARD;
|
||||||
|
|
||||||
@ -231,7 +466,35 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
|
|||||||
if (ret != H_SUCCESS)
|
if (ret != H_SUCCESS)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
kvmppc_tce_put(stt, ioba >> stt->page_shift, tce);
|
dir = iommu_tce_direction(tce);
|
||||||
|
if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
|
||||||
|
tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
|
||||||
|
return H_PARAMETER;
|
||||||
|
|
||||||
|
entry = ioba >> stt->page_shift;
|
||||||
|
|
||||||
|
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
|
||||||
|
if (dir == DMA_NONE) {
|
||||||
|
ret = kvmppc_tce_iommu_unmap(vcpu->kvm,
|
||||||
|
stit->tbl, entry);
|
||||||
|
} else {
|
||||||
|
idx = srcu_read_lock(&vcpu->kvm->srcu);
|
||||||
|
ret = kvmppc_tce_iommu_map(vcpu->kvm, stit->tbl,
|
||||||
|
entry, ua, dir);
|
||||||
|
srcu_read_unlock(&vcpu->kvm->srcu, idx);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (ret == H_SUCCESS)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
if (ret == H_TOO_HARD)
|
||||||
|
return ret;
|
||||||
|
|
||||||
|
WARN_ON_ONCE(1);
|
||||||
|
kvmppc_clear_tce(stit->tbl, entry);
|
||||||
|
}
|
||||||
|
|
||||||
|
kvmppc_tce_put(stt, entry, tce);
|
||||||
|
|
||||||
return H_SUCCESS;
|
return H_SUCCESS;
|
||||||
}
|
}
|
||||||
@ -246,8 +509,9 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
|
|||||||
unsigned long entry, ua = 0;
|
unsigned long entry, ua = 0;
|
||||||
u64 __user *tces;
|
u64 __user *tces;
|
||||||
u64 tce;
|
u64 tce;
|
||||||
|
struct kvmppc_spapr_tce_iommu_table *stit;
|
||||||
|
|
||||||
stt = kvmppc_find_table(vcpu, liobn);
|
stt = kvmppc_find_table(vcpu->kvm, liobn);
|
||||||
if (!stt)
|
if (!stt)
|
||||||
return H_TOO_HARD;
|
return H_TOO_HARD;
|
||||||
|
|
||||||
@ -284,6 +548,26 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
|
|||||||
if (ret != H_SUCCESS)
|
if (ret != H_SUCCESS)
|
||||||
goto unlock_exit;
|
goto unlock_exit;
|
||||||
|
|
||||||
|
if (kvmppc_gpa_to_ua(vcpu->kvm,
|
||||||
|
tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
|
||||||
|
&ua, NULL))
|
||||||
|
return H_PARAMETER;
|
||||||
|
|
||||||
|
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
|
||||||
|
ret = kvmppc_tce_iommu_map(vcpu->kvm,
|
||||||
|
stit->tbl, entry + i, ua,
|
||||||
|
iommu_tce_direction(tce));
|
||||||
|
|
||||||
|
if (ret == H_SUCCESS)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
if (ret == H_TOO_HARD)
|
||||||
|
goto unlock_exit;
|
||||||
|
|
||||||
|
WARN_ON_ONCE(1);
|
||||||
|
kvmppc_clear_tce(stit->tbl, entry);
|
||||||
|
}
|
||||||
|
|
||||||
kvmppc_tce_put(stt, entry + i, tce);
|
kvmppc_tce_put(stt, entry + i, tce);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -300,8 +584,9 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
|
|||||||
{
|
{
|
||||||
struct kvmppc_spapr_tce_table *stt;
|
struct kvmppc_spapr_tce_table *stt;
|
||||||
long i, ret;
|
long i, ret;
|
||||||
|
struct kvmppc_spapr_tce_iommu_table *stit;
|
||||||
|
|
||||||
stt = kvmppc_find_table(vcpu, liobn);
|
stt = kvmppc_find_table(vcpu->kvm, liobn);
|
||||||
if (!stt)
|
if (!stt)
|
||||||
return H_TOO_HARD;
|
return H_TOO_HARD;
|
||||||
|
|
||||||
@ -313,6 +598,24 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
|
|||||||
if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
|
if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
|
||||||
return H_PARAMETER;
|
return H_PARAMETER;
|
||||||
|
|
||||||
|
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
|
||||||
|
unsigned long entry = ioba >> stit->tbl->it_page_shift;
|
||||||
|
|
||||||
|
for (i = 0; i < npages; ++i) {
|
||||||
|
ret = kvmppc_tce_iommu_unmap(vcpu->kvm,
|
||||||
|
stit->tbl, entry + i);
|
||||||
|
|
||||||
|
if (ret == H_SUCCESS)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
if (ret == H_TOO_HARD)
|
||||||
|
return ret;
|
||||||
|
|
||||||
|
WARN_ON_ONCE(1);
|
||||||
|
kvmppc_clear_tce(stit->tbl, entry);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
|
for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
|
||||||
kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);
|
kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);
|
||||||
|
|
||||||
|
@ -40,6 +40,31 @@
|
|||||||
#include <asm/iommu.h>
|
#include <asm/iommu.h>
|
||||||
#include <asm/tce.h>
|
#include <asm/tce.h>
|
||||||
|
|
||||||
|
#ifdef CONFIG_BUG
|
||||||
|
|
||||||
|
#define WARN_ON_ONCE_RM(condition) ({ \
|
||||||
|
static bool __section(.data.unlikely) __warned; \
|
||||||
|
int __ret_warn_once = !!(condition); \
|
||||||
|
\
|
||||||
|
if (unlikely(__ret_warn_once && !__warned)) { \
|
||||||
|
__warned = true; \
|
||||||
|
pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n", \
|
||||||
|
__stringify(condition), \
|
||||||
|
__func__, __LINE__); \
|
||||||
|
dump_stack(); \
|
||||||
|
} \
|
||||||
|
unlikely(__ret_warn_once); \
|
||||||
|
})
|
||||||
|
|
||||||
|
#else
|
||||||
|
|
||||||
|
#define WARN_ON_ONCE_RM(condition) ({ \
|
||||||
|
int __ret_warn_on = !!(condition); \
|
||||||
|
unlikely(__ret_warn_on); \
|
||||||
|
})
|
||||||
|
|
||||||
|
#endif
|
||||||
|
|
||||||
#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64))
|
#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64))
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -48,10 +73,9 @@
|
|||||||
* WARNING: This will be called in real or virtual mode on HV KVM and virtual
|
* WARNING: This will be called in real or virtual mode on HV KVM and virtual
|
||||||
* mode on PR KVM
|
* mode on PR KVM
|
||||||
*/
|
*/
|
||||||
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm_vcpu *vcpu,
|
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
|
||||||
unsigned long liobn)
|
unsigned long liobn)
|
||||||
{
|
{
|
||||||
struct kvm *kvm = vcpu->kvm;
|
|
||||||
struct kvmppc_spapr_tce_table *stt;
|
struct kvmppc_spapr_tce_table *stt;
|
||||||
|
|
||||||
list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
|
list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
|
||||||
@ -62,27 +86,6 @@ struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm_vcpu *vcpu,
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(kvmppc_find_table);
|
EXPORT_SYMBOL_GPL(kvmppc_find_table);
|
||||||
|
|
||||||
/*
|
|
||||||
* Validates IO address.
|
|
||||||
*
|
|
||||||
* WARNING: This will be called in real-mode on HV KVM and virtual
|
|
||||||
* mode on PR KVM
|
|
||||||
*/
|
|
||||||
long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
|
|
||||||
unsigned long ioba, unsigned long npages)
|
|
||||||
{
|
|
||||||
unsigned long mask = (1ULL << stt->page_shift) - 1;
|
|
||||||
unsigned long idx = ioba >> stt->page_shift;
|
|
||||||
|
|
||||||
if ((ioba & mask) || (idx < stt->offset) ||
|
|
||||||
(idx - stt->offset + npages > stt->size) ||
|
|
||||||
(idx + npages < idx))
|
|
||||||
return H_PARAMETER;
|
|
||||||
|
|
||||||
return H_SUCCESS;
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL_GPL(kvmppc_ioba_validate);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Validates TCE address.
|
* Validates TCE address.
|
||||||
* At the moment flags and page mask are validated.
|
* At the moment flags and page mask are validated.
|
||||||
@ -96,10 +99,14 @@ EXPORT_SYMBOL_GPL(kvmppc_ioba_validate);
|
|||||||
*/
|
*/
|
||||||
long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce)
|
long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce)
|
||||||
{
|
{
|
||||||
unsigned long page_mask = ~((1ULL << stt->page_shift) - 1);
|
unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
|
||||||
unsigned long mask = ~(page_mask | TCE_PCI_WRITE | TCE_PCI_READ);
|
enum dma_data_direction dir = iommu_tce_direction(tce);
|
||||||
|
|
||||||
if (tce & mask)
|
/* Allow userspace to poison TCE table */
|
||||||
|
if (dir == DMA_NONE)
|
||||||
|
return H_SUCCESS;
|
||||||
|
|
||||||
|
if (iommu_tce_check_gpa(stt->page_shift, gpa))
|
||||||
return H_PARAMETER;
|
return H_PARAMETER;
|
||||||
|
|
||||||
return H_SUCCESS;
|
return H_SUCCESS;
|
||||||
@ -179,15 +186,122 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
|
|||||||
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);
|
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);
|
||||||
|
|
||||||
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
|
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
|
||||||
|
static void kvmppc_rm_clear_tce(struct iommu_table *tbl, unsigned long entry)
|
||||||
|
{
|
||||||
|
unsigned long hpa = 0;
|
||||||
|
enum dma_data_direction dir = DMA_NONE;
|
||||||
|
|
||||||
|
iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
|
||||||
|
}
|
||||||
|
|
||||||
|
static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
|
||||||
|
struct iommu_table *tbl, unsigned long entry)
|
||||||
|
{
|
||||||
|
struct mm_iommu_table_group_mem_t *mem = NULL;
|
||||||
|
const unsigned long pgsize = 1ULL << tbl->it_page_shift;
|
||||||
|
unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
|
||||||
|
|
||||||
|
if (!pua)
|
||||||
|
/* it_userspace allocation might be delayed */
|
||||||
|
return H_TOO_HARD;
|
||||||
|
|
||||||
|
pua = (void *) vmalloc_to_phys(pua);
|
||||||
|
if (WARN_ON_ONCE_RM(!pua))
|
||||||
|
return H_HARDWARE;
|
||||||
|
|
||||||
|
mem = mm_iommu_lookup_rm(kvm->mm, *pua, pgsize);
|
||||||
|
if (!mem)
|
||||||
|
return H_TOO_HARD;
|
||||||
|
|
||||||
|
mm_iommu_mapped_dec(mem);
|
||||||
|
|
||||||
|
*pua = 0;
|
||||||
|
|
||||||
|
return H_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
|
||||||
|
struct iommu_table *tbl, unsigned long entry)
|
||||||
|
{
|
||||||
|
enum dma_data_direction dir = DMA_NONE;
|
||||||
|
unsigned long hpa = 0;
|
||||||
|
long ret;
|
||||||
|
|
||||||
|
if (iommu_tce_xchg_rm(tbl, entry, &hpa, &dir))
|
||||||
|
/*
|
||||||
|
* real mode xchg can fail if struct page crosses
|
||||||
|
* a page boundary
|
||||||
|
*/
|
||||||
|
return H_TOO_HARD;
|
||||||
|
|
||||||
|
if (dir == DMA_NONE)
|
||||||
|
return H_SUCCESS;
|
||||||
|
|
||||||
|
ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
|
||||||
|
if (ret)
|
||||||
|
iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
|
||||||
|
unsigned long entry, unsigned long ua,
|
||||||
|
enum dma_data_direction dir)
|
||||||
|
{
|
||||||
|
long ret;
|
||||||
|
unsigned long hpa = 0;
|
||||||
|
unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
|
||||||
|
struct mm_iommu_table_group_mem_t *mem;
|
||||||
|
|
||||||
|
if (!pua)
|
||||||
|
/* it_userspace allocation might be delayed */
|
||||||
|
return H_TOO_HARD;
|
||||||
|
|
||||||
|
mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
|
||||||
|
if (!mem)
|
||||||
|
return H_TOO_HARD;
|
||||||
|
|
||||||
|
if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, &hpa)))
|
||||||
|
return H_HARDWARE;
|
||||||
|
|
||||||
|
pua = (void *) vmalloc_to_phys(pua);
|
||||||
|
if (WARN_ON_ONCE_RM(!pua))
|
||||||
|
return H_HARDWARE;
|
||||||
|
|
||||||
|
if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
|
||||||
|
return H_CLOSED;
|
||||||
|
|
||||||
|
ret = iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
|
||||||
|
if (ret) {
|
||||||
|
mm_iommu_mapped_dec(mem);
|
||||||
|
/*
|
||||||
|
* real mode xchg can fail if struct page crosses
|
||||||
|
* a page boundary
|
||||||
|
*/
|
||||||
|
return H_TOO_HARD;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (dir != DMA_NONE)
|
||||||
|
kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
|
||||||
|
|
||||||
|
*pua = ua;
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
|
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
|
||||||
unsigned long ioba, unsigned long tce)
|
unsigned long ioba, unsigned long tce)
|
||||||
{
|
{
|
||||||
struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
|
struct kvmppc_spapr_tce_table *stt;
|
||||||
long ret;
|
long ret;
|
||||||
|
struct kvmppc_spapr_tce_iommu_table *stit;
|
||||||
|
unsigned long entry, ua = 0;
|
||||||
|
enum dma_data_direction dir;
|
||||||
|
|
||||||
/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
|
/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
|
||||||
/* liobn, ioba, tce); */
|
/* liobn, ioba, tce); */
|
||||||
|
|
||||||
|
stt = kvmppc_find_table(vcpu->kvm, liobn);
|
||||||
if (!stt)
|
if (!stt)
|
||||||
return H_TOO_HARD;
|
return H_TOO_HARD;
|
||||||
|
|
||||||
@ -199,7 +313,32 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
|
|||||||
if (ret != H_SUCCESS)
|
if (ret != H_SUCCESS)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
kvmppc_tce_put(stt, ioba >> stt->page_shift, tce);
|
dir = iommu_tce_direction(tce);
|
||||||
|
if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
|
||||||
|
tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
|
||||||
|
return H_PARAMETER;
|
||||||
|
|
||||||
|
entry = ioba >> stt->page_shift;
|
||||||
|
|
||||||
|
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
|
||||||
|
if (dir == DMA_NONE)
|
||||||
|
ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
|
||||||
|
stit->tbl, entry);
|
||||||
|
else
|
||||||
|
ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
|
||||||
|
stit->tbl, entry, ua, dir);
|
||||||
|
|
||||||
|
if (ret == H_SUCCESS)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
if (ret == H_TOO_HARD)
|
||||||
|
return ret;
|
||||||
|
|
||||||
|
WARN_ON_ONCE_RM(1);
|
||||||
|
kvmppc_rm_clear_tce(stit->tbl, entry);
|
||||||
|
}
|
||||||
|
|
||||||
|
kvmppc_tce_put(stt, entry, tce);
|
||||||
|
|
||||||
return H_SUCCESS;
|
return H_SUCCESS;
|
||||||
}
|
}
|
||||||
@@ -239,8 +378,10 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 	long i, ret = H_SUCCESS;
 	unsigned long tces, entry, ua = 0;
 	unsigned long *rmap = NULL;
+	bool prereg = false;
+	struct kvmppc_spapr_tce_iommu_table *stit;

-	stt = kvmppc_find_table(vcpu, liobn);
+	stt = kvmppc_find_table(vcpu->kvm, liobn);
 	if (!stt)
 		return H_TOO_HARD;

@@ -259,10 +400,35 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 	if (ret != H_SUCCESS)
 		return ret;

+	if (mm_iommu_preregistered(vcpu->kvm->mm)) {
+		/*
+		 * We get here if guest memory was pre-registered which
+		 * is normally VFIO case and gpa->hpa translation does not
+		 * depend on hpt.
+		 */
+		struct mm_iommu_table_group_mem_t *mem;
+
+		if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL))
+			return H_TOO_HARD;
+
+		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
+		if (mem)
+			prereg = mm_iommu_ua_to_hpa_rm(mem, ua, &tces) == 0;
+	}
+
+	if (!prereg) {
+		/*
+		 * This is usually a case of a guest with emulated devices only
+		 * when TCE list is not in preregistered memory.
+		 * We do not require memory to be preregistered in this case
+		 * so lock rmap and do __find_linux_pte_or_hugepte().
+		 */
 		if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
 			return H_TOO_HARD;

 		rmap = (void *) vmalloc_to_phys(rmap);
+		if (WARN_ON_ONCE_RM(!rmap))
+			return H_HARDWARE;

 		/*
 		 * Synchronize with the MMU notifier callbacks in
@@ -277,6 +443,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 			ret = H_TOO_HARD;
 			goto unlock_exit;
 		}
+	}

 	for (i = 0; i < npages; ++i) {
 		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);
@@ -285,10 +452,32 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 		if (ret != H_SUCCESS)
 			goto unlock_exit;

+		ua = 0;
+		if (kvmppc_gpa_to_ua(vcpu->kvm,
+				tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
+				&ua, NULL))
+			return H_PARAMETER;
+
+		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
+			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
+					stit->tbl, entry + i, ua,
+					iommu_tce_direction(tce));
+
+			if (ret == H_SUCCESS)
+				continue;
+
+			if (ret == H_TOO_HARD)
+				goto unlock_exit;
+
+			WARN_ON_ONCE_RM(1);
+			kvmppc_rm_clear_tce(stit->tbl, entry);
+		}
+
 		kvmppc_tce_put(stt, entry + i, tce);
 	}

 unlock_exit:
+	if (rmap)
 		unlock_rmap(rmap);

 	return ret;
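[Editor note, not part of the patch: the pre-registered branch above avoids the hashed page table by looking the TCE list up in memory that userspace registered in advance. A rough sketch of the containment check that such a lookup relies on, assuming one hpas[] element is kept per registered page; the helper name ua_in_mem() is hypothetical.]

	/* hypothetical illustration of the "does ua fall inside this region" test */
	static bool ua_in_mem(unsigned long ua, unsigned long size,
			      unsigned long mem_ua, unsigned long entries)
	{
		return (mem_ua <= ua) &&
		       (ua + size <= mem_ua + (entries << PAGE_SHIFT));
	}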
@@ -300,8 +489,9 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
 {
 	struct kvmppc_spapr_tce_table *stt;
 	long i, ret;
+	struct kvmppc_spapr_tce_iommu_table *stit;

-	stt = kvmppc_find_table(vcpu, liobn);
+	stt = kvmppc_find_table(vcpu->kvm, liobn);
 	if (!stt)
 		return H_TOO_HARD;

@@ -313,6 +503,24 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
 	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
 		return H_PARAMETER;

+	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
+		unsigned long entry = ioba >> stit->tbl->it_page_shift;
+
+		for (i = 0; i < npages; ++i) {
+			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
+					stit->tbl, entry + i);
+
+			if (ret == H_SUCCESS)
+				continue;
+
+			if (ret == H_TOO_HARD)
+				return ret;
+
+			WARN_ON_ONCE_RM(1);
+			kvmppc_rm_clear_tce(stit->tbl, entry);
+		}
+	}
+
 	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
 		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

@@ -322,12 +530,13 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
 long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 		unsigned long ioba)
 {
-	struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
+	struct kvmppc_spapr_tce_table *stt;
 	long ret;
 	unsigned long idx;
 	struct page *page;
 	u64 *tbl;

+	stt = kvmppc_find_table(vcpu->kvm, liobn);
 	if (!stt)
 		return H_TOO_HARD;

@@ -503,10 +503,18 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 		break;
 unprivileged:
 	default:
-		printk(KERN_INFO "KVM: invalid SPR write: %d\n", sprn);
-#ifndef DEBUG_SPR
-		emulated = EMULATE_FAIL;
-#endif
+		pr_info_ratelimited("KVM: invalid SPR write: %d\n", sprn);
+		if (sprn & 0x10) {
+			if (kvmppc_get_msr(vcpu) & MSR_PR) {
+				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
+				emulated = EMULATE_AGAIN;
+			}
+		} else {
+			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0) {
+				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+				emulated = EMULATE_AGAIN;
+			}
+		}
 		break;
 	}

@@ -648,10 +656,20 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val
 		break;
 	default:
 unprivileged:
-		printk(KERN_INFO "KVM: invalid SPR read: %d\n", sprn);
-#ifndef DEBUG_SPR
-		emulated = EMULATE_FAIL;
-#endif
+		pr_info_ratelimited("KVM: invalid SPR read: %d\n", sprn);
+		if (sprn & 0x10) {
+			if (kvmppc_get_msr(vcpu) & MSR_PR) {
+				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
+				emulated = EMULATE_AGAIN;
+			}
+		} else {
+			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0 ||
+			    sprn == 4 || sprn == 5 || sprn == 6) {
+				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+				emulated = EMULATE_AGAIN;
+			}
+		}

 		break;
 	}

@@ -3624,11 +3624,9 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
 		return -EIO;

 	mutex_lock(&kvm->lock);
+	if (!kvm->arch.pimap)
+		goto unlock;

-	if (kvm->arch.pimap == NULL) {
-		mutex_unlock(&kvm->lock);
-		return 0;
-	}
 	pimap = kvm->arch.pimap;

 	for (i = 0; i < pimap->n_mapped; i++) {
@@ -3650,7 +3648,7 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
 	 * We don't free this structure even when the count goes to
 	 * zero. The structure is freed when we destroy the VM.
 	 */
+ unlock:
 	mutex_unlock(&kvm->lock);
 	return 0;
 }
@@ -537,8 +537,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	int r = RESUME_GUEST;
 	int relocated;
 	int page_found = 0;
-	struct kvmppc_pte pte;
-	bool is_mmio = false;
+	struct kvmppc_pte pte = { 0 };
 	bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
 	bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
 	u64 vsid;
@@ -616,8 +615,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		/* Page not found in guest SLB */
 		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
 		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
-	} else if (!is_mmio &&
-		   kvmppc_visible_gpa(vcpu, pte.raddr)) {
+	} else if (kvmppc_visible_gpa(vcpu, pte.raddr)) {
 		if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
 			/*
 			 * There is already a host HPTE there, presumably
@@ -627,7 +625,11 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			kvmppc_mmu_unmap_page(vcpu, &pte);
 		}
 		/* The guest's PTE is not mapped yet. Map on the host */
-		kvmppc_mmu_map_page(vcpu, &pte, iswrite);
+		if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) {
+			/* Exit KVM if mapping failed */
+			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+			return RESUME_HOST;
+		}
 		if (data)
 			vcpu->stat.sp_storage++;
 		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
@@ -300,6 +300,11 @@ void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
 	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
 }

+void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
+{
+	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
+}
+
 void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
 {
 	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
@@ -797,9 +797,8 @@ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
 	host_tlb_params[0].sets =
 		host_tlb_params[0].entries / host_tlb_params[0].ways;
 	host_tlb_params[1].sets = 1;
-	vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
-					   host_tlb_params[1].entries,
+	vcpu_e500->h2g_tlb1_rmap = kcalloc(host_tlb_params[1].entries,
+					   sizeof(*vcpu_e500->h2g_tlb1_rmap),
 					   GFP_KERNEL);
 	if (!vcpu_e500->h2g_tlb1_rmap)
 		return -EINVAL;
@@ -259,10 +259,18 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)

 		case OP_31_XOP_MFSPR:
 			emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
+			if (emulated == EMULATE_AGAIN) {
+				emulated = EMULATE_DONE;
+				advance = 0;
+			}
 			break;

 		case OP_31_XOP_MTSPR:
 			emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
+			if (emulated == EMULATE_AGAIN) {
+				emulated = EMULATE_DONE;
+				advance = 0;
+			}
 			break;

 		case OP_31_XOP_TLBSYNC:
@@ -34,18 +34,38 @@
 #include "timing.h"
 #include "trace.h"

-/* XXX to do:
- * lhax
- * lhaux
- * lswx
- * lswi
- * stswx
- * stswi
- * lha
- * lhau
- * lmw
- * stmw
+#ifdef CONFIG_PPC_FPU
+static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
+{
+	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
+		kvmppc_core_queue_fpunavail(vcpu);
+		return true;
+	}
+
+	return false;
+}
+#endif /* CONFIG_PPC_FPU */
+
+#ifdef CONFIG_VSX
+static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
+{
+	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
+		kvmppc_core_queue_vsx_unavail(vcpu);
+		return true;
+	}
+
+	return false;
+}
+#endif /* CONFIG_VSX */
+
+/*
+ * XXX to do:
+ * lfiwax, lfiwzx
+ * vector loads and stores
 *
+ * Instructions that trap when used on cache-inhibited mappings
+ * are not emulated here: multiple and string instructions,
+ * lq/stq, and the load-reserve/store-conditional instructions.
 */
 int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 {
@@ -66,6 +86,19 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 	rs = get_rs(inst);
 	rt = get_rt(inst);

+	/*
+	 * if mmio_vsx_tx_sx_enabled == 0, copy data between
+	 * VSR[0..31] and memory
+	 * if mmio_vsx_tx_sx_enabled == 1, copy data between
+	 * VSR[32..63] and memory
+	 */
+	vcpu->arch.mmio_vsx_tx_sx_enabled = get_tx_or_sx(inst);
+	vcpu->arch.mmio_vsx_copy_nums = 0;
+	vcpu->arch.mmio_vsx_offset = 0;
+	vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_NONE;
+	vcpu->arch.mmio_sp64_extend = 0;
+	vcpu->arch.mmio_sign_extend = 0;
+
 	switch (get_op(inst)) {
 	case 31:
 		switch (get_xop(inst)) {
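[Editor note, not part of the patch: get_tx_or_sx() pulls out the low instruction bit that VSX X-form encodings use to extend the 5-bit register field, which is why the comment above distinguishes VSR[0..31] from VSR[32..63]. An illustrative sketch of how the effective register number could be formed, under that assumption:]

	/* illustrative only: full VSX register index from the 5-bit field plus the TX/SX bit */
	unsigned int vsx_reg = (get_tx_or_sx(inst) << 5) | get_rt(inst);	/* 0..63 */
	/* registers 0..31 overlay the FPRs, 32..63 overlay the VRs */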
@@ -73,6 +106,11 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
 			break;

+		case OP_31_XOP_LWZUX:
+			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
+			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+			break;
+
 		case OP_31_XOP_LBZX:
 			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
 			break;
@@ -82,22 +120,36 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 			break;

+		case OP_31_XOP_STDX:
+			emulated = kvmppc_handle_store(run, vcpu,
+					kvmppc_get_gpr(vcpu, rs), 8, 1);
+			break;
+
+		case OP_31_XOP_STDUX:
+			emulated = kvmppc_handle_store(run, vcpu,
+					kvmppc_get_gpr(vcpu, rs), 8, 1);
+			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+			break;
+
 		case OP_31_XOP_STWX:
 			emulated = kvmppc_handle_store(run, vcpu,
-					kvmppc_get_gpr(vcpu, rs),
-					4, 1);
+					kvmppc_get_gpr(vcpu, rs), 4, 1);
+			break;
+
+		case OP_31_XOP_STWUX:
+			emulated = kvmppc_handle_store(run, vcpu,
+					kvmppc_get_gpr(vcpu, rs), 4, 1);
+			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 			break;

 		case OP_31_XOP_STBX:
 			emulated = kvmppc_handle_store(run, vcpu,
-					kvmppc_get_gpr(vcpu, rs),
-					1, 1);
+					kvmppc_get_gpr(vcpu, rs), 1, 1);
 			break;

 		case OP_31_XOP_STBUX:
 			emulated = kvmppc_handle_store(run, vcpu,
-					kvmppc_get_gpr(vcpu, rs),
-					1, 1);
+					kvmppc_get_gpr(vcpu, rs), 1, 1);
 			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 			break;

@@ -105,6 +157,11 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
 			break;

+		case OP_31_XOP_LHAUX:
+			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
+			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+			break;
+
 		case OP_31_XOP_LHZX:
 			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
 			break;
@@ -116,14 +173,12 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)

 		case OP_31_XOP_STHX:
 			emulated = kvmppc_handle_store(run, vcpu,
-					kvmppc_get_gpr(vcpu, rs),
-					2, 1);
+					kvmppc_get_gpr(vcpu, rs), 2, 1);
 			break;

 		case OP_31_XOP_STHUX:
 			emulated = kvmppc_handle_store(run, vcpu,
-					kvmppc_get_gpr(vcpu, rs),
-					2, 1);
+					kvmppc_get_gpr(vcpu, rs), 2, 1);
 			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 			break;

@@ -143,8 +198,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)

 		case OP_31_XOP_STWBRX:
 			emulated = kvmppc_handle_store(run, vcpu,
-					kvmppc_get_gpr(vcpu, rs),
-					4, 0);
+					kvmppc_get_gpr(vcpu, rs), 4, 0);
 			break;

 		case OP_31_XOP_LHBRX:
@@ -153,10 +207,258 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)

 		case OP_31_XOP_STHBRX:
 			emulated = kvmppc_handle_store(run, vcpu,
-					kvmppc_get_gpr(vcpu, rs),
-					2, 0);
+					kvmppc_get_gpr(vcpu, rs), 2, 0);
 			break;

+		case OP_31_XOP_LDBRX:
+			emulated = kvmppc_handle_load(run, vcpu, rt, 8, 0);
+			break;
+
+		case OP_31_XOP_STDBRX:
+			emulated = kvmppc_handle_store(run, vcpu,
+					kvmppc_get_gpr(vcpu, rs), 8, 0);
+			break;
+
+		case OP_31_XOP_LDX:
+			emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
+			break;
+
+		case OP_31_XOP_LDUX:
+			emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
+			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+			break;
+
+		case OP_31_XOP_LWAX:
+			emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1);
+			break;
+
+		case OP_31_XOP_LWAUX:
+			emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1);
+			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+			break;
+
+#ifdef CONFIG_PPC_FPU
+		case OP_31_XOP_LFSX:
+			if (kvmppc_check_fp_disabled(vcpu))
+				return EMULATE_DONE;
+			vcpu->arch.mmio_sp64_extend = 1;
+			emulated = kvmppc_handle_load(run, vcpu,
+				KVM_MMIO_REG_FPR|rt, 4, 1);
+			break;
+
+		case OP_31_XOP_LFSUX:
+			if (kvmppc_check_fp_disabled(vcpu))
+				return EMULATE_DONE;
+			vcpu->arch.mmio_sp64_extend = 1;
+			emulated = kvmppc_handle_load(run, vcpu,
+				KVM_MMIO_REG_FPR|rt, 4, 1);
+			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+			break;
+
+		case OP_31_XOP_LFDX:
+			if (kvmppc_check_fp_disabled(vcpu))
+				return EMULATE_DONE;
+			emulated = kvmppc_handle_load(run, vcpu,
+				KVM_MMIO_REG_FPR|rt, 8, 1);
+			break;
+
+		case OP_31_XOP_LFDUX:
+			if (kvmppc_check_fp_disabled(vcpu))
+				return EMULATE_DONE;
+			emulated = kvmppc_handle_load(run, vcpu,
+				KVM_MMIO_REG_FPR|rt, 8, 1);
+			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+			break;
+
+		case OP_31_XOP_LFIWAX:
+			if (kvmppc_check_fp_disabled(vcpu))
+				return EMULATE_DONE;
+			emulated = kvmppc_handle_loads(run, vcpu,
+				KVM_MMIO_REG_FPR|rt, 4, 1);
+			break;
+
+		case OP_31_XOP_LFIWZX:
+			if (kvmppc_check_fp_disabled(vcpu))
+				return EMULATE_DONE;
+			emulated = kvmppc_handle_load(run, vcpu,
+				KVM_MMIO_REG_FPR|rt, 4, 1);
+			break;
+
+		case OP_31_XOP_STFSX:
+			if (kvmppc_check_fp_disabled(vcpu))
+				return EMULATE_DONE;
+			vcpu->arch.mmio_sp64_extend = 1;
+			emulated = kvmppc_handle_store(run, vcpu,
+				VCPU_FPR(vcpu, rs), 4, 1);
+			break;
+
+		case OP_31_XOP_STFSUX:
+			if (kvmppc_check_fp_disabled(vcpu))
+				return EMULATE_DONE;
+			vcpu->arch.mmio_sp64_extend = 1;
+			emulated = kvmppc_handle_store(run, vcpu,
+				VCPU_FPR(vcpu, rs), 4, 1);
+			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+			break;
+
+		case OP_31_XOP_STFDX:
+			if (kvmppc_check_fp_disabled(vcpu))
+				return EMULATE_DONE;
+			emulated = kvmppc_handle_store(run, vcpu,
+				VCPU_FPR(vcpu, rs), 8, 1);
+			break;
+
+		case OP_31_XOP_STFDUX:
+			if (kvmppc_check_fp_disabled(vcpu))
+				return EMULATE_DONE;
+			emulated = kvmppc_handle_store(run, vcpu,
+				VCPU_FPR(vcpu, rs), 8, 1);
+			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+			break;
+
+		case OP_31_XOP_STFIWX:
+			if (kvmppc_check_fp_disabled(vcpu))
+				return EMULATE_DONE;
+			emulated = kvmppc_handle_store(run, vcpu,
+				VCPU_FPR(vcpu, rs), 4, 1);
+			break;
+#endif
+
+#ifdef CONFIG_VSX
+		case OP_31_XOP_LXSDX:
+			if (kvmppc_check_vsx_disabled(vcpu))
+				return EMULATE_DONE;
+			vcpu->arch.mmio_vsx_copy_nums = 1;
+			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+			emulated = kvmppc_handle_vsx_load(run, vcpu,
+				KVM_MMIO_REG_VSX|rt, 8, 1, 0);
+			break;
+
+		case OP_31_XOP_LXSSPX:
+			if (kvmppc_check_vsx_disabled(vcpu))
+				return EMULATE_DONE;
+			vcpu->arch.mmio_vsx_copy_nums = 1;
+			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+			vcpu->arch.mmio_sp64_extend = 1;
+			emulated = kvmppc_handle_vsx_load(run, vcpu,
+				KVM_MMIO_REG_VSX|rt, 4, 1, 0);
+			break;
+
+		case OP_31_XOP_LXSIWAX:
+			if (kvmppc_check_vsx_disabled(vcpu))
+				return EMULATE_DONE;
+			vcpu->arch.mmio_vsx_copy_nums = 1;
+			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+			emulated = kvmppc_handle_vsx_load(run, vcpu,
+				KVM_MMIO_REG_VSX|rt, 4, 1, 1);
+			break;
+
+		case OP_31_XOP_LXSIWZX:
+			if (kvmppc_check_vsx_disabled(vcpu))
+				return EMULATE_DONE;
+			vcpu->arch.mmio_vsx_copy_nums = 1;
+			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+			emulated = kvmppc_handle_vsx_load(run, vcpu,
+				KVM_MMIO_REG_VSX|rt, 4, 1, 0);
+			break;
+
+		case OP_31_XOP_LXVD2X:
+		/*
+		 * In this case, the official load/store process is like this:
+		 * Step1, exit from vm by page fault isr, then kvm save vsr.
+		 * Please see guest_exit_cont->store_fp_state->SAVE_32VSRS
+		 * as reference.
+		 *
+		 * Step2, copy data between memory and VCPU
+		 * Notice: for LXVD2X/STXVD2X/LXVW4X/STXVW4X, we use
+		 * 2copies*8bytes or 4copies*4bytes
+		 * to simulate one copy of 16bytes.
+		 * Also there is an endian issue here, we should notice the
+		 * layout of memory.
+		 * Please see MARCO of LXVD2X_ROT/STXVD2X_ROT as more reference.
+		 * If host is little-endian, kvm will call XXSWAPD for
+		 * LXVD2X_ROT/STXVD2X_ROT.
+		 * So, if host is little-endian,
+		 * the postion of memeory should be swapped.
+		 *
+		 * Step3, return to guest, kvm reset register.
+		 * Please see kvmppc_hv_entry->load_fp_state->REST_32VSRS
+		 * as reference.
+		 */
+			if (kvmppc_check_vsx_disabled(vcpu))
+				return EMULATE_DONE;
+			vcpu->arch.mmio_vsx_copy_nums = 2;
+			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+			emulated = kvmppc_handle_vsx_load(run, vcpu,
+				KVM_MMIO_REG_VSX|rt, 8, 1, 0);
+			break;
+
+		case OP_31_XOP_LXVW4X:
+			if (kvmppc_check_vsx_disabled(vcpu))
+				return EMULATE_DONE;
+			vcpu->arch.mmio_vsx_copy_nums = 4;
+			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
+			emulated = kvmppc_handle_vsx_load(run, vcpu,
+				KVM_MMIO_REG_VSX|rt, 4, 1, 0);
+			break;
+
+		case OP_31_XOP_LXVDSX:
+			if (kvmppc_check_vsx_disabled(vcpu))
+				return EMULATE_DONE;
+			vcpu->arch.mmio_vsx_copy_nums = 1;
+			vcpu->arch.mmio_vsx_copy_type =
+				KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
+			emulated = kvmppc_handle_vsx_load(run, vcpu,
+				KVM_MMIO_REG_VSX|rt, 8, 1, 0);
+			break;
+
+		case OP_31_XOP_STXSDX:
+			if (kvmppc_check_vsx_disabled(vcpu))
+				return EMULATE_DONE;
+			vcpu->arch.mmio_vsx_copy_nums = 1;
+			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+			emulated = kvmppc_handle_vsx_store(run, vcpu,
+				rs, 8, 1);
+			break;
+
+		case OP_31_XOP_STXSSPX:
+			if (kvmppc_check_vsx_disabled(vcpu))
+				return EMULATE_DONE;
+			vcpu->arch.mmio_vsx_copy_nums = 1;
+			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+			vcpu->arch.mmio_sp64_extend = 1;
+			emulated = kvmppc_handle_vsx_store(run, vcpu,
+				rs, 4, 1);
+			break;
+
+		case OP_31_XOP_STXSIWX:
+			if (kvmppc_check_vsx_disabled(vcpu))
+				return EMULATE_DONE;
+			vcpu->arch.mmio_vsx_offset = 1;
+			vcpu->arch.mmio_vsx_copy_nums = 1;
+			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
+			emulated = kvmppc_handle_vsx_store(run, vcpu,
+				rs, 4, 1);
+			break;
+
+		case OP_31_XOP_STXVD2X:
+			if (kvmppc_check_vsx_disabled(vcpu))
+				return EMULATE_DONE;
+			vcpu->arch.mmio_vsx_copy_nums = 2;
+			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+			emulated = kvmppc_handle_vsx_store(run, vcpu,
+				rs, 8, 1);
+			break;
+
+		case OP_31_XOP_STXVW4X:
+			if (kvmppc_check_vsx_disabled(vcpu))
+				return EMULATE_DONE;
+			vcpu->arch.mmio_vsx_copy_nums = 4;
+			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
+			emulated = kvmppc_handle_vsx_store(run, vcpu,
+				rs, 4, 1);
+			break;
+#endif /* CONFIG_VSX */
 		default:
 			emulated = EMULATE_FAIL;
 			break;
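[Editor note, not part of the patch: for the 16-byte vector forms above (lxvd2x/stxvd2x, lxvw4x/stxvw4x) the emulation does not issue a single 16-byte MMIO access; it repeats the MMIO exit, copying 2 x 8 bytes or 4 x 4 bytes and advancing the address each round. A rough sketch of that repeat loop, for illustration only:]

	/* illustrative only: one 16-byte lxvd2x broken into repeated 8-byte copies */
	vcpu->arch.mmio_vsx_copy_nums = 2;	/* lxvd2x: two doubleword copies */
	while (vcpu->arch.mmio_vsx_copy_nums) {
		/* ...one 8-byte MMIO load completes here... */
		vcpu->arch.paddr_accessed += 8;		/* next guest physical address */
		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;		/* next doubleword within the VSR */
	}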
@@ -167,11 +469,61 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
 		break;

-	/* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
+#ifdef CONFIG_PPC_FPU
+	case OP_STFS:
+		if (kvmppc_check_fp_disabled(vcpu))
+			return EMULATE_DONE;
+		vcpu->arch.mmio_sp64_extend = 1;
+		emulated = kvmppc_handle_store(run, vcpu,
+			VCPU_FPR(vcpu, rs),
+			4, 1);
+		break;
+
+	case OP_STFSU:
+		if (kvmppc_check_fp_disabled(vcpu))
+			return EMULATE_DONE;
+		vcpu->arch.mmio_sp64_extend = 1;
+		emulated = kvmppc_handle_store(run, vcpu,
+			VCPU_FPR(vcpu, rs),
+			4, 1);
+		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+		break;
+
+	case OP_STFD:
+		if (kvmppc_check_fp_disabled(vcpu))
+			return EMULATE_DONE;
+		emulated = kvmppc_handle_store(run, vcpu,
+			VCPU_FPR(vcpu, rs),
+			8, 1);
+		break;
+
+	case OP_STFDU:
+		if (kvmppc_check_fp_disabled(vcpu))
+			return EMULATE_DONE;
+		emulated = kvmppc_handle_store(run, vcpu,
+			VCPU_FPR(vcpu, rs),
+			8, 1);
+		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+		break;
+#endif
+
 	case OP_LD:
 		rt = get_rt(inst);
+		switch (inst & 3) {
+		case 0: /* ld */
 			emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
 			break;
+		case 1: /* ldu */
+			emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
+			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+			break;
+		case 2: /* lwa */
+			emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1);
+			break;
+		default:
+			emulated = EMULATE_FAIL;
+		}
+		break;

 	case OP_LWZU:
 		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
@@ -193,31 +545,37 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 				4, 1);
 		break;

-	/* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
 	case OP_STD:
 		rs = get_rs(inst);
+		switch (inst & 3) {
+		case 0: /* std */
 			emulated = kvmppc_handle_store(run, vcpu,
-				kvmppc_get_gpr(vcpu, rs),
-				8, 1);
+				kvmppc_get_gpr(vcpu, rs), 8, 1);
+			break;
+		case 1: /* stdu */
+			emulated = kvmppc_handle_store(run, vcpu,
+				kvmppc_get_gpr(vcpu, rs), 8, 1);
+			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+			break;
+		default:
+			emulated = EMULATE_FAIL;
+		}
 		break;

 	case OP_STWU:
 		emulated = kvmppc_handle_store(run, vcpu,
-				kvmppc_get_gpr(vcpu, rs),
-				4, 1);
+				kvmppc_get_gpr(vcpu, rs), 4, 1);
 		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 		break;

 	case OP_STB:
 		emulated = kvmppc_handle_store(run, vcpu,
-				kvmppc_get_gpr(vcpu, rs),
-				1, 1);
+				kvmppc_get_gpr(vcpu, rs), 1, 1);
 		break;

 	case OP_STBU:
 		emulated = kvmppc_handle_store(run, vcpu,
-				kvmppc_get_gpr(vcpu, rs),
-				1, 1);
+				kvmppc_get_gpr(vcpu, rs), 1, 1);
 		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 		break;

@@ -241,17 +599,49 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)

 	case OP_STH:
 		emulated = kvmppc_handle_store(run, vcpu,
-				kvmppc_get_gpr(vcpu, rs),
-				2, 1);
+				kvmppc_get_gpr(vcpu, rs), 2, 1);
 		break;

 	case OP_STHU:
 		emulated = kvmppc_handle_store(run, vcpu,
-				kvmppc_get_gpr(vcpu, rs),
-				2, 1);
+				kvmppc_get_gpr(vcpu, rs), 2, 1);
 		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 		break;

+#ifdef CONFIG_PPC_FPU
+	case OP_LFS:
+		if (kvmppc_check_fp_disabled(vcpu))
+			return EMULATE_DONE;
+		vcpu->arch.mmio_sp64_extend = 1;
+		emulated = kvmppc_handle_load(run, vcpu,
+			KVM_MMIO_REG_FPR|rt, 4, 1);
+		break;
+
+	case OP_LFSU:
+		if (kvmppc_check_fp_disabled(vcpu))
+			return EMULATE_DONE;
+		vcpu->arch.mmio_sp64_extend = 1;
+		emulated = kvmppc_handle_load(run, vcpu,
+			KVM_MMIO_REG_FPR|rt, 4, 1);
+		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+		break;
+
+	case OP_LFD:
+		if (kvmppc_check_fp_disabled(vcpu))
+			return EMULATE_DONE;
+		emulated = kvmppc_handle_load(run, vcpu,
+			KVM_MMIO_REG_FPR|rt, 8, 1);
+		break;
+
+	case OP_LFDU:
+		if (kvmppc_check_fp_disabled(vcpu))
+			return EMULATE_DONE;
+		emulated = kvmppc_handle_load(run, vcpu,
+			KVM_MMIO_REG_FPR|rt, 8, 1);
+		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+		break;
+#endif
+
 	default:
 		emulated = EMULATE_FAIL;
 		break;
@@ -37,6 +37,7 @@
 #include <asm/cputhreads.h>
 #include <asm/irqflags.h>
 #include <asm/iommu.h>
+#include <asm/switch_to.h>
 #include "timing.h"
 #include "irq.h"
 #include "../mm/mmu_decl.h"
@@ -533,6 +534,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 #ifdef CONFIG_PPC_BOOK3S_64
 	case KVM_CAP_SPAPR_TCE:
 	case KVM_CAP_SPAPR_TCE_64:
+		/* fallthrough */
+	case KVM_CAP_SPAPR_TCE_VFIO:
 	case KVM_CAP_PPC_RTAS:
 	case KVM_CAP_PPC_FIXUP_HCALL:
 	case KVM_CAP_PPC_ENABLE_HCALL:
@@ -801,6 +804,129 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
 		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
 }

+#ifdef CONFIG_VSX
+static inline int kvmppc_get_vsr_dword_offset(int index)
+{
+	int offset;
+
+	if ((index != 0) && (index != 1))
+		return -1;
+
+#ifdef __BIG_ENDIAN
+	offset = index;
+#else
+	offset = 1 - index;
+#endif
+
+	return offset;
+}
+
+static inline int kvmppc_get_vsr_word_offset(int index)
+{
+	int offset;
+
+	if ((index > 3) || (index < 0))
+		return -1;
+
+#ifdef __BIG_ENDIAN
+	offset = index;
+#else
+	offset = 3 - index;
+#endif
+	return offset;
+}
+
+static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
+	u64 gpr)
+{
+	union kvmppc_one_reg val;
+	int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
+	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
+
+	if (offset == -1)
+		return;
+
+	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
+		val.vval = VCPU_VSX_VR(vcpu, index);
+		val.vsxval[offset] = gpr;
+		VCPU_VSX_VR(vcpu, index) = val.vval;
+	} else {
+		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
+	}
+}
+
+static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
+	u64 gpr)
+{
+	union kvmppc_one_reg val;
+	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
+
+	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
+		val.vval = VCPU_VSX_VR(vcpu, index);
+		val.vsxval[0] = gpr;
+		val.vsxval[1] = gpr;
+		VCPU_VSX_VR(vcpu, index) = val.vval;
+	} else {
+		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
+		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
+	}
+}
+
+static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
+	u32 gpr32)
+{
+	union kvmppc_one_reg val;
+	int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
+	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
+	int dword_offset, word_offset;
+
+	if (offset == -1)
+		return;
+
+	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
+		val.vval = VCPU_VSX_VR(vcpu, index);
+		val.vsx32val[offset] = gpr32;
+		VCPU_VSX_VR(vcpu, index) = val.vval;
+	} else {
+		dword_offset = offset / 2;
+		word_offset = offset % 2;
+		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
+		val.vsx32val[word_offset] = gpr32;
+		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
+	}
+}
+#endif /* CONFIG_VSX */
+
+#ifdef CONFIG_PPC_FPU
+static inline u64 sp_to_dp(u32 fprs)
+{
+	u64 fprd;
+
+	preempt_disable();
+	enable_kernel_fp();
+	asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs)
+	     : "fr0");
+	preempt_enable();
+	return fprd;
+}
+
+static inline u32 dp_to_sp(u64 fprd)
+{
+	u32 fprs;
+
+	preempt_disable();
+	enable_kernel_fp();
+	asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd)
+	     : "fr0");
+	preempt_enable();
+	return fprs;
+}
+
+#else
+#define sp_to_dp(x)	(x)
+#define dp_to_sp(x)	(x)
+#endif /* CONFIG_PPC_FPU */
+
 static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                       struct kvm_run *run)
 {
@@ -827,6 +953,10 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
 		}
 	}

+	/* conversion between single and double precision */
+	if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
+		gpr = sp_to_dp(gpr);
+
 	if (vcpu->arch.mmio_sign_extend) {
 		switch (run->mmio.len) {
 #ifdef CONFIG_PPC64
@@ -843,8 +973,6 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
 		}
 	}

-	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
-
 	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
 	case KVM_MMIO_REG_GPR:
 		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
@@ -860,6 +988,17 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
 		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
 		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
 		break;
+#endif
+#ifdef CONFIG_VSX
+	case KVM_MMIO_REG_VSX:
+		if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_DWORD)
+			kvmppc_set_vsr_dword(vcpu, gpr);
+		else if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_WORD)
+			kvmppc_set_vsr_word(vcpu, gpr);
+		else if (vcpu->arch.mmio_vsx_copy_type ==
+				KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
+			kvmppc_set_vsr_dword_dump(vcpu, gpr);
+		break;
 #endif
 	default:
 		BUG();
@@ -927,6 +1066,35 @@ int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
 }

+#ifdef CONFIG_VSX
+int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+			unsigned int rt, unsigned int bytes,
+			int is_default_endian, int mmio_sign_extend)
+{
+	enum emulation_result emulated = EMULATE_DONE;
+
+	/* Currently, mmio_vsx_copy_nums only allowed to be less than 4 */
+	if ( (vcpu->arch.mmio_vsx_copy_nums > 4) ||
+		(vcpu->arch.mmio_vsx_copy_nums < 0) ) {
+		return EMULATE_FAIL;
+	}
+
+	while (vcpu->arch.mmio_vsx_copy_nums) {
+		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
+			is_default_endian, mmio_sign_extend);
+
+		if (emulated != EMULATE_DONE)
+			break;
+
+		vcpu->arch.paddr_accessed += run->mmio.len;
+
+		vcpu->arch.mmio_vsx_copy_nums--;
+		vcpu->arch.mmio_vsx_offset++;
+	}
+	return emulated;
+}
+#endif /* CONFIG_VSX */
+
 int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			u64 val, unsigned int bytes, int is_default_endian)
 {
@@ -952,6 +1120,9 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	vcpu->mmio_needed = 1;
 	vcpu->mmio_is_write = 1;

+	if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
+		val = dp_to_sp(val);
+
 	/* Store the value at the lowest bytes in 'data'. */
 	if (!host_swabbed) {
 		switch (bytes) {
@@ -985,6 +1156,129 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 }
 EXPORT_SYMBOL_GPL(kvmppc_handle_store);

+#ifdef CONFIG_VSX
+static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
+{
+	u32 dword_offset, word_offset;
+	union kvmppc_one_reg reg;
+	int vsx_offset = 0;
+	int copy_type = vcpu->arch.mmio_vsx_copy_type;
+	int result = 0;
+
+	switch (copy_type) {
+	case KVMPPC_VSX_COPY_DWORD:
+		vsx_offset =
+			kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
+
+		if (vsx_offset == -1) {
+			result = -1;
+			break;
+		}
+
+		if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
+			*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
+		} else {
+			reg.vval = VCPU_VSX_VR(vcpu, rs);
+			*val = reg.vsxval[vsx_offset];
+		}
+		break;
+
+	case KVMPPC_VSX_COPY_WORD:
+		vsx_offset =
+			kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
+
+		if (vsx_offset == -1) {
+			result = -1;
+			break;
+		}
+
+		if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
+			dword_offset = vsx_offset / 2;
+			word_offset = vsx_offset % 2;
+			reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
+			*val = reg.vsx32val[word_offset];
+		} else {
+			reg.vval = VCPU_VSX_VR(vcpu, rs);
+			*val = reg.vsx32val[vsx_offset];
+		}
+		break;
+
+	default:
+		result = -1;
+		break;
+	}
+
+	return result;
+}
+
+int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+			int rs, unsigned int bytes, int is_default_endian)
+{
+	u64 val;
+	enum emulation_result emulated = EMULATE_DONE;
+
+	vcpu->arch.io_gpr = rs;
+
+	/* Currently, mmio_vsx_copy_nums only allowed to be less than 4 */
+	if ( (vcpu->arch.mmio_vsx_copy_nums > 4) ||
+		(vcpu->arch.mmio_vsx_copy_nums < 0) ) {
+		return EMULATE_FAIL;
+	}
+
+	while (vcpu->arch.mmio_vsx_copy_nums) {
+		if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
+			return EMULATE_FAIL;
+
+		emulated = kvmppc_handle_store(run, vcpu,
+			 val, bytes, is_default_endian);
+
+		if (emulated != EMULATE_DONE)
+			break;
+
+		vcpu->arch.paddr_accessed += run->mmio.len;
+
+		vcpu->arch.mmio_vsx_copy_nums--;
+		vcpu->arch.mmio_vsx_offset++;
+	}
+
+	return emulated;
+}
+
+static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
+			struct kvm_run *run)
+{
+	enum emulation_result emulated = EMULATE_FAIL;
+	int r;
+
+	vcpu->arch.paddr_accessed += run->mmio.len;
+
+	if (!vcpu->mmio_is_write) {
+		emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
+			 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
+	} else {
+		emulated = kvmppc_handle_vsx_store(run, vcpu,
+			 vcpu->arch.io_gpr, run->mmio.len, 1);
+	}
+
+	switch (emulated) {
+	case EMULATE_DO_MMIO:
+		run->exit_reason = KVM_EXIT_MMIO;
+		r = RESUME_HOST;
+		break;
+	case EMULATE_FAIL:
+		pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+		r = RESUME_HOST;
+		break;
+	default:
+		r = RESUME_GUEST;
+		break;
+	}
+	return r;
+}
+#endif /* CONFIG_VSX */
+
 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 {
 	int r = 0;
@@ -1087,13 +1381,24 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	int r;
 	sigset_t sigsaved;

-	if (vcpu->sigset_active)
-		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
-
 	if (vcpu->mmio_needed) {
+		vcpu->mmio_needed = 0;
 		if (!vcpu->mmio_is_write)
 			kvmppc_complete_mmio_load(vcpu, run);
-		vcpu->mmio_needed = 0;
+#ifdef CONFIG_VSX
+		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
+			vcpu->arch.mmio_vsx_copy_nums--;
+			vcpu->arch.mmio_vsx_offset++;
+		}
+
+		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
+			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
+			if (r == RESUME_HOST) {
+				vcpu->mmio_needed = 1;
+				return r;
+			}
+		}
+#endif
 	} else if (vcpu->arch.osi_needed) {
 		u64 *gprs = run->osi.gprs;
 		int i;
@@ -1115,6 +1420,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 #endif
 	}

+	if (vcpu->sigset_active)
+		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
 	if (run->immediate_exit)
 		r = -EINTR;
 	else
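[Editor note, not part of the patch: the kvmppc_get_vsr_dword_offset()/kvmppc_get_vsr_word_offset() helpers added in the hunk above exist because the in-register doubleword and word numbering is mirrored between big- and little-endian hosts. A compact illustration of the same mapping, assumptions only, not the kernel code:]

	/* illustrative only: mirrors the helpers above */
	static int dword_off_be(int i) { return i; }		/* big-endian host */
	static int dword_off_le(int i) { return 1 - i; }	/* little-endian host: 0<->1 */
	static int word_off_le(int i)  { return 3 - i; }	/* little-endian host: 0<->3, 1<->2 */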
@@ -314,6 +314,25 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
 }
 EXPORT_SYMBOL_GPL(mm_iommu_lookup);

+struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
+		unsigned long ua, unsigned long size)
+{
+	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
+
+	list_for_each_entry_lockless(mem, &mm->context.iommu_group_mem_list,
+			next) {
+		if ((mem->ua <= ua) &&
+				(ua + size <= mem->ua +
+				 (mem->entries << PAGE_SHIFT))) {
+			ret = mem;
+			break;
+		}
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mm_iommu_lookup_rm);
+
 struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
 		unsigned long ua, unsigned long entries)
 {
@@ -345,6 +364,26 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
 }
 EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);

+long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
+		unsigned long ua, unsigned long *hpa)
+{
+	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
+	void *va = &mem->hpas[entry];
+	unsigned long *pa;
+
+	if (entry >= mem->entries)
+		return -EFAULT;
+
+	pa = (void *) vmalloc_to_phys(va);
+	if (!pa)
+		return -EFAULT;
+
+	*hpa = *pa | (ua & ~PAGE_MASK);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa_rm);
+
 long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
 {
 	if (atomic64_inc_not_zero(&mem->mapped))
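[Editor note, not part of the patch: the real-mode translation above is plain arithmetic over the preregistered region. A worked example, assuming 64K pages (PAGE_SHIFT = 16) and that hpas[] holds one host physical page address per registered page:]

	/* illustrative numbers only */
	/* ua          = 0x7fff00012345, mem->ua = 0x7fff00000000          */
	/* entry       = (ua - mem->ua) >> 16 = 0x1                        */
	/* *hpa        = hpas[1] | (ua & 0xffff) = page frame | 0x2345     */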
@ -1424,8 +1424,7 @@ static void pnv_pci_ioda2_release_dma_pe(struct pci_dev *dev, struct pnv_ioda_pe
|
|||||||
iommu_group_put(pe->table_group.group);
|
iommu_group_put(pe->table_group.group);
|
||||||
BUG_ON(pe->table_group.group);
|
BUG_ON(pe->table_group.group);
|
||||||
}
|
}
|
||||||
pnv_pci_ioda2_table_free_pages(tbl);
|
iommu_tce_table_put(tbl);
|
||||||
iommu_free_table(tbl, of_node_full_name(dev->dev.of_node));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void pnv_ioda_release_vf_PE(struct pci_dev *pdev)
|
static void pnv_ioda_release_vf_PE(struct pci_dev *pdev)
|
||||||
@ -1860,6 +1859,17 @@ static int pnv_ioda1_tce_xchg(struct iommu_table *tbl, long index,
|
|||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int pnv_ioda1_tce_xchg_rm(struct iommu_table *tbl, long index,
|
||||||
|
unsigned long *hpa, enum dma_data_direction *direction)
|
||||||
|
{
|
||||||
|
long ret = pnv_tce_xchg(tbl, index, hpa, direction);
|
||||||
|
|
||||||
|
if (!ret)
|
||||||
|
pnv_pci_p7ioc_tce_invalidate(tbl, index, 1, true);
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index,
|
static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index,
|
||||||
@ -1874,6 +1884,7 @@ static struct iommu_table_ops pnv_ioda1_iommu_ops = {
|
|||||||
.set = pnv_ioda1_tce_build,
|
.set = pnv_ioda1_tce_build,
|
||||||
#ifdef CONFIG_IOMMU_API
|
#ifdef CONFIG_IOMMU_API
|
||||||
.exchange = pnv_ioda1_tce_xchg,
|
.exchange = pnv_ioda1_tce_xchg,
|
||||||
|
.exchange_rm = pnv_ioda1_tce_xchg_rm,
|
||||||
#endif
|
#endif
|
||||||
.clear = pnv_ioda1_tce_free,
|
.clear = pnv_ioda1_tce_free,
|
||||||
.get = pnv_tce_get,
|
.get = pnv_tce_get,
|
||||||
@ -1948,7 +1959,7 @@ static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
|
|||||||
{
|
{
|
||||||
struct iommu_table_group_link *tgl;
|
struct iommu_table_group_link *tgl;
|
||||||
|
|
||||||
list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
|
list_for_each_entry_lockless(tgl, &tbl->it_group_list, next) {
|
||||||
struct pnv_ioda_pe *pe = container_of(tgl->table_group,
|
struct pnv_ioda_pe *pe = container_of(tgl->table_group,
|
||||||
struct pnv_ioda_pe, table_group);
|
struct pnv_ioda_pe, table_group);
|
||||||
struct pnv_phb *phb = pe->phb;
|
struct pnv_phb *phb = pe->phb;
|
||||||
@ -2004,6 +2015,17 @@ static int pnv_ioda2_tce_xchg(struct iommu_table *tbl, long index,
|
|||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int pnv_ioda2_tce_xchg_rm(struct iommu_table *tbl, long index,
|
||||||
|
unsigned long *hpa, enum dma_data_direction *direction)
|
||||||
|
{
|
||||||
|
long ret = pnv_tce_xchg(tbl, index, hpa, direction);
|
||||||
|
|
||||||
|
if (!ret)
|
||||||
|
pnv_pci_ioda2_tce_invalidate(tbl, index, 1, true);
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
|
static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
|
||||||
@ -2017,13 +2039,13 @@ static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
|
|||||||
static void pnv_ioda2_table_free(struct iommu_table *tbl)
|
static void pnv_ioda2_table_free(struct iommu_table *tbl)
|
||||||
{
|
{
|
||||||
pnv_pci_ioda2_table_free_pages(tbl);
|
pnv_pci_ioda2_table_free_pages(tbl);
|
||||||
iommu_free_table(tbl, "pnv");
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct iommu_table_ops pnv_ioda2_iommu_ops = {
|
static struct iommu_table_ops pnv_ioda2_iommu_ops = {
|
||||||
.set = pnv_ioda2_tce_build,
|
.set = pnv_ioda2_tce_build,
|
||||||
#ifdef CONFIG_IOMMU_API
|
#ifdef CONFIG_IOMMU_API
|
||||||
.exchange = pnv_ioda2_tce_xchg,
|
.exchange = pnv_ioda2_tce_xchg,
|
||||||
|
.exchange_rm = pnv_ioda2_tce_xchg_rm,
|
||||||
#endif
|
#endif
|
||||||
.clear = pnv_ioda2_tce_free,
|
.clear = pnv_ioda2_tce_free,
|
||||||
.get = pnv_tce_get,
|
.get = pnv_tce_get,
|
||||||
@ -2203,7 +2225,7 @@ found:
|
|||||||
__free_pages(tce_mem, get_order(tce32_segsz * segs));
|
__free_pages(tce_mem, get_order(tce32_segsz * segs));
|
||||||
if (tbl) {
|
if (tbl) {
|
||||||
pnv_pci_unlink_table_and_group(tbl, &pe->table_group);
|
pnv_pci_unlink_table_and_group(tbl, &pe->table_group);
|
||||||
iommu_free_table(tbl, "pnv");
|
iommu_tce_table_put(tbl);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2293,16 +2315,16 @@ static long pnv_pci_ioda2_create_table(struct iommu_table_group *table_group,
|
|||||||
if (!tbl)
|
if (!tbl)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
|
tbl->it_ops = &pnv_ioda2_iommu_ops;
|
||||||
|
|
||||||
ret = pnv_pci_ioda2_table_alloc_pages(nid,
|
ret = pnv_pci_ioda2_table_alloc_pages(nid,
|
||||||
bus_offset, page_shift, window_size,
|
bus_offset, page_shift, window_size,
|
||||||
levels, tbl);
|
levels, tbl);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
iommu_free_table(tbl, "pnv");
|
iommu_tce_table_put(tbl);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
tbl->it_ops = &pnv_ioda2_iommu_ops;
|
|
||||||
|
|
||||||
*ptbl = tbl;
|
*ptbl = tbl;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
@ -2343,7 +2365,7 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
|
|||||||
if (rc) {
|
if (rc) {
|
||||||
pe_err(pe, "Failed to configure 32-bit TCE table, err %ld\n",
|
pe_err(pe, "Failed to configure 32-bit TCE table, err %ld\n",
|
||||||
rc);
|
rc);
|
||||||
pnv_ioda2_table_free(tbl);
|
iommu_tce_table_put(tbl);
|
||||||
return rc;
|
return rc;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2431,7 +2453,7 @@ static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
 	pnv_pci_ioda2_unset_window(&pe->table_group, 0);
 	if (pe->pbus)
 		pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
-	pnv_ioda2_table_free(tbl);
+	iommu_tce_table_put(tbl);
 }
 
 static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
@ -3406,7 +3428,7 @@ static void pnv_pci_ioda1_release_pe_dma(struct pnv_ioda_pe *pe)
 	}
 
 	free_pages(tbl->it_base, get_order(tbl->it_size << 3));
-	iommu_free_table(tbl, "pnv");
+	iommu_tce_table_put(tbl);
 }
 
 static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
@ -3433,7 +3455,7 @@ static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
 	}
 
 	pnv_pci_ioda2_table_free_pages(tbl);
-	iommu_free_table(tbl, "pnv");
+	iommu_tce_table_put(tbl);
 }
 
 static void pnv_ioda_free_pe_seg(struct pnv_ioda_pe *pe,
@ -767,6 +767,7 @@ struct iommu_table *pnv_pci_table_alloc(int nid)
 
 	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid);
 	INIT_LIST_HEAD_RCU(&tbl->it_group_list);
+	kref_init(&tbl->it_kref);
 
 	return tbl;
 }
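The kref initialised here (and in the pseries allocation path below) is what the iommu_tce_table_put() calls introduced throughout this patch eventually drop. The sketch below is illustrative only: it assumes the existing iommu_free_table() remains the underlying destructor, and the release-callback name is invented for the example. It shows one plausible way the get/put pair can be layered on it_kref.

/* Illustration only: reference counting an iommu_table via it_kref. */
struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl)
{
	if (tbl)
		kref_get(&tbl->it_kref);

	return tbl;
}

static void iommu_table_release(struct kref *kref)	/* hypothetical name */
{
	struct iommu_table *tbl = container_of(kref, struct iommu_table, it_kref);

	/* Last user gone: free the table pages and the structure itself. */
	iommu_free_table(tbl, "");
}

int iommu_tce_table_put(struct iommu_table *tbl)
{
	if (WARN_ON(!tbl))
		return 0;

	/* Returns nonzero when this call released the final reference. */
	return kref_put(&tbl->it_kref, iommu_table_release);
}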
@ -74,6 +74,7 @@ static struct iommu_table_group *iommu_pseries_alloc_group(int node)
 		goto fail_exit;
 
 	INIT_LIST_HEAD_RCU(&tbl->it_group_list);
+	kref_init(&tbl->it_kref);
 	tgl->table_group = table_group;
 	list_add_rcu(&tgl->next, &tbl->it_group_list);
 
@ -115,7 +116,7 @@ static void iommu_pseries_free_group(struct iommu_table_group *table_group,
 		BUG_ON(table_group->group);
 	}
 #endif
-	iommu_free_table(tbl, node_name);
+	iommu_tce_table_put(tbl);
 
 	kfree(table_group);
 }
@ -1318,7 +1318,7 @@ static void vio_dev_release(struct device *dev)
 	struct iommu_table *tbl = get_iommu_table_base(dev);
 
 	if (tbl)
-		iommu_free_table(tbl, of_node_full_name(dev->of_node));
+		iommu_tce_table_put(tbl);
 	of_node_put(dev->of_node);
 	kfree(to_vio_dev(dev));
 }
@ -680,7 +680,7 @@ static void tce_iommu_free_table(struct tce_container *container,
 	unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;
 
 	tce_iommu_userspace_view_free(tbl, container->mm);
-	tbl->it_ops->free(tbl);
+	iommu_tce_table_put(tbl);
 	decrement_locked_vm(container->mm, pages);
 }
 
@ -892,6 +892,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_MIPS_64BIT 139
 #define KVM_CAP_S390_GS 140
 #define KVM_CAP_S390_AIS 141
+#define KVM_CAP_SPAPR_TCE_VFIO 142
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@ -1096,6 +1097,7 @@ struct kvm_device_attr {
 #define KVM_DEV_VFIO_GROUP 1
 #define KVM_DEV_VFIO_GROUP_ADD 1
 #define KVM_DEV_VFIO_GROUP_DEL 2
+#define KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE 3
 
 enum kvm_device_type {
 	KVM_DEV_TYPE_FSL_MPIC_20 = 1,
@ -1117,6 +1119,11 @@ enum kvm_device_type {
 	KVM_DEV_TYPE_MAX,
 };
 
+struct kvm_vfio_spapr_tce {
+	__s32	groupfd;
+	__s32	tablefd;
+};
+
 /*
  * ioctls for VM fds
  */
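From userspace, the new attribute is set on the KVM-VFIO device with the standard KVM_SET_DEVICE_ATTR ioctl. The sketch below is not part of this patch; the function name is invented, and kvm_vfio_dev_fd, group_fd and table_fd are assumed to have been obtained elsewhere (KVM_CREATE_DEVICE, opening the VFIO group, and KVM_CREATE_SPAPR_TCE respectively).

/* Illustrative userspace usage of KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE. */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int set_spapr_tce(int kvm_vfio_dev_fd, int group_fd, int table_fd)
{
	struct kvm_vfio_spapr_tce param = {
		.groupfd = group_fd,
		.tablefd = table_fd,
	};
	struct kvm_device_attr attr = {
		.group = KVM_DEV_VFIO_GROUP,
		.attr = KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE,
		.addr = (__u64)(unsigned long)&param,
	};

	/* Associates the TCE table fd with the VFIO group fd inside KVM. */
	return ioctl(kvm_vfio_dev_fd, KVM_SET_DEVICE_ATTR, &attr);
}

Note the ordering implied by the kernel-side handler added later in this patch: the group fd must already have been registered with KVM_DEV_VFIO_GROUP_ADD, otherwise the handler returns -ENOENT.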
virt/kvm/vfio.c (105 changed lines)
@ -20,6 +20,10 @@
 #include <linux/vfio.h>
 #include "vfio.h"
 
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+#include <asm/kvm_ppc.h>
+#endif
+
 struct kvm_vfio_group {
 	struct list_head node;
 	struct vfio_group *vfio_group;
@ -89,6 +93,47 @@ static bool kvm_vfio_group_is_coherent(struct vfio_group *vfio_group)
 	return ret > 0;
 }
 
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+static int kvm_vfio_external_user_iommu_id(struct vfio_group *vfio_group)
+{
+	int (*fn)(struct vfio_group *);
+	int ret = -EINVAL;
+
+	fn = symbol_get(vfio_external_user_iommu_id);
+	if (!fn)
+		return ret;
+
+	ret = fn(vfio_group);
+
+	symbol_put(vfio_external_user_iommu_id);
+
+	return ret;
+}
+
+static struct iommu_group *kvm_vfio_group_get_iommu_group(
+		struct vfio_group *group)
+{
+	int group_id = kvm_vfio_external_user_iommu_id(group);
+
+	if (group_id < 0)
+		return NULL;
+
+	return iommu_group_get_by_id(group_id);
+}
+
+static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm,
+		struct vfio_group *vfio_group)
+{
+	struct iommu_group *grp = kvm_vfio_group_get_iommu_group(vfio_group);
+
+	if (WARN_ON_ONCE(!grp))
+		return;
+
+	kvm_spapr_tce_release_iommu_group(kvm, grp);
+	iommu_group_put(grp);
+}
+#endif
+
 /*
  * Groups can use the same or different IOMMU domains. If the same then
  * adding a new group may change the coherency of groups we've previously
@ -211,6 +256,9 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
 
 		mutex_unlock(&kv->lock);
 
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+		kvm_spapr_tce_release_vfio_group(dev->kvm, vfio_group);
+#endif
 		kvm_vfio_group_set_kvm(vfio_group, NULL);
 
 		kvm_vfio_group_put_external_user(vfio_group);
@ -218,6 +266,57 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
 		kvm_vfio_update_coherency(dev);
 
 		return ret;
+
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+	case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE: {
+		struct kvm_vfio_spapr_tce param;
+		struct kvm_vfio *kv = dev->private;
+		struct vfio_group *vfio_group;
+		struct kvm_vfio_group *kvg;
+		struct fd f;
+		struct iommu_group *grp;
+
+		if (copy_from_user(&param, (void __user *)arg,
+				sizeof(struct kvm_vfio_spapr_tce)))
+			return -EFAULT;
+
+		f = fdget(param.groupfd);
+		if (!f.file)
+			return -EBADF;
+
+		vfio_group = kvm_vfio_group_get_external_user(f.file);
+		fdput(f);
+
+		if (IS_ERR(vfio_group))
+			return PTR_ERR(vfio_group);
+
+		grp = kvm_vfio_group_get_iommu_group(vfio_group);
+		if (WARN_ON_ONCE(!grp)) {
+			kvm_vfio_group_put_external_user(vfio_group);
+			return -EIO;
+		}
+
+		ret = -ENOENT;
+
+		mutex_lock(&kv->lock);
+
+		list_for_each_entry(kvg, &kv->group_list, node) {
+			if (kvg->vfio_group != vfio_group)
+				continue;
+
+			ret = kvm_spapr_tce_attach_iommu_group(dev->kvm,
+					param.tablefd, grp);
+			break;
+		}
+
+		mutex_unlock(&kv->lock);
+
+		iommu_group_put(grp);
+		kvm_vfio_group_put_external_user(vfio_group);
+
+		return ret;
+	}
+#endif /* CONFIG_SPAPR_TCE_IOMMU */
 	}
 
 	return -ENXIO;
@ -242,6 +341,9 @@ static int kvm_vfio_has_attr(struct kvm_device *dev,
 		switch (attr->attr) {
 		case KVM_DEV_VFIO_GROUP_ADD:
 		case KVM_DEV_VFIO_GROUP_DEL:
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+		case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
+#endif
 			return 0;
 		}
 
@ -257,6 +359,9 @@ static void kvm_vfio_destroy(struct kvm_device *dev)
 	struct kvm_vfio_group *kvg, *tmp;
 
 	list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) {
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+		kvm_spapr_tce_release_vfio_group(dev->kvm, kvg->vfio_group);
+#endif
 		kvm_vfio_group_set_kvm(kvg->vfio_group, NULL);
 		kvm_vfio_group_put_external_user(kvg->vfio_group);
 		list_del(&kvg->node);