s390/fpu: get rid of MACHINE_HAS_VX
Get rid of MACHINE_HAS_VX and replace it with cpu_has_vx(), which is a short, readable wrapper for "test_facility(129)". Facility bit 129 is set if the vector facility is present. test_facility() also returns true for all bits that are set in the architecture level set (ALS) of the CPU the kernel is compiled for. This means that test_facility(129) is a compile-time constant which returns true for z13 and later, since the vector facility bit is part of the z13 kernel ALS.

As a result, the compiled code contains fewer runtime checks and less code.

Reviewed-by: Hendrik Brueckner <brueckner@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
parent 68422c0069
commit 18564756ab
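The compile-time effect described in the commit message comes from the way test_facility() consults the architecture level set (ALS) the kernel is built for. The stand-alone C sketch below only illustrates that idea; the names als_facilities and stfle_facilities are invented for the example and are not claimed to match the kernel's facility.h.

	#include <stdbool.h>

	#define FACILITY_BITS	256

	/* Facilities implied by the build target; assume bit 129 (vector) is in the ALS. */
	static const unsigned char als_facilities[FACILITY_BITS / 8] = {
		[129 / 8] = 0x80 >> (129 % 8),
	};

	/* Facility list as stored at boot (e.g. via STFLE); filled in at runtime. */
	static unsigned char stfle_facilities[FACILITY_BITS / 8];

	static inline bool __test_bit(unsigned long nr, const unsigned char *bits)
	{
		/* MSB-first bit numbering, as used for facility bits. */
		return (bits[nr >> 3] & (0x80 >> (nr & 7))) != 0;
	}

	static inline bool test_facility(unsigned long nr)
	{
		/* For a constant nr covered by the ALS this can fold to true at compile time. */
		if (__builtin_constant_p(nr) && __test_bit(nr, als_facilities))
			return true;
		return __test_bit(nr, stfle_facilities);
	}

	static inline bool cpu_has_vx(void)
	{
		return test_facility(129);	/* vector facility */
	}

With a scheme like this, a z13 (or later) build lets the compiler fold cpu_has_vx() to true, so the if (cpu_has_vx()) branches in the hunks below can disappear from the generated code; on older build targets the runtime facility list still decides.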
@@ -82,7 +82,7 @@ void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src,
 	 * it cannot handle a block of data or less, but otherwise
 	 * it can handle data of arbitrary size
 	 */
-	if (bytes <= CHACHA_BLOCK_SIZE || nrounds != 20 || !MACHINE_HAS_VX)
+	if (bytes <= CHACHA_BLOCK_SIZE || nrounds != 20 || !cpu_has_vx())
 		chacha_crypt_generic(state, dst, src, bytes, nrounds);
 	else
 		chacha20_crypt_s390(state, dst, src, bytes,
@@ -46,6 +46,7 @@
 
 #include <linux/preempt.h>
 #include <asm/asm-extable.h>
+#include <asm/fpu/internal.h>
 
 void save_fpu_regs(void);
 void load_fpu_regs(void);
@@ -10,8 +10,14 @@
 #define _ASM_S390_FPU_INTERNAL_H
 
 #include <linux/string.h>
+#include <asm/facility.h>
 #include <asm/fpu/types.h>
 
+static inline bool cpu_has_vx(void)
+{
+	return likely(test_facility(129));
+}
+
 static inline void save_vx_regs(__vector128 *vxrs)
 {
 	asm volatile(
@@ -41,7 +47,7 @@ static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
 {
 	fpregs->pad = 0;
 	fpregs->fpc = fpu->fpc;
-	if (MACHINE_HAS_VX)
+	if (cpu_has_vx())
 		convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs);
 	else
 		memcpy((freg_t *)&fpregs->fprs, fpu->fprs,
@@ -51,7 +57,7 @@ static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
 static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu)
 {
 	fpu->fpc = fpregs->fpc;
-	if (MACHINE_HAS_VX)
+	if (cpu_has_vx())
 		convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs);
 	else
 		memcpy(fpu->fprs, (freg_t *)&fpregs->fprs,
@@ -28,7 +28,6 @@
 #define MACHINE_FLAG_TOPOLOGY	BIT(10)
 #define MACHINE_FLAG_TE		BIT(11)
 #define MACHINE_FLAG_TLB_LC	BIT(12)
-#define MACHINE_FLAG_VX		BIT(13)
 #define MACHINE_FLAG_TLB_GUEST	BIT(14)
 #define MACHINE_FLAG_NX		BIT(15)
 #define MACHINE_FLAG_GS		BIT(16)
@@ -90,7 +89,6 @@ extern unsigned long mio_wb_bit_mask;
 #define MACHINE_HAS_TOPOLOGY	(S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
 #define MACHINE_HAS_TE		(S390_lowcore.machine_flags & MACHINE_FLAG_TE)
 #define MACHINE_HAS_TLB_LC	(S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC)
-#define MACHINE_HAS_VX		(S390_lowcore.machine_flags & MACHINE_FLAG_VX)
 #define MACHINE_HAS_TLB_GUEST	(S390_lowcore.machine_flags & MACHINE_FLAG_TLB_GUEST)
 #define MACHINE_HAS_NX		(S390_lowcore.machine_flags & MACHINE_FLAG_NX)
 #define MACHINE_HAS_GS		(S390_lowcore.machine_flags & MACHINE_FLAG_GS)
@@ -29,6 +29,7 @@
 #include <asm/lowcore.h>
 #include <asm/switch_to.h>
 #include <asm/vdso.h>
+#include <asm/fpu/api.h>
 #include "compat_linux.h"
 #include "compat_ptrace.h"
 #include "entry.h"
@@ -133,7 +134,7 @@ static int save_sigregs_ext32(struct pt_regs *regs,
 		return -EFAULT;
 
 	/* Save vector registers to signal stack */
-	if (MACHINE_HAS_VX) {
+	if (cpu_has_vx()) {
 		for (i = 0; i < __NUM_VXRS_LOW; i++)
 			vxrs[i] = current->thread.fpu.vxrs[i].low;
 		if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
@@ -161,7 +162,7 @@ static int restore_sigregs_ext32(struct pt_regs *regs,
 		*(__u32 *)&regs->gprs[i] = gprs_high[i];
 
 	/* Restore vector registers from signal stack */
-	if (MACHINE_HAS_VX) {
+	if (cpu_has_vx()) {
 		if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
 				     sizeof(sregs_ext->vxrs_low)) ||
 		    __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW,
@@ -261,7 +262,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
 	 * the machine supports it
 	 */
 	frame_size = sizeof(*frame) - sizeof(frame->sregs_ext.__reserved);
-	if (!MACHINE_HAS_VX)
+	if (!cpu_has_vx())
 		frame_size -= sizeof(frame->sregs_ext.vxrs_low) +
 			      sizeof(frame->sregs_ext.vxrs_high);
 	frame = get_sigframe(&ksig->ka, regs, frame_size);
@@ -344,11 +345,12 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
 	 * the machine supports it
 	 */
 	uc_flags = UC_GPRS_HIGH;
-	if (MACHINE_HAS_VX) {
+	if (cpu_has_vx()) {
 		uc_flags |= UC_VXRS;
-	} else
+	} else {
 		frame_size -= sizeof(frame->uc.uc_mcontext_ext.vxrs_low) +
 			      sizeof(frame->uc.uc_mcontext_ext.vxrs_high);
+	}
 	frame = get_sigframe(&ksig->ka, regs, frame_size);
 	if (frame == (void __user *) -1UL)
 		return -EFAULT;
@@ -22,6 +22,7 @@
 #include <asm/ipl.h>
 #include <asm/sclp.h>
 #include <asm/maccess.h>
+#include <asm/fpu/api.h>
 
 #define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y)))
 #define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
@@ -319,7 +320,7 @@ static void *fill_cpu_elf_notes(void *ptr, int cpu, struct save_area *sa)
 	ptr = nt_init(ptr, NT_S390_TODPREG, &sa->todpreg, sizeof(sa->todpreg));
 	ptr = nt_init(ptr, NT_S390_CTRS, &sa->ctrs, sizeof(sa->ctrs));
 	ptr = nt_init(ptr, NT_S390_PREFIX, &sa->prefix, sizeof(sa->prefix));
-	if (MACHINE_HAS_VX) {
+	if (cpu_has_vx()) {
 		ptr = nt_init(ptr, NT_S390_VXRS_HIGH,
 			      &sa->vxrs_high, sizeof(sa->vxrs_high));
 		ptr = nt_init(ptr, NT_S390_VXRS_LOW,
@@ -343,7 +344,7 @@ static size_t get_cpu_elf_notes_size(void)
 	size += nt_size(NT_S390_TODPREG, sizeof(sa->todpreg));
 	size += nt_size(NT_S390_CTRS, sizeof(sa->ctrs));
 	size += nt_size(NT_S390_PREFIX, sizeof(sa->prefix));
-	if (MACHINE_HAS_VX) {
+	if (cpu_has_vx()) {
 		size += nt_size(NT_S390_VXRS_HIGH, sizeof(sa->vxrs_high));
 		size += nt_size(NT_S390_VXRS_LOW, sizeof(sa->vxrs_low));
 	}
@@ -229,10 +229,8 @@ static __init void detect_machine_facilities(void)
 	}
 	if (test_facility(51))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
-	if (test_facility(129)) {
-		S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
+	if (test_facility(129))
 		system_ctl_set_bit(0, CR0_VECTOR_BIT);
-	}
 	if (test_facility(130))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_NX;
 	if (test_facility(133))
@@ -24,7 +24,7 @@ void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
 	/* Save floating point control */
 	asm volatile("stfpc %0" : "=Q" (state->fpc));
 
-	if (!MACHINE_HAS_VX) {
+	if (!cpu_has_vx()) {
 		if (flags & KERNEL_VXR_V0V7) {
 			/* Save floating-point registers */
 			asm volatile("std 0,%0" : "=Q" (state->fprs[0]));
@@ -106,7 +106,7 @@ void __kernel_fpu_end(struct kernel_fpu *state, u32 flags)
 	/* Restore floating-point controls */
 	asm volatile("lfpc %0" : : "Q" (state->fpc));
 
-	if (!MACHINE_HAS_VX) {
+	if (!cpu_has_vx()) {
 		if (flags & KERNEL_VXR_V0V7) {
 			/* Restore floating-point registers */
 			asm volatile("ld 0,%0" : : "Q" (state->fprs[0]));
@@ -181,7 +181,7 @@ void __load_fpu_regs(void)
 	struct fpu *state = &current->thread.fpu;
 
 	sfpc_safe(state->fpc);
-	if (likely(MACHINE_HAS_VX)) {
+	if (likely(cpu_has_vx())) {
 		asm volatile("lgr 1,%0\n"
 			     "VLM 0,15,0,1\n"
 			     "VLM 16,31,256,1\n"
@@ -232,7 +232,7 @@ void save_fpu_regs(void)
 	regs = current->thread.fpu.regs;
 
 	asm volatile("stfpc %0" : "=Q" (state->fpc));
-	if (likely(MACHINE_HAS_VX)) {
+	if (likely(cpu_has_vx())) {
 		asm volatile("lgr 1,%0\n"
 			     "VSTM 0,15,0,1\n"
 			     "VSTM 16,31,256,1\n"
@@ -91,7 +91,7 @@ static noinline void __machine_kdump(void *image)
 	}
 	/* Store status of the boot CPU */
 	mcesa = __va(S390_lowcore.mcesad & MCESA_ORIGIN_MASK);
-	if (MACHINE_HAS_VX)
+	if (cpu_has_vx())
 		save_vx_regs((__vector128 *) mcesa->vector_save_area);
 	if (MACHINE_HAS_GS) {
 		local_ctl_store(2, &cr2_old.reg);
@@ -32,6 +32,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/pai.h>
 #include <asm/vx-insn.h>
+#include <asm/fpu/api.h>
 
 struct mcck_struct {
 	unsigned int kill_task : 1;
@@ -45,7 +46,7 @@ static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck);
 
 static inline int nmi_needs_mcesa(void)
 {
-	return MACHINE_HAS_VX || MACHINE_HAS_GS;
+	return cpu_has_vx() || MACHINE_HAS_GS;
 }
 
 /*
@@ -235,7 +236,7 @@ static int notrace s390_validate_registers(union mci mci)
 	}
 
 	mcesa = __va(S390_lowcore.mcesad & MCESA_ORIGIN_MASK);
-	if (!MACHINE_HAS_VX) {
+	if (!cpu_has_vx()) {
 		/* Validate floating point registers */
 		asm volatile(
 			" ld 0,0(%0)\n"
@@ -20,8 +20,10 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
 		return 0;
 
 	idx -= PERF_REG_S390_FP0;
-	fp = MACHINE_HAS_VX ? *(freg_t *)(current->thread.fpu.vxrs + idx)
-			    : current->thread.fpu.fprs[idx];
+	if (cpu_has_vx())
+		fp = *(freg_t *)(current->thread.fpu.vxrs + idx);
+	else
+		fp = current->thread.fpu.fprs[idx];
 	return fp.ui;
 }
 
@@ -201,8 +201,8 @@ static int __init setup_hwcaps(void)
 	if (MACHINE_HAS_TE)
 		elf_hwcap |= HWCAP_TE;
 
-	/* Use MACHINE_HAS_VX instead of facility bit 129. */
-	if (MACHINE_HAS_VX) {
+	/* vector */
+	if (test_facility(129)) {
 		elf_hwcap |= HWCAP_VXRS;
 		if (test_facility(134))
 			elf_hwcap |= HWCAP_VXRS_BCD;
@@ -30,6 +30,7 @@
 #include <asm/switch_to.h>
 #include <asm/runtime_instr.h>
 #include <asm/facility.h>
+#include <asm/fpu/api.h>
 
 #include "entry.h"
 
@@ -254,7 +255,7 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
 	 * or the child->thread.fpu.vxrs array
 	 */
 	offset = addr - offsetof(struct user, regs.fp_regs.fprs);
-	if (MACHINE_HAS_VX)
+	if (cpu_has_vx())
 		tmp = *(addr_t *)
 		       ((addr_t) child->thread.fpu.vxrs + 2*offset);
 	else
@@ -402,7 +403,7 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
 	 * or the child->thread.fpu.vxrs array
 	 */
 	offset = addr - offsetof(struct user, regs.fp_regs.fprs);
-	if (MACHINE_HAS_VX)
+	if (cpu_has_vx())
 		*(addr_t *)((addr_t)
 			child->thread.fpu.vxrs + 2*offset) = data;
 	else
@@ -629,7 +630,7 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
 	 * or the child->thread.fpu.vxrs array
 	 */
 	offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs);
-	if (MACHINE_HAS_VX)
+	if (cpu_has_vx())
 		tmp = *(__u32 *)
 		       ((addr_t) child->thread.fpu.vxrs + 2*offset);
 	else
@@ -755,7 +756,7 @@ static int __poke_user_compat(struct task_struct *child,
 	 * or the child->thread.fpu.vxrs array
 	 */
 	offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs);
-	if (MACHINE_HAS_VX)
+	if (cpu_has_vx())
 		*(__u32 *)((addr_t)
 			child->thread.fpu.vxrs + 2*offset) = tmp;
 	else
@@ -911,7 +912,7 @@ static int s390_fpregs_set(struct task_struct *target,
 	if (target == current)
 		save_fpu_regs();
 
-	if (MACHINE_HAS_VX)
+	if (cpu_has_vx())
 		convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
 	else
 		memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
@@ -934,7 +935,7 @@ static int s390_fpregs_set(struct task_struct *target,
 	if (rc)
 		return rc;
 
-	if (MACHINE_HAS_VX)
+	if (cpu_has_vx())
 		convert_fp_to_vx(target->thread.fpu.vxrs, fprs);
 	else
 		memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs));
@@ -985,7 +986,7 @@ static int s390_vxrs_low_get(struct task_struct *target,
 	__u64 vxrs[__NUM_VXRS_LOW];
 	int i;
 
-	if (!MACHINE_HAS_VX)
+	if (!cpu_has_vx())
 		return -ENODEV;
 	if (target == current)
 		save_fpu_regs();
@@ -1002,7 +1003,7 @@ static int s390_vxrs_low_set(struct task_struct *target,
 	__u64 vxrs[__NUM_VXRS_LOW];
 	int i, rc;
 
-	if (!MACHINE_HAS_VX)
+	if (!cpu_has_vx())
 		return -ENODEV;
 	if (target == current)
 		save_fpu_regs();
@@ -1022,7 +1023,7 @@ static int s390_vxrs_high_get(struct task_struct *target,
 			      const struct user_regset *regset,
 			      struct membuf to)
 {
-	if (!MACHINE_HAS_VX)
+	if (!cpu_has_vx())
 		return -ENODEV;
 	if (target == current)
 		save_fpu_regs();
@@ -1037,7 +1038,7 @@ static int s390_vxrs_high_set(struct task_struct *target,
 {
 	int rc;
 
-	if (!MACHINE_HAS_VX)
+	if (!cpu_has_vx())
 		return -ENODEV;
 	if (target == current)
 		save_fpu_regs();
@@ -178,7 +178,7 @@ static int save_sigregs_ext(struct pt_regs *regs,
 	int i;
 
 	/* Save vector registers to signal stack */
-	if (MACHINE_HAS_VX) {
+	if (cpu_has_vx()) {
 		for (i = 0; i < __NUM_VXRS_LOW; i++)
 			vxrs[i] = current->thread.fpu.vxrs[i].low;
 		if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
@@ -198,7 +198,7 @@ static int restore_sigregs_ext(struct pt_regs *regs,
 	int i;
 
 	/* Restore vector registers from signal stack */
-	if (MACHINE_HAS_VX) {
+	if (cpu_has_vx()) {
 		if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
 				     sizeof(sregs_ext->vxrs_low)) ||
 		    __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW,
@@ -296,7 +296,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
 	 * included in the signal frame on a 31-bit system.
 	 */
 	frame_size = sizeof(*frame) - sizeof(frame->sregs_ext);
-	if (MACHINE_HAS_VX)
+	if (cpu_has_vx())
 		frame_size += sizeof(frame->sregs_ext);
 	frame = get_sigframe(ka, regs, frame_size);
 	if (frame == (void __user *) -1UL)
@@ -373,7 +373,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
 	 * included in the signal frame on a 31-bit system.
 	 */
 	uc_flags = 0;
-	if (MACHINE_HAS_VX) {
+	if (cpu_has_vx()) {
 		frame_size += sizeof(_sigregs_ext);
 		uc_flags |= UC_VXRS;
 	}
@@ -582,7 +582,7 @@ int smp_store_status(int cpu)
 	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
 			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
 		return -EIO;
-	if (!MACHINE_HAS_VX && !MACHINE_HAS_GS)
+	if (!cpu_has_vx() && !MACHINE_HAS_GS)
 		return 0;
 	pa = lc->mcesad & MCESA_ORIGIN_MASK;
 	if (MACHINE_HAS_GS)
@@ -638,7 +638,7 @@ void __init smp_save_dump_ipl_cpu(void)
 	copy_oldmem_kernel(regs, __LC_FPREGS_SAVE_AREA, 512);
 	save_area_add_regs(sa, regs);
 	memblock_free(regs, 512);
-	if (MACHINE_HAS_VX)
+	if (cpu_has_vx())
 		save_area_add_vxrs(sa, boot_cpu_vector_save_area);
 }
 
@@ -671,7 +671,7 @@ void __init smp_save_dump_secondary_cpus(void)
 			panic("could not allocate memory for save area\n");
 		__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, __pa(page));
 		save_area_add_regs(sa, page);
-		if (MACHINE_HAS_VX) {
+		if (cpu_has_vx()) {
 			__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, __pa(page));
 			save_area_add_vxrs(sa, page);
 		}
@@ -193,7 +193,7 @@ static void vector_exception(struct pt_regs *regs)
 {
 	int si_code, vic;
 
-	if (!MACHINE_HAS_VX) {
+	if (!cpu_has_vx()) {
 		do_trap(regs, SIGILL, ILL_ILLOPN, "illegal operation");
 		return;
 	}
@@ -639,7 +639,7 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
 	rc |= put_guest_lc(vcpu, mci.val, (u64 __user *) __LC_MCCK_CODE);
 
 	/* Register-save areas */
-	if (MACHINE_HAS_VX) {
+	if (cpu_has_vx()) {
 		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
 		rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA, fprs, 128);
 	} else {
@@ -618,7 +618,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		r = MACHINE_HAS_ESOP;
 		break;
 	case KVM_CAP_S390_VECTOR_REGISTERS:
-		r = MACHINE_HAS_VX;
+		r = test_facility(129);
 		break;
 	case KVM_CAP_S390_RI:
 		r = test_facility(64);
@@ -767,7 +767,7 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 		mutex_lock(&kvm->lock);
 		if (kvm->created_vcpus) {
 			r = -EBUSY;
-		} else if (MACHINE_HAS_VX) {
+		} else if (cpu_has_vx()) {
 			set_kvm_facility(kvm->arch.model.fac_mask, 129);
 			set_kvm_facility(kvm->arch.model.fac_list, 129);
 			if (test_facility(134)) {
@@ -3962,9 +3962,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	if (test_kvm_facility(vcpu->kvm, 156))
 		vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
 	/* fprs can be synchronized via vrs, even if the guest has no vx. With
-	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
+	 * cpu_has_vx(), (load|store)_fpu_regs() will work with vrs format.
 	 */
-	if (MACHINE_HAS_VX)
+	if (cpu_has_vx())
 		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
 	else
 		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
@@ -4317,7 +4317,7 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 	vcpu_load(vcpu);
 
 	vcpu->run->s.regs.fpc = fpu->fpc;
-	if (MACHINE_HAS_VX)
+	if (cpu_has_vx())
 		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
 				 (freg_t *) fpu->fprs);
 	else
@@ -4331,7 +4331,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
 	vcpu_load(vcpu);
 
-	if (MACHINE_HAS_VX)
+	if (cpu_has_vx())
 		convert_vx_to_fp((freg_t *) fpu->fprs,
 				 (__vector128 *) vcpu->run->s.regs.vrs);
 	else
@@ -4956,7 +4956,7 @@ static void sync_regs(struct kvm_vcpu *vcpu)
 	save_fpu_regs();
 	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
 	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
-	if (MACHINE_HAS_VX)
+	if (cpu_has_vx())
 		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
 	else
 		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
@@ -5135,7 +5135,7 @@ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
 		gpa -= __LC_FPREGS_SAVE_AREA;
 
 	/* manually convert vector registers if necessary */
-	if (MACHINE_HAS_VX) {
+	if (cpu_has_vx()) {
 		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
 				     fprs, 128);
@@ -158,7 +158,7 @@ static void raid6_s390vx$#_xor_syndrome(int disks, int start, int stop,
 
 static int raid6_s390vx$#_valid(void)
 {
-	return MACHINE_HAS_VX;
+	return cpu_has_vx();
 }
 
 const struct raid6_calls raid6_s390vx$# = {