tools headers arm64: Update sysreg.h with kernel sources
The users of sysreg.h (perf, KVM selftests) are now generating the necessary
sysreg-defs.h; sync sysreg.h with the kernel sources and fix the KVM selftests
that use macros which suffered a rename.

Signed-off-by: Jing Zhang <jingzhangos@google.com>
Acked-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20231011195740.3349631-5-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
commit 0359c946b1
parent 9697d84cc3
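The selftest hunks below all follow the same pattern: the hand-written feature-field macros are swapped for the names generated from the sysreg descriptions (ID_AA64PFR0_EL0 -> ID_AA64PFR0_EL1_EL0, ID_AA64DFR0_BRPS -> ID_AA64DFR0_EL1_BRPs, and so on). A minimal, self-contained sketch of the extraction idiom involved; FIELD_GET and ARM64_FEATURE_MASK here are simplified stand-ins (the real definitions live in <linux/bitfield.h> and the selftest headers), and the shift value is stated only for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for the real helpers; ID_AA64PFR0_EL1.EL0
     * occupies bits [3:0] of the register. */
    #define ID_AA64PFR0_EL1_EL0_SHIFT	0
    #define ARM64_FEATURE_MASK(x)	(0xfULL << x##_SHIFT)
    #define FIELD_GET(mask, val)	(((val) & (mask)) >> __builtin_ctzll(mask))

    int main(void)
    {
    	uint64_t id_aa64pfr0 = 0x11;	/* sample value: EL1 and EL0 both AArch64-only */

    	/* old name: ID_AA64PFR0_EL0; new generated name: ID_AA64PFR0_EL1_EL0 */
    	uint64_t el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), id_aa64pfr0);

    	printf("EL0 field = %llu (1 == 64-bit only)\n", (unsigned long long)el0);
    	return 0;
    }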
 tools/arch/arm64/include/asm/gpr-num.h | 26 ++++++++++++++++++++++++++ (new file)
--- /dev/null
+++ b/tools/arch/arm64/include/asm/gpr-num.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_GPR_NUM_H
+#define __ASM_GPR_NUM_H
+
+#ifdef __ASSEMBLY__
+
+	.irp	num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
+	.equ	.L__gpr_num_x\num, \num
+	.equ	.L__gpr_num_w\num, \num
+	.endr
+	.equ	.L__gpr_num_xzr, 31
+	.equ	.L__gpr_num_wzr, 31
+
+#else /* __ASSEMBLY__ */
+
+#define __DEFINE_ASM_GPR_NUMS \
+"	.irp	num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" \
+"	.equ	.L__gpr_num_x\\num, \\num\n" \
+"	.equ	.L__gpr_num_w\\num, \\num\n" \
+"	.endr\n" \
+"	.equ	.L__gpr_num_xzr, 31\n" \
+"	.equ	.L__gpr_num_wzr, 31\n"
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_GPR_NUM_H */
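For context on how the .L__gpr_num_* symbols above get used: inline assembly can paste a register operand's name (e.g. "x3") into ".L__gpr_num_%0" and recover its architectural number, which is then ORed into a hand-encoded instruction word. The sketch below is an illustration under stated assumptions, not the kernel's actual sysreg.h wrappers: it presumes an arm64 toolchain, that <asm/gpr-num.h> resolves to the file added above, and it hard-codes 0xd53bd040 (MRS <Xt>, TPIDR_EL0 with Rt = 0) as the opcode.

    /* Sketch only: arm64-specific, assumes the tools/ include path. */
    #include <stdint.h>
    #include <stdio.h>
    #include <asm/gpr-num.h>

    static inline uint64_t read_tpidr_el0(void)
    {
    	uint64_t val;

    	asm volatile(
    		__DEFINE_ASM_GPR_NUMS		/* emits .equ .L__gpr_num_x0..x30, w0..w30 */
    		/* %0 expands to a register name such as "x3"; the .equ symbol
    		 * turns that name into the Rt field (bits [4:0]) of the opcode. */
    		"	.inst	0xd53bd040 | (.L__gpr_num_%0)\n"
    		: "=r" (val));
    	return val;
    }

    int main(void)
    {
    	printf("TPIDR_EL0 = %#llx\n", (unsigned long long)read_tpidr_el0());
    	return 0;
    }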
[diff of the sysreg.h sync itself suppressed because it is too large]
@@ -146,8 +146,8 @@ static bool vcpu_aarch64_only(struct kvm_vcpu *vcpu)
 
 	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &val);
 
-	el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL0), val);
-	return el0 == ID_AA64PFR0_ELx_64BIT_ONLY;
+	el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val);
+	return el0 == ID_AA64PFR0_EL1_ELx_64BIT_ONLY;
 }
 
 int main(void)
@@ -116,12 +116,12 @@ static void reset_debug_state(void)
 
 	/* Reset all bcr/bvr/wcr/wvr registers */
 	dfr0 = read_sysreg(id_aa64dfr0_el1);
-	brps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_BRPS), dfr0);
+	brps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_BRPs), dfr0);
 	for (i = 0; i <= brps; i++) {
 		write_dbgbcr(i, 0);
 		write_dbgbvr(i, 0);
 	}
-	wrps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_WRPS), dfr0);
+	wrps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_WRPs), dfr0);
 	for (i = 0; i <= wrps; i++) {
 		write_dbgwcr(i, 0);
 		write_dbgwvr(i, 0);
@@ -418,7 +418,7 @@ static void guest_code_ss(int test_cnt)
 
 static int debug_version(uint64_t id_aa64dfr0)
 {
-	return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), id_aa64dfr0);
+	return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), id_aa64dfr0);
 }
 
 static void test_guest_debug_exceptions(uint8_t bpn, uint8_t wpn, uint8_t ctx_bpn)
@@ -539,14 +539,14 @@ void test_guest_debug_exceptions_all(uint64_t aa64dfr0)
 	int b, w, c;
 
 	/* Number of breakpoints */
-	brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_BRPS), aa64dfr0) + 1;
+	brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_BRPs), aa64dfr0) + 1;
 	__TEST_REQUIRE(brp_num >= 2, "At least two breakpoints are required");
 
 	/* Number of watchpoints */
-	wrp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_WRPS), aa64dfr0) + 1;
+	wrp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_WRPs), aa64dfr0) + 1;
 
 	/* Number of context aware breakpoints */
-	ctx_brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_CTX_CMPS), aa64dfr0) + 1;
+	ctx_brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_CTX_CMPs), aa64dfr0) + 1;
 
 	pr_debug("%s brp_num:%d, wrp_num:%d, ctx_brp_num:%d\n", __func__,
 		 brp_num, wrp_num, ctx_brp_num);
@@ -96,14 +96,14 @@ static bool guest_check_lse(void)
 	uint64_t isar0 = read_sysreg(id_aa64isar0_el1);
 	uint64_t atomic;
 
-	atomic = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR0_ATOMICS), isar0);
+	atomic = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_ATOMIC), isar0);
 	return atomic >= 2;
 }
 
 static bool guest_check_dc_zva(void)
 {
 	uint64_t dczid = read_sysreg(dczid_el0);
-	uint64_t dzp = FIELD_GET(ARM64_FEATURE_MASK(DCZID_DZP), dczid);
+	uint64_t dzp = FIELD_GET(ARM64_FEATURE_MASK(DCZID_EL0_DZP), dczid);
 
 	return dzp == 0;
 }
@@ -196,7 +196,7 @@ static bool guest_set_ha(void)
 	uint64_t hadbs, tcr;
 
 	/* Skip if HA is not supported. */
-	hadbs = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_HADBS), mmfr1);
+	hadbs = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HAFDBS), mmfr1);
 	if (hadbs == 0)
 		return false;
 
@@ -518,9 +518,9 @@ void aarch64_get_supported_page_sizes(uint32_t ipa,
 	err = ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 	TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_GET_ONE_REG, vcpu_fd));
 
-	*ps4k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_TGRAN4), val) != 0xf;
-	*ps64k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_TGRAN64), val) == 0;
-	*ps16k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_TGRAN16), val) != 0;
+	*ps4k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN4), val) != 0xf;
+	*ps64k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN64), val) == 0;
+	*ps16k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN16), val) != 0;
 
 	close(vcpu_fd);
 	close(vm_fd);