LoongArch changes for v6.7
1, Support PREEMPT_DYNAMIC with static keys;
2, Relax memory ordering for atomic operations;
3, Support BPF CPU v4 instructions for LoongArch;
4, Some build and runtime warning fixes.

-----BEGIN PGP SIGNATURE-----

iQJKBAABCAA0FiEEzOlt8mkP+tbeiYy5AoYrw/LiJnoFAmVQWXgWHGNoZW5odWFj
YWlAa2VybmVsLm9yZwAKCRAChivD8uImepDTEACS808EsgSNIM1+JwldhdqKOErt
XDWlLuIddVpenInx8F+9GnZJzKBU+wl+Ow5ejcVarjcecIJDv5UhoVrbhpeOHkfv
RszRXQR4p/ZNSFvdraYDjjJ9UX6bp5rq7vMUC2d9bLazMauAfwf7T/HJ5qj9OYZi
RLlcwaKo2UQHYsT7nJicjh0qpH1YpZQBYTaUUCwzilzB6vAIOTf6X12vFmhtM/i+
5RIPnesMA1IQSm2ywUODpDHCs7Pirvy8aJvx0CsYdi3xl1yg3pUS6u69Ms61uWlw
29yYhNbWmVnDikTVLTNISDb/jwto5SAVB2KQKBhF1trF4ZBNE6r7sP4m2tfllYo9
KXK9tm0U8McS5o46Qd5er6eEnxL7mEeAsc12tNKUYOMe3SIkmHJmj/rZQOtpsiBg
zqQsYkGUfO2VAwMWiGke8dxPZElOYwZ3UCOpbEpXEXy3NW71VJTIuQFGmsYKJhdy
3xaAtQxdffE5yUTt2j3Y8Mex2b2oSUBSF263imsZjzWOOxd480iaoejtamf1V779
bElevzZjMDmbiQ7kiVSf96TWc7iYcSv33jhP4DorKIqnPseYPfrXEeD1xY7JV+IU
kkvSlO0hAJzVMmQgu5n0PPT1wrVpuvwtbsfcRobIkr1vktZyLaKHRq7rh4R5HTRL
ZUUm6c0kUDywGT+J4A==
=bmFe
-----END PGP SIGNATURE-----

Merge tag 'loongarch-6.7' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

Pull LoongArch updates from Huacai Chen:

 - support PREEMPT_DYNAMIC with static keys
 - relax memory ordering for atomic operations
 - support BPF CPU v4 instructions for LoongArch
 - some build and runtime warning fixes

* tag 'loongarch-6.7' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson:
  selftests/bpf: Enable cpu v4 tests for LoongArch
  LoongArch: BPF: Support signed mod instructions
  LoongArch: BPF: Support signed div instructions
  LoongArch: BPF: Support 32-bit offset jmp instructions
  LoongArch: BPF: Support unconditional bswap instructions
  LoongArch: BPF: Support sign-extension mov instructions
  LoongArch: BPF: Support sign-extension load instructions
  LoongArch: Add more instruction opcodes and emit_* helpers
  LoongArch/smp: Call rcutree_report_cpu_starting() earlier
  LoongArch: Relax memory ordering for atomic operations
  LoongArch: Mark __percpu functions as always inline
  LoongArch: Disable module from accessing external data directly
  LoongArch: Support PREEMPT_DYNAMIC with static keys
commit 4eeee6636a
@@ -136,6 +136,7 @@ config LOONGARCH
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_PREEMPT_DYNAMIC_KEY
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RETHOOK
 	select HAVE_RSEQ
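HAVE_PREEMPT_DYNAMIC_KEY selects the static-key flavour of PREEMPT_DYNAMIC, used on architectures that lack static calls: each preemption entry point is guarded by a static branch that the "preempt=" boot parameter (or /sys/kernel/debug/sched/preempt) flips at runtime. A minimal sketch of the pattern, assuming the sk_dynamic_* naming used by the scheduler core:

	#include <linux/jump_label.h>
	#include <linux/preempt.h>

	/* Sketch only; the real definitions live in kernel/sched/core.c. */
	DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);

	void dynamic_preempt_schedule(void)
	{
		/* Patched to a fall-through NOP under "preempt=none/voluntary". */
		if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
			return;
		preempt_schedule();
	}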
@@ -68,6 +68,8 @@ LDFLAGS_vmlinux += -static -n -nostdlib
 ifdef CONFIG_AS_HAS_EXPLICIT_RELOCS
 cflags-y += $(call cc-option,-mexplicit-relocs)
 KBUILD_CFLAGS_KERNEL += $(call cc-option,-mdirect-extern-access)
+KBUILD_AFLAGS_MODULE += $(call cc-option,-fno-direct-access-external-data)
+KBUILD_CFLAGS_MODULE += $(call cc-option,-fno-direct-access-external-data)
 KBUILD_AFLAGS_MODULE += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax)
 KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax)
 else
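-mdirect-extern-access lets the kernel image reach external symbols PC-relatively, which assumes everything sits in the same +/-2GiB window; modules are loaded outside that window, so they now pass -fno-direct-access-external-data and reach external data through the GOT. A hedged illustration (relocation names per the LoongArch psABI; the exact instruction sequences are compiler-dependent):

	extern int external_counter;	/* defined in the kernel image */

	int module_reads_it(void)
	{
		/*
		 * Direct access (vmlinux): pcalau12i + ld.w via
		 * %pc_hi20/%pc_lo12, valid only within +/-2GiB of the symbol.
		 * GOT access (modules): pcalau12i + ld.d via
		 * %got_pc_hi20/%got_pc_lo12 to fetch the symbol's address,
		 * then an ld.w from it; the module loader can relocate this
		 * to any load address.
		 */
		return external_counter;
	}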
@@ -36,19 +36,19 @@
 static inline void arch_atomic_##op(int i, atomic_t *v) \
 { \
 	__asm__ __volatile__( \
-	"am"#asm_op"_db.w" " $zero, %1, %0 \n" \
+	"am"#asm_op".w" " $zero, %1, %0 \n" \
 	: "+ZB" (v->counter) \
 	: "r" (I) \
 	: "memory"); \
 }
 
-#define ATOMIC_OP_RETURN(op, I, asm_op, c_op) \
-static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
+#define ATOMIC_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \
+static inline int arch_atomic_##op##_return##suffix(int i, atomic_t *v) \
 { \
 	int result; \
 \
 	__asm__ __volatile__( \
-	"am"#asm_op"_db.w" " %1, %2, %0 \n" \
+	"am"#asm_op#mb".w" " %1, %2, %0 \n" \
 	: "+ZB" (v->counter), "=&r" (result) \
 	: "r" (I) \
 	: "memory"); \
@@ -56,13 +56,13 @@ static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
 	return result c_op I; \
 }
 
-#define ATOMIC_FETCH_OP(op, I, asm_op) \
-static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+#define ATOMIC_FETCH_OP(op, I, asm_op, mb, suffix) \
+static inline int arch_atomic_fetch_##op##suffix(int i, atomic_t *v) \
 { \
 	int result; \
 \
 	__asm__ __volatile__( \
-	"am"#asm_op"_db.w" " %1, %2, %0 \n" \
+	"am"#asm_op#mb".w" " %1, %2, %0 \n" \
 	: "+ZB" (v->counter), "=&r" (result) \
 	: "r" (I) \
 	: "memory"); \
@@ -72,29 +72,53 @@ static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
 
 #define ATOMIC_OPS(op, I, asm_op, c_op) \
 	ATOMIC_OP(op, I, asm_op) \
-	ATOMIC_OP_RETURN(op, I, asm_op, c_op) \
-	ATOMIC_FETCH_OP(op, I, asm_op)
+	ATOMIC_OP_RETURN(op, I, asm_op, c_op, _db, ) \
+	ATOMIC_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \
+	ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \
+	ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed)
 
 ATOMIC_OPS(add, i, add, +)
 ATOMIC_OPS(sub, -i, add, +)
 
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_add_return_acquire arch_atomic_add_return
+#define arch_atomic_add_return_release arch_atomic_add_return
 #define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return arch_atomic_sub_return
+#define arch_atomic_sub_return_acquire arch_atomic_sub_return
+#define arch_atomic_sub_return_release arch_atomic_sub_return
 #define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add
+#define arch_atomic_fetch_add_release arch_atomic_fetch_add
 #define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub
 #define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
 
 #undef ATOMIC_OPS
 
 #define ATOMIC_OPS(op, I, asm_op) \
 	ATOMIC_OP(op, I, asm_op) \
-	ATOMIC_FETCH_OP(op, I, asm_op)
+	ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \
+	ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed)
 
 ATOMIC_OPS(and, i, and)
 ATOMIC_OPS(or, i, or)
 ATOMIC_OPS(xor, i, xor)
 
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and
+#define arch_atomic_fetch_and_release arch_atomic_fetch_and
 #define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or
+#define arch_atomic_fetch_or_release arch_atomic_fetch_or
 #define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor
+#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor
 #define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
 
 #undef ATOMIC_OPS
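With the new mb/suffix parameters each operation is generated twice: a fully ordered variant whose AMO carries the _db (full barrier) suffix, and a _relaxed variant using the plain AMO; the _acquire/_release names are simply aliased to the fully ordered one. Hand-expanding the add case gives roughly (a sketch of the preprocessor output, in kernel context):

	/* ATOMIC_OP_RETURN(add, i, add, +, _db, ) expands to: */
	static inline int arch_atomic_add_return(int i, atomic_t *v)
	{
		int result;

		__asm__ __volatile__(
		"amadd_db.w %1, %2, %0 \n"	/* fully ordered AMO */
		: "+ZB" (v->counter), "=&r" (result)
		: "r" (i)
		: "memory");

		return result + i;
	}

	/* ATOMIC_OP_RETURN(add, i, add, +, , _relaxed) expands to: */
	static inline int arch_atomic_add_return_relaxed(int i, atomic_t *v)
	{
		int result;

		__asm__ __volatile__(
		"amadd.w %1, %2, %0 \n"		/* no ordering guarantee */
		: "+ZB" (v->counter), "=&r" (result)
		: "r" (i)
		: "memory");

		return result + i;
	}

Note that sub is generated from the same add AMO with I = -i, which is why ATOMIC_OPS(sub, -i, add, +) reuses asm_op "add". The atomic64 hunks below apply the identical transformation to the .d (64-bit) AMOs.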
@@ -172,18 +196,18 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
 static inline void arch_atomic64_##op(long i, atomic64_t *v) \
 { \
 	__asm__ __volatile__( \
-	"am"#asm_op"_db.d " " $zero, %1, %0 \n" \
+	"am"#asm_op".d " " $zero, %1, %0 \n" \
 	: "+ZB" (v->counter) \
 	: "r" (I) \
 	: "memory"); \
 }
 
-#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op) \
-static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v) \
+#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \
+static inline long arch_atomic64_##op##_return##suffix(long i, atomic64_t *v) \
 { \
 	long result; \
 	__asm__ __volatile__( \
-	"am"#asm_op"_db.d " " %1, %2, %0 \n" \
+	"am"#asm_op#mb".d " " %1, %2, %0 \n" \
 	: "+ZB" (v->counter), "=&r" (result) \
 	: "r" (I) \
 	: "memory"); \
@@ -191,13 +215,13 @@ static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v) \
 	return result c_op I; \
 }
 
-#define ATOMIC64_FETCH_OP(op, I, asm_op) \
-static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v) \
+#define ATOMIC64_FETCH_OP(op, I, asm_op, mb, suffix) \
+static inline long arch_atomic64_fetch_##op##suffix(long i, atomic64_t *v) \
 { \
 	long result; \
 \
 	__asm__ __volatile__( \
-	"am"#asm_op"_db.d " " %1, %2, %0 \n" \
+	"am"#asm_op#mb".d " " %1, %2, %0 \n" \
 	: "+ZB" (v->counter), "=&r" (result) \
 	: "r" (I) \
 	: "memory"); \
@@ -207,29 +231,53 @@ static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v) \
 
 #define ATOMIC64_OPS(op, I, asm_op, c_op) \
 	ATOMIC64_OP(op, I, asm_op) \
-	ATOMIC64_OP_RETURN(op, I, asm_op, c_op) \
-	ATOMIC64_FETCH_OP(op, I, asm_op)
+	ATOMIC64_OP_RETURN(op, I, asm_op, c_op, _db, ) \
+	ATOMIC64_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \
+	ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \
+	ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed)
 
 ATOMIC64_OPS(add, i, add, +)
 ATOMIC64_OPS(sub, -i, add, +)
 
+#define arch_atomic64_add_return arch_atomic64_add_return
+#define arch_atomic64_add_return_acquire arch_atomic64_add_return
+#define arch_atomic64_add_return_release arch_atomic64_add_return
 #define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return arch_atomic64_sub_return
+#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return
+#define arch_atomic64_sub_return_release arch_atomic64_sub_return
 #define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
+#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add
+#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add
 #define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub
 #define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
 
 #undef ATOMIC64_OPS
 
 #define ATOMIC64_OPS(op, I, asm_op) \
 	ATOMIC64_OP(op, I, asm_op) \
-	ATOMIC64_FETCH_OP(op, I, asm_op)
+	ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \
+	ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed)
 
 ATOMIC64_OPS(and, i, and)
 ATOMIC64_OPS(or, i, or)
 ATOMIC64_OPS(xor, i, xor)
 
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
+#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and
+#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and
 #define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
+#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or
+#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or
 #define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor
 #define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
 
 #undef ATOMIC64_OPS
@@ -65,6 +65,8 @@ enum reg2_op {
 	revbd_op = 0x0f,
 	revh2w_op = 0x10,
 	revhd_op = 0x11,
+	extwh_op = 0x16,
+	extwb_op = 0x17,
 	iocsrrdb_op = 0x19200,
 	iocsrrdh_op = 0x19201,
 	iocsrrdw_op = 0x19202,
@@ -572,6 +574,8 @@ static inline void emit_##NAME(union loongarch_instruction *insn, \
 DEF_EMIT_REG2_FORMAT(revb2h, revb2h_op)
 DEF_EMIT_REG2_FORMAT(revb2w, revb2w_op)
 DEF_EMIT_REG2_FORMAT(revbd, revbd_op)
+DEF_EMIT_REG2_FORMAT(extwh, extwh_op)
+DEF_EMIT_REG2_FORMAT(extwb, extwb_op)
 
 #define DEF_EMIT_REG2I5_FORMAT(NAME, OP) \
 static inline void emit_##NAME(union loongarch_instruction *insn, \
@@ -623,6 +627,9 @@ DEF_EMIT_REG2I12_FORMAT(lu52id, lu52id_op)
 DEF_EMIT_REG2I12_FORMAT(andi, andi_op)
 DEF_EMIT_REG2I12_FORMAT(ori, ori_op)
 DEF_EMIT_REG2I12_FORMAT(xori, xori_op)
+DEF_EMIT_REG2I12_FORMAT(ldb, ldb_op)
+DEF_EMIT_REG2I12_FORMAT(ldh, ldh_op)
+DEF_EMIT_REG2I12_FORMAT(ldw, ldw_op)
 DEF_EMIT_REG2I12_FORMAT(ldbu, ldbu_op)
 DEF_EMIT_REG2I12_FORMAT(ldhu, ldhu_op)
 DEF_EMIT_REG2I12_FORMAT(ldwu, ldwu_op)
@@ -701,9 +708,12 @@ static inline void emit_##NAME(union loongarch_instruction *insn, \
 	insn->reg3_format.rk = rk; \
 }
 
+DEF_EMIT_REG3_FORMAT(addw, addw_op)
 DEF_EMIT_REG3_FORMAT(addd, addd_op)
 DEF_EMIT_REG3_FORMAT(subd, subd_op)
 DEF_EMIT_REG3_FORMAT(muld, muld_op)
+DEF_EMIT_REG3_FORMAT(divd, divd_op)
+DEF_EMIT_REG3_FORMAT(modd, modd_op)
 DEF_EMIT_REG3_FORMAT(divdu, divdu_op)
 DEF_EMIT_REG3_FORMAT(moddu, moddu_op)
 DEF_EMIT_REG3_FORMAT(and, and_op)
@@ -715,6 +725,9 @@ DEF_EMIT_REG3_FORMAT(srlw, srlw_op)
 DEF_EMIT_REG3_FORMAT(srld, srld_op)
 DEF_EMIT_REG3_FORMAT(sraw, sraw_op)
 DEF_EMIT_REG3_FORMAT(srad, srad_op)
+DEF_EMIT_REG3_FORMAT(ldxb, ldxb_op)
+DEF_EMIT_REG3_FORMAT(ldxh, ldxh_op)
+DEF_EMIT_REG3_FORMAT(ldxw, ldxw_op)
 DEF_EMIT_REG3_FORMAT(ldxbu, ldxbu_op)
 DEF_EMIT_REG3_FORMAT(ldxhu, ldxhu_op)
 DEF_EMIT_REG3_FORMAT(ldxwu, ldxwu_op)
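Each DEF_EMIT_* macro stamps out a one-line encoder that fills the matching bit-field view of union loongarch_instruction. Based on the rd/rj/rk assignment visible in the hunk above, the three-register variant looks essentially like this (a sketch reconstructed from the fragment shown, not copied from the header):

	#define DEF_EMIT_REG3_FORMAT(NAME, OP) \
	static inline void emit_##NAME(union loongarch_instruction *insn, \
				       enum loongarch_gpr rd, \
				       enum loongarch_gpr rj, \
				       enum loongarch_gpr rk) \
	{ \
		insn->reg3_format.opcode = OP; \
		insn->reg3_format.rd = rd; \
		insn->reg3_format.rj = rj; \
		insn->reg3_format.rk = rk; \
	}

The BPF JIT then calls emit_insn(ctx, divd, ...), emit_insn(ctx, extwb, ...) and so on, which is why the new signed div/mod, sign-extension and indexed-load opcodes each need a helper here.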
@@ -32,7 +32,7 @@ static inline void set_my_cpu_offset(unsigned long off)
 #define __my_cpu_offset __my_cpu_offset
 
 #define PERCPU_OP(op, asm_op, c_op) \
-static inline unsigned long __percpu_##op(void *ptr, \
+static __always_inline unsigned long __percpu_##op(void *ptr, \
 			unsigned long val, int size) \
 { \
 	unsigned long ret; \
@@ -63,7 +63,7 @@ PERCPU_OP(and, and, &)
 PERCPU_OP(or, or, |)
 #undef PERCPU_OP
 
-static inline unsigned long __percpu_read(void *ptr, int size)
+static __always_inline unsigned long __percpu_read(void *ptr, int size)
 {
 	unsigned long ret;
 
@@ -100,7 +100,7 @@ static inline unsigned long __percpu_read(void *ptr, int size)
 	return ret;
 }
 
-static inline void __percpu_write(void *ptr, unsigned long val, int size)
+static __always_inline void __percpu_write(void *ptr, unsigned long val, int size)
 {
 	switch (size) {
 	case 1:
@@ -132,8 +132,8 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
 	}
 }
 
-static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
-						int size)
+static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
+						   int size)
 {
 	switch (size) {
 	case 1:
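The size argument of these helpers is always a compile-time constant, so every accessor is expected to collapse to a single load, store or AMO relative to the r21 percpu base register; if the compiler declines to inline them, the switch survives to run time and the helpers stop being the cheap primitives their callers assume, hence __always_inline. The shape of the dispatch, reduced to a sketch (not the kernel's exact body, which uses r21-relative asm):

	#include <linux/build_bug.h>

	static __always_inline unsigned long __percpu_read_sketch(void *ptr, int size)
	{
		switch (size) {
		case 1: return *(volatile unsigned char *)ptr;
		case 2: return *(volatile unsigned short *)ptr;
		case 4: return *(volatile unsigned int *)ptr;
		case 8: return *(volatile unsigned long *)ptr;
		}
		BUILD_BUG();	/* unreachable for the constant sizes used */
		return 0;
	}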
@@ -504,8 +504,9 @@ asmlinkage void start_secondary(void)
 	unsigned int cpu;
 
 	sync_counter();
-	cpu = smp_processor_id();
+	cpu = raw_smp_processor_id();
 	set_my_cpu_offset(per_cpu_offset(cpu));
+	rcutree_report_cpu_starting(cpu);
 
 	cpu_probe();
 	constant_clockevent_init();
@@ -411,7 +411,11 @@ static int add_exception_handler(const struct bpf_insn *insn,
 	off_t offset;
 	struct exception_table_entry *ex;
 
-	if (!ctx->image || !ctx->prog->aux->extable || BPF_MODE(insn->code) != BPF_PROBE_MEM)
+	if (!ctx->image || !ctx->prog->aux->extable)
 		return 0;
 
+	if (BPF_MODE(insn->code) != BPF_PROBE_MEM &&
+	    BPF_MODE(insn->code) != BPF_PROBE_MEMSX)
+		return 0;
+
 	if (WARN_ON_ONCE(ctx->num_exentries >= ctx->prog->aux->num_exentries))
@@ -450,7 +454,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
 {
 	u8 tm = -1;
 	u64 func_addr;
-	bool func_addr_fixed;
+	bool func_addr_fixed, sign_extend;
 	int i = insn - ctx->prog->insnsi;
 	int ret, jmp_offset;
 	const u8 code = insn->code;
@@ -468,8 +472,23 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
 	/* dst = src */
 	case BPF_ALU | BPF_MOV | BPF_X:
 	case BPF_ALU64 | BPF_MOV | BPF_X:
-		move_reg(ctx, dst, src);
-		emit_zext_32(ctx, dst, is32);
+		switch (off) {
+		case 0:
+			move_reg(ctx, dst, src);
+			emit_zext_32(ctx, dst, is32);
+			break;
+		case 8:
+			move_reg(ctx, t1, src);
+			emit_insn(ctx, extwb, dst, t1);
+			break;
+		case 16:
+			move_reg(ctx, t1, src);
+			emit_insn(ctx, extwh, dst, t1);
+			break;
+		case 32:
+			emit_insn(ctx, addw, dst, src, LOONGARCH_GPR_ZERO);
+			break;
+		}
 		break;
 
 	/* dst = imm */
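The off field of a BPF_MOV register move now selects sign extension: 0 keeps the plain move, while 8/16/32 sign-extend the low byte/halfword/word (cpu v4 MOVSX), which is why extwb/extwh and addw appear in the hunk above. Written in clang >= 18 BPF assembly in the selftests' style (a sketch; __naked and __clobber_all come from tools/testing/selftests/bpf/progs/bpf_misc.h, and the function name is illustrative):

	SEC("socket")
	__naked void movsx_sketch(void)
	{
		/* 0x80 has the sign bit of its low byte set, so the
		 * (s8) move widens it to -128 rather than 128.
		 */
		asm volatile ("					\
		r1 = 0x80;					\
		r0 = (s8)r1;					\
		exit;						\
	"	::: __clobber_all);
	}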
@@ -534,39 +553,71 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
 	/* dst = dst / src */
 	case BPF_ALU | BPF_DIV | BPF_X:
 	case BPF_ALU64 | BPF_DIV | BPF_X:
-		emit_zext_32(ctx, dst, is32);
-		move_reg(ctx, t1, src);
-		emit_zext_32(ctx, t1, is32);
-		emit_insn(ctx, divdu, dst, dst, t1);
-		emit_zext_32(ctx, dst, is32);
+		if (!off) {
+			emit_zext_32(ctx, dst, is32);
+			move_reg(ctx, t1, src);
+			emit_zext_32(ctx, t1, is32);
+			emit_insn(ctx, divdu, dst, dst, t1);
+			emit_zext_32(ctx, dst, is32);
+		} else {
+			emit_sext_32(ctx, dst, is32);
+			move_reg(ctx, t1, src);
+			emit_sext_32(ctx, t1, is32);
+			emit_insn(ctx, divd, dst, dst, t1);
+			emit_sext_32(ctx, dst, is32);
+		}
 		break;
 
 	/* dst = dst / imm */
 	case BPF_ALU | BPF_DIV | BPF_K:
 	case BPF_ALU64 | BPF_DIV | BPF_K:
-		move_imm(ctx, t1, imm, is32);
-		emit_zext_32(ctx, dst, is32);
-		emit_insn(ctx, divdu, dst, dst, t1);
-		emit_zext_32(ctx, dst, is32);
+		if (!off) {
+			move_imm(ctx, t1, imm, is32);
+			emit_zext_32(ctx, dst, is32);
+			emit_insn(ctx, divdu, dst, dst, t1);
+			emit_zext_32(ctx, dst, is32);
+		} else {
+			move_imm(ctx, t1, imm, false);
+			emit_sext_32(ctx, t1, is32);
+			emit_sext_32(ctx, dst, is32);
+			emit_insn(ctx, divd, dst, dst, t1);
+			emit_sext_32(ctx, dst, is32);
+		}
 		break;
 
 	/* dst = dst % src */
 	case BPF_ALU | BPF_MOD | BPF_X:
 	case BPF_ALU64 | BPF_MOD | BPF_X:
-		emit_zext_32(ctx, dst, is32);
-		move_reg(ctx, t1, src);
-		emit_zext_32(ctx, t1, is32);
-		emit_insn(ctx, moddu, dst, dst, t1);
-		emit_zext_32(ctx, dst, is32);
+		if (!off) {
+			emit_zext_32(ctx, dst, is32);
+			move_reg(ctx, t1, src);
+			emit_zext_32(ctx, t1, is32);
+			emit_insn(ctx, moddu, dst, dst, t1);
+			emit_zext_32(ctx, dst, is32);
+		} else {
+			emit_sext_32(ctx, dst, is32);
+			move_reg(ctx, t1, src);
+			emit_sext_32(ctx, t1, is32);
+			emit_insn(ctx, modd, dst, dst, t1);
+			emit_sext_32(ctx, dst, is32);
+		}
 		break;
 
 	/* dst = dst % imm */
 	case BPF_ALU | BPF_MOD | BPF_K:
 	case BPF_ALU64 | BPF_MOD | BPF_K:
-		move_imm(ctx, t1, imm, is32);
-		emit_zext_32(ctx, dst, is32);
-		emit_insn(ctx, moddu, dst, dst, t1);
-		emit_zext_32(ctx, dst, is32);
+		if (!off) {
+			move_imm(ctx, t1, imm, is32);
+			emit_zext_32(ctx, dst, is32);
+			emit_insn(ctx, moddu, dst, dst, t1);
+			emit_zext_32(ctx, dst, is32);
+		} else {
+			move_imm(ctx, t1, imm, false);
+			emit_sext_32(ctx, t1, is32);
+			emit_sext_32(ctx, dst, is32);
+			emit_insn(ctx, modd, dst, dst, t1);
+			emit_sext_32(ctx, dst, is32);
+		}
 		break;
 
 	/* dst = -dst */
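For BPF_DIV and BPF_MOD, off = 1 now requests the signed variants (cpu v4 sdiv/smod), so the JIT swaps divdu/moddu plus zero-extension for divd/modd plus sign-extension. In BPF assembly the signed forms are spelled s/= and s%=; truncation is toward zero, so the sketch below leaves -3 in r0 (same bpf_misc.h assumptions and illustrative naming as the previous snippet):

	SEC("socket")
	__naked void sdiv_sketch(void)
	{
		asm volatile ("					\
		r0 = -7;					\
		r1 = 2;						\
		r0 s/= r1;					\
		exit;						\
	"	::: __clobber_all);
	}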
@@ -712,6 +763,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
 		break;
 
 	case BPF_ALU | BPF_END | BPF_FROM_BE:
+	case BPF_ALU64 | BPF_END | BPF_FROM_LE:
 		switch (imm) {
 		case 16:
 			emit_insn(ctx, revb2h, dst, dst);
|
||||
|
||||
/* PC += off */
|
||||
case BPF_JMP | BPF_JA:
|
||||
jmp_offset = bpf2la_offset(i, off, ctx);
|
||||
case BPF_JMP32 | BPF_JA:
|
||||
if (BPF_CLASS(code) == BPF_JMP)
|
||||
jmp_offset = bpf2la_offset(i, off, ctx);
|
||||
else
|
||||
jmp_offset = bpf2la_offset(i, imm, ctx);
|
||||
if (emit_uncond_jmp(ctx, jmp_offset) < 0)
|
||||
goto toofar;
|
||||
break;
|
||||
@ -879,31 +935,56 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
|
||||
case BPF_LDX | BPF_PROBE_MEM | BPF_W:
|
||||
case BPF_LDX | BPF_PROBE_MEM | BPF_H:
|
||||
case BPF_LDX | BPF_PROBE_MEM | BPF_B:
|
||||
/* dst_reg = (s64)*(signed size *)(src_reg + off) */
|
||||
case BPF_LDX | BPF_MEMSX | BPF_B:
|
||||
case BPF_LDX | BPF_MEMSX | BPF_H:
|
||||
case BPF_LDX | BPF_MEMSX | BPF_W:
|
||||
case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
|
||||
case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
|
||||
case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
|
||||
sign_extend = BPF_MODE(insn->code) == BPF_MEMSX ||
|
||||
BPF_MODE(insn->code) == BPF_PROBE_MEMSX;
|
||||
switch (BPF_SIZE(code)) {
|
||||
case BPF_B:
|
||||
if (is_signed_imm12(off)) {
|
||||
emit_insn(ctx, ldbu, dst, src, off);
|
||||
if (sign_extend)
|
||||
emit_insn(ctx, ldb, dst, src, off);
|
||||
else
|
||||
emit_insn(ctx, ldbu, dst, src, off);
|
||||
} else {
|
||||
move_imm(ctx, t1, off, is32);
|
||||
emit_insn(ctx, ldxbu, dst, src, t1);
|
||||
if (sign_extend)
|
||||
emit_insn(ctx, ldxb, dst, src, t1);
|
||||
else
|
||||
emit_insn(ctx, ldxbu, dst, src, t1);
|
||||
}
|
||||
break;
|
||||
case BPF_H:
|
||||
if (is_signed_imm12(off)) {
|
||||
emit_insn(ctx, ldhu, dst, src, off);
|
||||
if (sign_extend)
|
||||
emit_insn(ctx, ldh, dst, src, off);
|
||||
else
|
||||
emit_insn(ctx, ldhu, dst, src, off);
|
||||
} else {
|
||||
move_imm(ctx, t1, off, is32);
|
||||
emit_insn(ctx, ldxhu, dst, src, t1);
|
||||
if (sign_extend)
|
||||
emit_insn(ctx, ldxh, dst, src, t1);
|
||||
else
|
||||
emit_insn(ctx, ldxhu, dst, src, t1);
|
||||
}
|
||||
break;
|
||||
case BPF_W:
|
||||
if (is_signed_imm12(off)) {
|
||||
emit_insn(ctx, ldwu, dst, src, off);
|
||||
} else if (is_signed_imm14(off)) {
|
||||
emit_insn(ctx, ldptrw, dst, src, off);
|
||||
if (sign_extend)
|
||||
emit_insn(ctx, ldw, dst, src, off);
|
||||
else
|
||||
emit_insn(ctx, ldwu, dst, src, off);
|
||||
} else {
|
||||
move_imm(ctx, t1, off, is32);
|
||||
emit_insn(ctx, ldxwu, dst, src, t1);
|
||||
if (sign_extend)
|
||||
emit_insn(ctx, ldxw, dst, src, t1);
|
||||
else
|
||||
emit_insn(ctx, ldxwu, dst, src, t1);
|
||||
}
|
||||
break;
|
||||
case BPF_DW:
|
||||
|
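Sign-extending loads map directly onto LoongArch's ldb/ldh/ldw, which sign-extend to 64 bits, while the unsigned BPF_MEM loads keep ldbu/ldhu/ldwu; note the BPF_W path also gives up the old ldptrw shortcut for 14-bit offsets, since the signed/unsigned split no longer shares it. Selftest-style usage, mirroring verifier_ldsx.c (same bpf_misc.h assumptions as the earlier snippets):

	SEC("socket")
	__naked void ldsx_sketch(void)
	{
		/* The low byte of 0x3fe is 0xfe, so the s8 load
		 * yields -2 where a u8 load would yield 254.
		 */
		asm volatile ("					\
		r1 = 0x3fe;					\
		*(u64 *)(r10 - 8) = r1;				\
		r0 = *(s8 *)(r10 - 8);				\
		exit;						\
	"	::: __clobber_all);
	}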
@@ -7,7 +7,8 @@
 
 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
 	(defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
-	defined(__TARGET_ARCH_s390)) && __clang_major__ >= 18
+	defined(__TARGET_ARCH_s390) || defined(__TARGET_ARCH_loongarch)) && \
+	__clang_major__ >= 18
 const volatile int skip = 0;
 #else
 const volatile int skip = 1;
@@ -6,7 +6,8 @@
 
 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
 	(defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
-	defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \
+	defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
+	defined(__TARGET_ARCH_loongarch)) && \
 	__clang_major__ >= 18
 
 SEC("socket")
@@ -6,7 +6,8 @@
 
 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
 	(defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
-	defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \
+	defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
+	defined(__TARGET_ARCH_loongarch)) && \
 	__clang_major__ >= 18
 
 SEC("socket")
@@ -6,7 +6,8 @@
 
 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
 	(defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
-	defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \
+	defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
+	defined(__TARGET_ARCH_loongarch)) && \
 	__clang_major__ >= 18
 
 SEC("socket")
@@ -6,7 +6,8 @@
 
 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
 	(defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
-	defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \
+	defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
+	defined(__TARGET_ARCH_loongarch)) && \
 	__clang_major__ >= 18
 
 SEC("socket")
@@ -6,7 +6,8 @@
 
 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
 	(defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
-	defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \
+	defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
+	defined(__TARGET_ARCH_loongarch)) && \
 	__clang_major__ >= 18
 
 SEC("socket")