Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes (and cleanups) from Catalin Marinas:
 "Various arm64 fixes:

   - suspicious RCU usage warning
   - BPF (out of bounds array read and endianness conversion)
   - perf (of_node usage after of_node_put, cpu_pmu->plat_device assignment)
   - huge pmd/pud check for value 0
   - rate-limiting should only take unhandled signals into account

  Clean-up:

   - incorrect use of pgprot_t type
   - unused header include
   - __init annotation to arm_cpuidle_init
   - pr_debug instead of pr_error for disabled GICC entries in ACPI/MADT"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: Fix show_unhandled_signal_ratelimited usage
  ARM64 / SMP: Switch pr_err() to pr_debug() for disabled GICC entry
  arm64: cpuidle: add __init section marker to arm_cpuidle_init
  arm64: Don't report clear pmds and puds as huge
  arm64: perf: fix unassigned cpu_pmu->plat_device when probing PMU PPIs
  arm64: perf: Don't use of_node after putting it
  arm64: fix incorrect use of pgprot_t variable
  arm64/hw_breakpoint.c: remove unnecessary header
  arm64: bpf: fix endianness conversion bugs
  arm64: bpf: fix out-of-bounds read in bpf2a64_offset()
  ARM64: smp: Fix suspicious RCU usage with ipi tracepoints
@@ -15,7 +15,7 @@
 #include <asm/cpuidle.h>
 #include <asm/cpu_ops.h>
 
-int arm_cpuidle_init(unsigned int cpu)
+int __init arm_cpuidle_init(unsigned int cpu)
 {
         int ret = -EOPNOTSUPP;
 
@@ -31,7 +31,6 @@
 #include <asm/current.h>
 #include <asm/debug-monitors.h>
 #include <asm/hw_breakpoint.h>
-#include <asm/kdebug.h>
 #include <asm/traps.h>
 #include <asm/cputype.h>
 #include <asm/system_misc.h>
@@ -1318,7 +1318,7 @@ static int armpmu_device_probe(struct platform_device *pdev)
         /* Don't bother with PPIs; they're already affine */
         irq = platform_get_irq(pdev, 0);
         if (irq >= 0 && irq_is_percpu(irq))
-                return 0;
+                goto out;
 
         irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
         if (!irqs)
@@ -1340,12 +1340,13 @@ static int armpmu_device_probe(struct platform_device *pdev)
                         if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
                                 break;
 
-                of_node_put(dn);
                 if (cpu >= nr_cpu_ids) {
                         pr_warn("Failed to find logical CPU for %s\n",
                                 dn->name);
+                        of_node_put(dn);
                         break;
                 }
+                of_node_put(dn);
 
                 irqs[i] = cpu;
         }
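Note on the hunk above: the old code dropped the device-tree node reference with of_node_put(dn) and then still read dn->name in the pr_warn() path, so the node could in principle be freed before its name was printed. The fix simply moves the put after the last use on each path. A minimal userspace sketch of the same rule, using a made-up refcounted "struct node" (illustrative only, not the kernel's of_node API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-in for a refcounted device_node. */
struct node {
        int refcount;
        char *name;
};

static void node_put(struct node *n)
{
        if (--n->refcount == 0) {       /* last reference: free the object */
                free(n->name);
                free(n);
        }
}

int main(void)
{
        struct node *dn = malloc(sizeof(*dn));

        dn->refcount = 1;
        dn->name = strdup("pmu");

        /*
         * Wrong order (what the old code risked): the put may free dn,
         * making the following read a use-after-free:
         *
         *     node_put(dn);
         *     printf("failed to match %s\n", dn->name);
         *
         * Right order (what the patch enforces): use the data first,
         * then drop the reference.
         */
        printf("failed to match %s\n", dn->name);
        node_put(dn);
        return 0;
}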
@@ -1355,6 +1356,7 @@ static int armpmu_device_probe(struct platform_device *pdev)
         else
                 kfree(irqs);
 
+out:
         cpu_pmu->plat_device = pdev;
         return 0;
 }
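Note on the two armpmu_device_probe() hunks above: the early "return 0" taken for per-CPU (PPI) interrupts used to skip the trailing cpu_pmu->plat_device = pdev assignment, leaving the PMU without its platform device; routing that early exit through the new "out:" label keeps the assignment on every successful path. A rough standalone sketch of the pattern (names and types are illustrative, not the kernel code):

#include <stdio.h>

struct pmu  { void *plat_device; };
struct pdev { int dummy; };

static struct pmu cpu_pmu;

/* Early exits jump to "out" so the common finalisation always runs. */
static int probe(struct pdev *pdev, int irq_is_percpu)
{
        if (irq_is_percpu)
                goto out;       /* a bare "return 0" here would skip the assignment */

        /* ... per-interrupt affinity setup would go here ... */

out:
        cpu_pmu.plat_device = pdev;
        return 0;
}

int main(void)
{
        struct pdev p;

        probe(&p, 1);
        printf("plat_device set: %s\n", cpu_pmu.plat_device ? "yes" : "no");
        return 0;
}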
@@ -396,13 +396,13 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
 {
         u64 hwid = processor->arm_mpidr;
 
-        if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
-                pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
+        if (!(processor->flags & ACPI_MADT_ENABLED)) {
+                pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
                 return;
         }
 
-        if (!(processor->flags & ACPI_MADT_ENABLED)) {
-                pr_err("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
+        if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
+                pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
                 return;
         }
 
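Note on the hunk above: disabled GICC entries in the MADT are perfectly legal, so the patch checks ACPI_MADT_ENABLED before validating the MPIDR and demotes that case to pr_debug(); an *enabled* entry with a bad MPIDR is still reported with pr_err(). A minimal sketch of the check ordering (standalone, with made-up flag and mask constants, not the kernel code):

#include <stdio.h>
#include <stdint.h>

#define MADT_ENABLED            0x1ULL          /* illustrative flag bit */
#define MPIDR_HWID_BITMASK      0xff00ffffffULL /* illustrative mask     */

static void map_cpu(uint64_t mpidr, uint64_t flags)
{
        if (!(flags & MADT_ENABLED)) {
                /* expected on many systems: keep it at debug level */
                printf("debug: skipping disabled CPU entry with 0x%llx MPIDR\n",
                       (unsigned long long)mpidr);
                return;
        }

        if (mpidr & ~MPIDR_HWID_BITMASK) {
                /* an enabled entry with a bad id is a real firmware problem */
                printf("error: skipping CPU entry with invalid MPIDR 0x%llx\n",
                       (unsigned long long)mpidr);
                return;
        }

        printf("mapping CPU 0x%llx\n", (unsigned long long)mpidr);
}

int main(void)
{
        map_cpu(0xffffffffffffffffULL, 0);              /* disabled: debug only */
        map_cpu(0x0000000000000001ULL, MADT_ENABLED);   /* enabled and valid    */
        return 0;
}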
@@ -693,7 +693,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
         struct pt_regs *old_regs = set_irq_regs(regs);
 
         if ((unsigned)ipinr < NR_IPI) {
-                trace_ipi_entry(ipi_types[ipinr]);
+                trace_ipi_entry_rcuidle(ipi_types[ipinr]);
                 __inc_irq_stat(cpu, ipi_irqs[ipinr]);
         }
 
@@ -736,7 +736,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
         }
 
         if ((unsigned)ipinr < NR_IPI)
-                trace_ipi_exit(ipi_types[ipinr]);
+                trace_ipi_exit_rcuidle(ipi_types[ipinr]);
         set_irq_regs(old_regs);
 }
 
@@ -335,7 +335,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
         if (call_undef_hook(regs) == 0)
                 return;
 
-        if (show_unhandled_signals_ratelimited() && unhandled_signal(current, SIGILL)) {
+        if (unhandled_signal(current, SIGILL) && show_unhandled_signals_ratelimited()) {
                 pr_info("%s[%d]: undefined instruction: pc=%p\n",
                         current->comm, task_pid_nr(current), pc);
                 dump_instr(KERN_INFO, regs);
@@ -115,7 +115,7 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
 {
         struct siginfo si;
 
-        if (show_unhandled_signals_ratelimited() && unhandled_signal(tsk, sig)) {
+        if (unhandled_signal(tsk, sig) && show_unhandled_signals_ratelimited()) {
                 pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
                         tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
                         addr, esr);
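Note on the two hunks above: the order of the && operands is the whole fix. show_unhandled_signals_ratelimited() consumes a token from its rate limiter every time it is evaluated, so calling it first meant handled signals also drained the budget and could silence the messages for genuinely unhandled ones; with unhandled_signal() first, short-circuit evaluation only touches the limiter when there is actually something to print. A standalone sketch of the effect (the token counter is illustrative, not the kernel's ratelimit implementation):

#include <stdbool.h>
#include <stdio.h>

static int tokens = 2;  /* pretend we may print at most two messages */

/* Stand-in for show_unhandled_signals_ratelimited(): evaluating it costs a token. */
static bool ratelimited_ok(void)
{
        if (tokens > 0) {
                tokens--;
                return true;
        }
        return false;
}

static bool unhandled(int sig)
{
        return sig == 4;        /* pretend only signal 4 is unhandled */
}

int main(void)
{
        int sig;

        /* Old order: the limiter is consulted (and drained) even for handled signals. */
        for (sig = 1; sig <= 4; sig++)
                if (ratelimited_ok() && unhandled(sig))
                        printf("old order: report signal %d\n", sig);  /* never prints */

        tokens = 2;

        /* New order: only unhandled signals reach the limiter. */
        for (sig = 1; sig <= 4; sig++)
                if (unhandled(sig) && ratelimited_ok())
                        printf("new order: report signal %d\n", sig);  /* prints for 4 */

        return 0;
}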
@@ -33,13 +33,13 @@
 
 int pmd_huge(pmd_t pmd)
 {
-        return !(pmd_val(pmd) & PMD_TABLE_BIT);
+        return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
 }
 
 int pud_huge(pud_t pud)
 {
 #ifndef __PAGETABLE_PMD_FOLDED
-        return !(pud_val(pud) & PUD_TABLE_BIT);
+        return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT);
 #else
         return 0;
 #endif
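Note on the hunk above: a cleared (zero) pmd or pud also has the table bit clear, so the old predicate reported empty entries as huge pages; the extra pmd_val()/pud_val() term makes a zero entry fail the test. A tiny standalone check of the logic (the bit position mirrors arm64, the block value is made up):

#include <stdint.h>
#include <stdio.h>

#define PMD_TABLE_BIT   (1ULL << 1)     /* bit 1 distinguishes table from block entries */

static int pmd_huge_old(uint64_t pmd) { return !(pmd & PMD_TABLE_BIT); }
static int pmd_huge_new(uint64_t pmd) { return pmd && !(pmd & PMD_TABLE_BIT); }

int main(void)
{
        uint64_t cleared = 0;                   /* empty (pmd_none) entry       */
        uint64_t block   = 0x20000000000705ULL; /* made-up block (huge) mapping */

        printf("cleared: old=%d new=%d\n", pmd_huge_old(cleared), pmd_huge_new(cleared));
        printf("block:   old=%d new=%d\n", pmd_huge_old(block), pmd_huge_new(block));
        return 0;
}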
@@ -117,7 +117,7 @@ void split_pud(pud_t *old_pud, pmd_t *pmd)
         int i = 0;
 
         do {
-                set_pmd(pmd, __pmd(addr | prot));
+                set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
                 addr += PMD_SIZE;
         } while (pmd++, i++, i < PTRS_PER_PMD);
 }
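Note on the hunk above: prot is a pgprot_t, which with strict mm typechecks is a struct wrapping the raw attribute bits rather than a plain integer, so OR-ing it directly into the pmd value is incorrect; pgprot_val() unwraps it first. A small standalone sketch of the wrapper idiom (simplified versions of the kernel's wrapped types, not the real definitions):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's wrapped page-table types. */
typedef struct { uint64_t pmd; } pmd_t;
typedef struct { uint64_t pgprot; } pgprot_t;

#define __pmd(x)        ((pmd_t) { (x) })
#define pmd_val(x)      ((x).pmd)
#define __pgprot(x)     ((pgprot_t) { (x) })
#define pgprot_val(x)   ((x).pgprot)

int main(void)
{
        uint64_t addr = 0x40000000ULL;
        pgprot_t prot = __pgprot(0x711ULL);     /* made-up attribute bits */

        /* addr | prot;  <-- does not compile: prot is a struct, not an integer */
        pmd_t pmd = __pmd(addr | pgprot_val(prot));     /* unwrap first, as in the patch */

        printf("pmd = 0x%llx\n", (unsigned long long)pmd_val(pmd));
        return 0;
}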
@@ -110,6 +110,10 @@
 /* Rd = Rn >> shift; signed */
 #define A64_ASR(sf, Rd, Rn, shift) A64_SBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
 
+/* Zero extend */
+#define A64_UXTH(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 15)
+#define A64_UXTW(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 31)
+
 /* Move wide (immediate) */
 #define A64_MOVEW(sf, Rd, imm16, shift, type) \
         aarch64_insn_gen_movewide(Rd, imm16, shift, \
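Note on the hunk above: the two new macros spell zero-extension as unsigned bitfield moves, the usual AArch64 encoding of UXTH/UXTW: UBFM with immr=0 and imms=15 (or 31) copies bits 15:0 (or 31:0) of Rn into Rd and clears everything above, which is equivalent to masking with 0xffff or 0xffffffff. A host-side sketch of that effect (plain arithmetic, not the instruction encoder):

#include <stdint.h>
#include <stdio.h>

/* Emulate the effect of UBFM Rd, Rn, #0, #(width-1): keep the low bits, zero the rest. */
static uint64_t ubfm_lsb0(uint64_t rn, unsigned int width)
{
        return rn & ((width < 64) ? (1ULL << width) - 1 : ~0ULL);
}

int main(void)
{
        uint64_t x = 0x1122334455667788ULL;

        printf("UXTH: 0x%llx\n", (unsigned long long)ubfm_lsb0(x, 16)); /* 0x7788     */
        printf("UXTW: 0x%llx\n", (unsigned long long)ubfm_lsb0(x, 32)); /* 0x55667788 */
        return 0;
}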
@@ -113,9 +113,9 @@ static inline void emit_a64_mov_i(const int is64, const int reg,
 static inline int bpf2a64_offset(int bpf_to, int bpf_from,
                                  const struct jit_ctx *ctx)
 {
-        int to = ctx->offset[bpf_to + 1];
+        int to = ctx->offset[bpf_to];
         /* -1 to account for the Branch instruction */
-        int from = ctx->offset[bpf_from + 1] - 1;
+        int from = ctx->offset[bpf_from] - 1;
 
         return to - from;
 }
@@ -289,23 +289,41 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
         case BPF_ALU | BPF_END | BPF_FROM_BE:
 #ifdef CONFIG_CPU_BIG_ENDIAN
                 if (BPF_SRC(code) == BPF_FROM_BE)
-                        break;
+                        goto emit_bswap_uxt;
 #else /* !CONFIG_CPU_BIG_ENDIAN */
                 if (BPF_SRC(code) == BPF_FROM_LE)
-                        break;
+                        goto emit_bswap_uxt;
 #endif
                 switch (imm) {
                 case 16:
                         emit(A64_REV16(is64, dst, dst), ctx);
+                        /* zero-extend 16 bits into 64 bits */
+                        emit(A64_UXTH(is64, dst, dst), ctx);
                         break;
                 case 32:
                         emit(A64_REV32(is64, dst, dst), ctx);
+                        /* upper 32 bits already cleared */
                         break;
                 case 64:
                         emit(A64_REV64(dst, dst), ctx);
                         break;
                 }
                 break;
+emit_bswap_uxt:
+                switch (imm) {
+                case 16:
+                        /* zero-extend 16 bits into 64 bits */
+                        emit(A64_UXTH(is64, dst, dst), ctx);
+                        break;
+                case 32:
+                        /* zero-extend 32 bits into 64 bits */
+                        emit(A64_UXTW(is64, dst, dst), ctx);
+                        break;
+                case 64:
+                        /* nop */
+                        break;
+                }
+                break;
         /* dst = imm */
         case BPF_ALU | BPF_MOV | BPF_K:
         case BPF_ALU64 | BPF_MOV | BPF_K:
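Note on the hunk above: the eBPF endianness-conversion instructions must leave the destination zero-extended to the stated width even when source and target endianness already match. The old JIT treated the matching case as a plain break (a no-op), so the upper register bits survived; the new emit_bswap_uxt path still emits the UXTH/UXTW zero-extension, and the swapping path now zero-extends after REV16 as well. A host-side illustration of why the no-op was wrong (plain C, not JIT output):

#include <stdint.h>
#include <stdio.h>

/* What a width-16 endian conversion must produce when no byte swap is
 * needed: the value still has to be truncated/zero-extended to 16 bits. */
int main(void)
{
        uint64_t reg = 0x11223344556677ddULL;   /* arbitrary 64-bit register value */

        uint64_t wrong = reg;                   /* old JIT: treated it as a pure nop */
        uint64_t right = (uint16_t)reg;         /* new JIT: zero-extend (A64_UXTH)   */

        printf("nop    : 0x%llx\n", (unsigned long long)wrong); /* 0x11223344556677dd */
        printf("uxt 16 : 0x%llx\n", (unsigned long long)right); /* 0x77dd             */
        return 0;
}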
@@ -640,10 +658,11 @@ static int build_body(struct jit_ctx *ctx)
                 const struct bpf_insn *insn = &prog->insnsi[i];
                 int ret;
 
+                ret = build_insn(insn, ctx);
+
                 if (ctx->image == NULL)
                         ctx->offset[i] = ctx->idx;
 
-                ret = build_insn(insn, ctx);
                 if (ret > 0) {
                         i++;
                         continue;
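Note on the two BPF JIT offset hunks (bpf2a64_offset() and build_body()): they belong to the same fix. ctx->offset[] has prog->len entries, yet the old bpf2a64_offset() indexed offset[bpf_to + 1], which reads one past the array when a branch target resolves at the very end of the program. Recording ctx->idx after build_insn() changes the meaning of offset[i] to "first arm64 instruction following BPF instruction i", so the lookup drops the "+ 1" and stays in bounds while producing the same branch offsets. A small standalone model of the bookkeeping (instruction counts are made up):

#include <stdio.h>

#define PROG_LEN 4

int main(void)
{
        /* Pretend each BPF instruction i expands to emitted[i] arm64 instructions. */
        int emitted[PROG_LEN] = { 1, 2, 1, 3 };
        int off_old[PROG_LEN];  /* old convention: start of instruction i */
        int off_new[PROG_LEN];  /* new convention: end of instruction i   */
        int idx = 0, i;

        for (i = 0; i < PROG_LEN; i++) {
                off_old[i] = idx;       /* recorded before build_insn()   */
                idx += emitted[i];      /* build_insn() advances ctx->idx */
                off_new[i] = idx;       /* recorded after build_insn()    */
        }

        /* With bpf_to = PROG_LEN - 1, the old code needed off_old[PROG_LEN],
         * which is out of bounds; off_new[PROG_LEN - 1] holds the same value. */
        printf("old lookup would index off_old[%d] (array has %d entries)\n",
               PROG_LEN, PROG_LEN);
        printf("new lookup reads off_new[%d] = %d (== total emitted, %d)\n",
               PROG_LEN - 1, off_new[PROG_LEN - 1], idx);
        return 0;
}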