parisc architecture fixes for kernel v6.4-rc4:
 - Fix flush_dcache_page() for usage from irq context
 - Handle kprobes breakpoints only in kernel context
 - Handle kgdb breakpoints only in kernel context
 - Use num_present_cpus() in alternative patching code
 - Enable LOCKDEP support
 - Add lightweight spinlock checks
 - Flush AGP gatt writes and adjust gatt mask in parisc_agp_mask_memory()
 - Allow to reboot machine after system halt
 - Improve cache flushing for PCXL in arch_sync_dma_for_cpu()
-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQS86RI+GtKfB8BJu973ErUQojoPXwUCZG/QkgAKCRD3ErUQojoP
X9MBAQCe2rJxNPmdUVZ8okvKTE/qukbJxa+pSlYm/+w5s4Q6nAD/TCBM5Meil0Q2
IOYAadCt4qaZfCOcIHBOgRqrnLXUbQQ=
=e2J6
-----END PGP SIGNATURE-----

Merge tag 'parisc-for-6.4-3' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull parisc architecture fixes from Helge Deller:
 "Quite a bunch of real bugfixes in here, and most of them are tagged
  for backporting: a fix for cache flushing from irq context, a kprobes
  & kgdb breakpoint handling fix, and a fix in the alternative code
  patching function to take care of CPU hotplugging.

  parisc now provides LOCKDEP support and comes with a lightweight
  spinlock check. Both features helped me to find the cache flush bug.

  Additionally, writing the AGP gatt has been fixed, the machine now
  allows the user to reboot after a system halt, and
  arch_sync_dma_for_cpu() has been optimized for PCXL CPUs.

  Summary:

   - Fix flush_dcache_page() for usage from irq context

   - Handle kprobes breakpoints only in kernel context

   - Handle kgdb breakpoints only in kernel context

   - Use num_present_cpus() in alternative patching code

   - Enable LOCKDEP support

   - Add lightweight spinlock checks

   - Flush AGP gatt writes and adjust gatt mask in
     parisc_agp_mask_memory()

   - Allow to reboot machine after system halt

   - Improve cache flushing for PCXL in arch_sync_dma_for_cpu()"

* tag 'parisc-for-6.4-3' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: Fix flush_dcache_page() for usage from irq context
  parisc: Handle kgdb breakpoints only in kernel context
  parisc: Handle kprobes breakpoints only in kernel context
  parisc: Allow to reboot machine after system halt
  parisc: Enable LOCKDEP support
  parisc: Add lightweight spinlock checks
  parisc: Use num_present_cpus() in alternative patching code
  parisc: Flush gatt writes and adjust gatt mask in parisc_agp_mask_memory()
  parisc: Improve cache flushing for PCXL in arch_sync_dma_for_cpu()
commit 192fe71ce5
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -130,6 +130,10 @@ config PM
 config STACKTRACE_SUPPORT
         def_bool y
 
+config LOCKDEP_SUPPORT
+        bool
+        default y
+
 config ISA_DMA_API
         bool
 
--- a/arch/parisc/Kconfig.debug
+++ b/arch/parisc/Kconfig.debug
@@ -1 +1,12 @@
 # SPDX-License-Identifier: GPL-2.0
+#
+config LIGHTWEIGHT_SPINLOCK_CHECK
+        bool "Enable lightweight spinlock checks"
+        depends on SMP && !DEBUG_SPINLOCK
+        default y
+        help
+          Add checks with low performance impact to the spinlock functions
+          to catch memory overwrites at runtime. For more advanced
+          spinlock debugging you should choose the DEBUG_SPINLOCK option
+          which will detect uninitialized spinlocks too.
+          If unsure say Y here.
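The check this option enables is cheap because it is a single and-complement test on the value the lock word already holds. A minimal user-space sketch of the idea (my code, not the kernel's; abort() stands in for the break trap, and the magic value mirrors the patch below):

#include <stdio.h>
#include <stdlib.h>

#define UNLOCKED_VAL 0x1a46     /* mirrors __ARCH_SPIN_LOCK_UNLOCKED_VAL */

/* Kernel version: "andcm,= lock_val,magic,%r0" nullifies the following
 * break instruction only when lock_val has no bits set outside the magic
 * pattern, so 0 (locked) and 0x1a46 (unlocked) pass; overwrites trap. */
static void spin_val_check(unsigned int lock_val)
{
        if (lock_val & ~UNLOCKED_VAL)
                abort();        /* kernel: break 6,6 -> "Spinlock was trashed" */
}

int main(void)
{
        spin_val_check(UNLOCKED_VAL);   /* ok: unlocked */
        spin_val_check(0);              /* ok: locked (ldcw zeroed it) */
        spin_val_check(0xdeadbeef);     /* simulated overwrite: aborts */
        return 0;
}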
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -48,6 +48,10 @@ void flush_dcache_page(struct page *page);
 
 #define flush_dcache_mmap_lock(mapping)         xa_lock_irq(&mapping->i_pages)
 #define flush_dcache_mmap_unlock(mapping)       xa_unlock_irq(&mapping->i_pages)
+#define flush_dcache_mmap_lock_irqsave(mapping, flags)          \
+                xa_lock_irqsave(&mapping->i_pages, flags)
+#define flush_dcache_mmap_unlock_irqrestore(mapping, flags)     \
+                xa_unlock_irqrestore(&mapping->i_pages, flags)
 
 #define flush_icache_page(vma,page)     do {            \
         flush_kernel_dcache_page_addr(page_address(page)); \
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -7,10 +7,26 @@
 #include <asm/processor.h>
 #include <asm/spinlock_types.h>
 
+#define SPINLOCK_BREAK_INSN     0x0000c006      /* break 6,6 */
+
+static inline void arch_spin_val_check(int lock_val)
+{
+        if (IS_ENABLED(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK))
+                asm volatile(   "andcm,= %0,%1,%%r0\n"
+                                ".word %2\n"
+                : : "r" (lock_val), "r" (__ARCH_SPIN_LOCK_UNLOCKED_VAL),
+                        "i" (SPINLOCK_BREAK_INSN));
+}
+
 static inline int arch_spin_is_locked(arch_spinlock_t *x)
 {
-        volatile unsigned int *a = __ldcw_align(x);
-        return READ_ONCE(*a) == 0;
+        volatile unsigned int *a;
+        int lock_val;
+
+        a = __ldcw_align(x);
+        lock_val = READ_ONCE(*a);
+        arch_spin_val_check(lock_val);
+        return (lock_val == 0);
 }
 
 static inline void arch_spin_lock(arch_spinlock_t *x)
@@ -18,9 +34,18 @@ static inline void arch_spin_lock(arch_spinlock_t *x)
         volatile unsigned int *a;
 
         a = __ldcw_align(x);
-        while (__ldcw(a) == 0)
+        do {
+                int lock_val_old;
+
+                lock_val_old = __ldcw(a);
+                arch_spin_val_check(lock_val_old);
+                if (lock_val_old)
+                        return; /* got lock */
+
+                /* wait until we should try to get lock again */
                 while (*a == 0)
                         continue;
+        } while (1);
 }
 
 static inline void arch_spin_unlock(arch_spinlock_t *x)
@@ -29,15 +54,19 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)
 
         a = __ldcw_align(x);
         /* Release with ordered store. */
-        __asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
+        __asm__ __volatile__("stw,ma %0,0(%1)"
+                : : "r"(__ARCH_SPIN_LOCK_UNLOCKED_VAL), "r"(a) : "memory");
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *x)
 {
         volatile unsigned int *a;
+        int lock_val;
 
         a = __ldcw_align(x);
-        return __ldcw(a) != 0;
+        lock_val = __ldcw(a);
+        arch_spin_val_check(lock_val);
+        return lock_val != 0;
 }
 
 /*
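For readers unfamiliar with PA-RISC locking: ldcw ("load and clear word") atomically reads the lock word and writes 0 into it, so on parisc a zero word means locked and a nonzero word means free, the inverse of most architectures. A rough C11 model of the acquire/release protocol above (a sketch under that convention, not the kernel code; atomic_exchange stands in for ldcw):

#include <stdatomic.h>
#include <stdio.h>

#define UNLOCKED_VAL 0x1a46u    /* nonzero == free, as in the patch */

static void model_spin_lock(atomic_uint *lock)
{
        for (;;) {
                /* ldcw analogue: fetch old value, leave 0 (= locked) behind */
                unsigned int old = atomic_exchange(lock, 0);
                if (old)
                        return;                 /* was free: we own it now */
                while (atomic_load(lock) == 0)  /* spin read-only, no stores */
                        ;
        }
}

static void model_spin_unlock(atomic_uint *lock)
{
        /* release analogue of the ordered "stw,ma": put the magic back */
        atomic_store(lock, UNLOCKED_VAL);
}

int main(void)
{
        atomic_uint lock = UNLOCKED_VAL;

        model_spin_lock(&lock);
        printf("held: %u\n", atomic_load(&lock));       /* prints 0 */
        model_spin_unlock(&lock);
        printf("free: %#x\n", atomic_load(&lock));      /* prints 0x1a46 */
        return 0;
}

The read-only inner loop mirrors the kernel's "while (*a == 0) continue;": retrying the exchange itself on every iteration would bounce the cache line between CPUs for no gain.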
--- a/arch/parisc/include/asm/spinlock_types.h
+++ b/arch/parisc/include/asm/spinlock_types.h
@@ -2,13 +2,17 @@
 #ifndef __ASM_SPINLOCK_TYPES_H
 #define __ASM_SPINLOCK_TYPES_H
 
+#define __ARCH_SPIN_LOCK_UNLOCKED_VAL   0x1a46
+
 typedef struct {
 #ifdef CONFIG_PA20
         volatile unsigned int slock;
-# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
+# define __ARCH_SPIN_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED_VAL }
 #else
         volatile unsigned int lock[4];
-# define __ARCH_SPIN_LOCK_UNLOCKED      { { 1, 1, 1, 1 } }
+# define __ARCH_SPIN_LOCK_UNLOCKED      \
+        { { __ARCH_SPIN_LOCK_UNLOCKED_VAL, __ARCH_SPIN_LOCK_UNLOCKED_VAL, \
+            __ARCH_SPIN_LOCK_UNLOCKED_VAL, __ARCH_SPIN_LOCK_UNLOCKED_VAL } }
 #endif
 } arch_spinlock_t;
 
--- a/arch/parisc/kernel/alternative.c
+++ b/arch/parisc/kernel/alternative.c
@@ -25,7 +25,7 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
 {
         struct alt_instr *entry;
         int index = 0, applied = 0;
-        int num_cpus = num_online_cpus();
+        int num_cpus = num_present_cpus();
         u16 cond_check;
 
         cond_check = ALT_COND_ALWAYS |
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -399,6 +399,7 @@ void flush_dcache_page(struct page *page)
         unsigned long offset;
         unsigned long addr, old_addr = 0;
         unsigned long count = 0;
+        unsigned long flags;
         pgoff_t pgoff;
 
         if (mapping && !mapping_mapped(mapping)) {
@@ -420,7 +421,7 @@ void flush_dcache_page(struct page *page)
          * to flush one address here for them all to become coherent
          * on machines that support equivalent aliasing
          */
-        flush_dcache_mmap_lock(mapping);
+        flush_dcache_mmap_lock_irqsave(mapping, flags);
         vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
                 offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                 addr = mpnt->vm_start + offset;
@@ -460,7 +461,7 @@ void flush_dcache_page(struct page *page)
                 }
                 WARN_ON(++count == 4096);
         }
-        flush_dcache_mmap_unlock(mapping);
+        flush_dcache_mmap_unlock_irqrestore(mapping, flags);
 }
 EXPORT_SYMBOL(flush_dcache_page);
 
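The bug class fixed here: flush_dcache_page() can now run in irq context, but xa_lock_irq()/xa_unlock_irq() unconditionally re-enable interrupts on unlock, clobbering the interrupted context's state; the _irqsave/_irqrestore pair preserves it. A tiny self-contained model of why save/restore matters (illustrative only, not kernel code):

#include <stdio.h>
#include <stdbool.h>

static bool irqs_on = true;     /* models the CPU's interrupt-enable flag */

static unsigned long irq_save(void)
{
        unsigned long flags = irqs_on;  /* remember the current state */
        irqs_on = false;
        return flags;
}

static void irq_restore(unsigned long flags)
{
        irqs_on = flags;        /* restore; never blindly re-enable */
}

int main(void)
{
        irqs_on = false;        /* pretend we are already in irq context */

        unsigned long flags = irq_save();
        /* ... take the xarray lock, walk i_mmap, flush ... */
        irq_restore(flags);

        /* still disabled, as required; a plain "enable on unlock" here
         * is exactly the bug the _irqsave variants avoid */
        printf("irqs_on after unlock: %d\n", irqs_on);
        return 0;
}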
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -446,11 +446,27 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
                 enum dma_data_direction dir)
 {
+        /*
+         * fdc: The data cache line is written back to memory, if and only if
+         * it is dirty, and then invalidated from the data cache.
+         */
         flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
 }
 
 void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
                 enum dma_data_direction dir)
 {
-        flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
+        unsigned long addr = (unsigned long) phys_to_virt(paddr);
+
+        switch (dir) {
+        case DMA_TO_DEVICE:
+        case DMA_BIDIRECTIONAL:
+                flush_kernel_dcache_range(addr, size);
+                return;
+        case DMA_FROM_DEVICE:
+                purge_kernel_dcache_range_asm(addr, addr + size);
+                return;
+        default:
+                BUG();
+        }
 }
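The win is in the DMA_FROM_DEVICE arm: the device has just written the buffer, so any CPU cache lines covering it are stale and merely need discarding; the old code's unconditional flush also wrote them back first, which is wasted work on PCXL. A compile-and-run summary of the direction rules (an illustrative sketch of the reasoning, not kernel code):

#include <stdio.h>

enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };

/* Cache maintenance the CPU needs before it touches a DMA buffer again. */
static const char *sync_for_cpu_op(enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_TO_DEVICE:
        case DMA_BIDIRECTIONAL:
                return "flush: write back dirty lines, then invalidate";
        case DMA_FROM_DEVICE:
                return "purge: invalidate only, nothing dirty worth keeping";
        }
        return "BUG";
}

int main(void)
{
        printf("FROM_DEVICE -> %s\n", sync_for_cpu_op(DMA_FROM_DEVICE));
        return 0;
}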
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -122,13 +122,18 @@ void machine_power_off(void)
         /* It seems we have no way to power the system off via
          * software. The user has to press the button himself. */
 
-        printk(KERN_EMERG "System shut down completed.\n"
-               "Please power this system off now.");
+        printk("Power off or press RETURN to reboot.\n");
 
         /* prevent soft lockup/stalled CPU messages for endless loop. */
         rcu_sysrq_start();
         lockup_detector_soft_poweroff();
-        for (;;);
+        while (1) {
+                /* reboot if user presses RETURN key */
+                if (pdc_iodc_getc() == 13) {
+                        printk("Rebooting...\n");
+                        machine_restart(NULL);
+                }
+        }
 }
 
 void (*pm_power_off)(void);
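pdc_iodc_getc() polls the firmware (PDC IODC) console for a character, and 13 is the ASCII carriage return the firmware reports for the RETURN key. A user-space analogue of the loop (getchar() standing in for the firmware call; the EOF check has no kernel equivalent):

#include <stdio.h>

int main(void)
{
        printf("Power off or press RETURN to reboot.\n");
        for (;;) {
                int c = getchar();
                /* the kernel checks for 13 (CR); a terminal delivers '\n' */
                if (c == 13 || c == '\n') {
                        printf("Rebooting...\n");  /* kernel: machine_restart(NULL) */
                        break;
                }
                if (c == EOF)
                        break;
        }
        return 0;
}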
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -47,6 +47,10 @@
 #include <linux/kgdb.h>
 #include <linux/kprobes.h>
 
+#if defined(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK)
+#include <asm/spinlock.h>
+#endif
+
 #include "../math-emu/math-emu.h"       /* for handle_fpe() */
 
 static void parisc_show_stack(struct task_struct *task,
@@ -291,24 +295,30 @@ static void handle_break(struct pt_regs *regs)
         }
 
 #ifdef CONFIG_KPROBES
-        if (unlikely(iir == PARISC_KPROBES_BREAK_INSN)) {
+        if (unlikely(iir == PARISC_KPROBES_BREAK_INSN && !user_mode(regs))) {
                 parisc_kprobe_break_handler(regs);
                 return;
         }
-        if (unlikely(iir == PARISC_KPROBES_BREAK_INSN2)) {
+        if (unlikely(iir == PARISC_KPROBES_BREAK_INSN2 && !user_mode(regs))) {
                 parisc_kprobe_ss_handler(regs);
                 return;
         }
 #endif
 
 #ifdef CONFIG_KGDB
-        if (unlikely(iir == PARISC_KGDB_COMPILED_BREAK_INSN ||
-                iir == PARISC_KGDB_BREAK_INSN)) {
+        if (unlikely((iir == PARISC_KGDB_COMPILED_BREAK_INSN ||
+                iir == PARISC_KGDB_BREAK_INSN)) && !user_mode(regs)) {
                 kgdb_handle_exception(9, SIGTRAP, 0, regs);
                 return;
         }
 #endif
 
+#ifdef CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK
+        if ((iir == SPINLOCK_BREAK_INSN) && !user_mode(regs)) {
+                die_if_kernel("Spinlock was trashed", regs, 1);
+        }
+#endif
+
         if (unlikely(iir != GDB_BREAK_INSN))
                 parisc_printk_ratelimited(0, regs,
                         KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
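The pattern in all three hunks is the same: a break opcode is routed to a kernel debugging facility only when the trap was raised in kernel mode, so user space executing the same opcode gets the ordinary trap path instead of steering kprobes or kgdb. A minimal model of that dispatch rule (the kprobes/kgdb opcode values below are made up; only SPINLOCK_BREAK's 0x0000c006 comes from the patch):

#include <stdio.h>
#include <stdbool.h>

enum action { KPROBES_HANDLER, KGDB_EXCEPTION, SPINLOCK_DIE, NORMAL_TRAP };

#define KPROBES_BREAK   0x0001          /* illustrative value */
#define KGDB_BREAK      0x0002          /* illustrative value */
#define SPINLOCK_BREAK  0x0000c006      /* break 6,6, as in the patch */

static enum action dispatch_break(unsigned int iir, bool user_mode)
{
        if (!user_mode) {       /* debugger hooks are kernel-context only */
                if (iir == KPROBES_BREAK)
                        return KPROBES_HANDLER;
                if (iir == KGDB_BREAK)
                        return KGDB_EXCEPTION;
                if (iir == SPINLOCK_BREAK)
                        return SPINLOCK_DIE;
        }
        return NORMAL_TRAP;     /* user space: plain trap handling */
}

int main(void)
{
        /* before the fix, this case could reach the kprobes handler */
        printf("user-mode kprobes opcode stays a normal trap: %d\n",
               dispatch_break(KPROBES_BREAK, true) == NORMAL_TRAP);
        return 0;
}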
--- a/drivers/char/agp/parisc-agp.c
+++ b/drivers/char/agp/parisc-agp.c
@@ -90,6 +90,9 @@ parisc_agp_tlbflush(struct agp_memory *mem)
 {
         struct _parisc_agp_info *info = &parisc_agp_info;
 
+        /* force fdc ops to be visible to IOMMU */
+        asm_io_sync();
+
         writeq(info->gart_base | ilog2(info->gart_size), info->ioc_regs+IOC_PCOM);
         readq(info->ioc_regs+IOC_PCOM); /* flush */
 }
@@ -158,6 +161,7 @@ parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
                         info->gatt[j] =
                                 parisc_agp_mask_memory(agp_bridge,
                                         paddr, type);
+                        asm_io_fdc(&info->gatt[j]);
                 }
         }
 
@@ -191,7 +195,16 @@ static unsigned long
 parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
                        int type)
 {
-        return SBA_PDIR_VALID_BIT | addr;
+        unsigned ci;                    /* coherent index */
+        dma_addr_t pa;
+
+        pa = addr & IOVP_MASK;
+        asm("lci 0(%1), %0" : "=r" (ci) : "r" (phys_to_virt(pa)));
+
+        pa |= (ci >> PAGE_SHIFT) & 0xff;/* move CI (8 bits) into lowest byte */
+        pa |= SBA_PDIR_VALID_BIT;       /* set "valid" bit */
+
+        return cpu_to_le64(pa);
 }
 
 static void
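Two coherency issues are addressed here: the CPU's writes to the gatt must be flushed out of the data cache so the IOMMU re-reads current entries (asm_io_sync()/asm_io_fdc()), and each entry must carry the coherent index that the lci instruction returns for its page. A sketch of the entry layout the new parisc_agp_mask_memory() builds (the valid-bit and page-size constants below are my assumptions, not taken from this diff):

#include <stdint.h>
#include <stdio.h>

#define SBA_PDIR_VALID_BIT      0x8000000000000000ULL  /* assumed value */
#define PAGE_SHIFT              12                     /* assumed 4k pages */
#define IOVP_MASK               (~((1ULL << PAGE_SHIFT) - 1))

/* ci would come from "lci 0(vaddr), ci" on real PA-RISC hardware */
static uint64_t mask_memory(uint64_t addr, unsigned int ci)
{
        uint64_t pa = addr & IOVP_MASK;         /* keep the page frame bits */
        pa |= (ci >> PAGE_SHIFT) & 0xff;        /* coherent index -> low byte */
        pa |= SBA_PDIR_VALID_BIT;               /* mark the entry valid */
        return pa;      /* the kernel additionally applies cpu_to_le64() */
}

int main(void)
{
        printf("%#llx\n",
               (unsigned long long)mask_memory(0x12345678, 0xab000));
        return 0;
}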