Merge branch 'parisc-5.2-2' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux
Pull more parisc updates from Helge Deller:
 "Two small enhancements, which I didn't include in the last pull request
  because I wanted to keep them a few more days in for-next before
  sending upstream:

   - Replace the ldcw barrier instruction by a nop instruction in the
     CAS code on uniprocessor machines.

   - Map variables read-only after init (enable ro_after_init feature)"

* 'parisc-5.2-2' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: Use __ro_after_init in init.c
  parisc: Use __ro_after_init in unwind.c
  parisc: Use __ro_after_init in time.c
  parisc: Use __ro_after_init in processor.c
  parisc: Use __ro_after_init in process.c
  parisc: Use __ro_after_init in perf_images.h
  parisc: Use __ro_after_init in pci.c
  parisc: Use __ro_after_init in inventory.c
  parisc: Use __ro_after_init in head.S
  parisc: Use __ro_after_init in firmware.c
  parisc: Use __ro_after_init in drivers.c
  parisc: Use __ro_after_init in cache.c
  parisc: Enable the ro_after_init feature
  parisc: Drop LDCW barrier in CAS code when running UP
commit b2c9112821
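For context, the ro_after_init half of this pull relies on the generic __ro_after_init annotation from <linux/cache.h>: an annotated variable is placed in the .data..ro_after_init section, stays writable while init code runs, and is write-protected once boot-time initialization finishes. A minimal sketch of the usage pattern (the variable name and initcall below are hypothetical, not taken from this series):

    #include <linux/cache.h>
    #include <linux/init.h>

    /* Hypothetical example: written once during boot, read-only afterwards. */
    static unsigned long boot_tuning_value __ro_after_init;

    static int __init boot_tuning_setup(void)
    {
            /* Init code may still write the variable at this point. */
            boot_tuning_value = 42;
            return 0;
    }
    early_initcall(boot_tuning_setup);

    /* Once the kernel seals .data..ro_after_init at the end of boot, any
     * later store to boot_tuning_value faults instead of silently landing. */

The ldcw half needs no new call sites: as the syscall.S hunks below show, each barrier is bracketed by 98:/99: labels with an ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) entry, so boot-time alternative patching turns the LDCW into a nop when the kernel finds itself running on a uniprocessor machine.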
@@ -24,9 +24,6 @@
 
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
-/* Read-only memory is marked before mark_rodata_ro() is called. */
-#define __ro_after_init	__read_mostly
-
 void parisc_cache_init(void);	/* initializes cache-flushing */
 void disable_sr_hashing_asm(int); /* low level support for above */
 void disable_sr_hashing(void); /* turns off space register hashing */
@@ -29,9 +29,9 @@
 #include <asm/sections.h>
 #include <asm/shmparam.h>
 
-int split_tlb __read_mostly;
-int dcache_stride __read_mostly;
-int icache_stride __read_mostly;
+int split_tlb __ro_after_init;
+int dcache_stride __ro_after_init;
+int icache_stride __ro_after_init;
 EXPORT_SYMBOL(dcache_stride);
 
 void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
@@ -51,12 +51,12 @@ DEFINE_SPINLOCK(pa_tlb_flush_lock);
 DEFINE_SPINLOCK(pa_swapper_pg_lock);
 
 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
-int pa_serialize_tlb_flushes __read_mostly;
+int pa_serialize_tlb_flushes __ro_after_init;
 #endif
 
-struct pdc_cache_info cache_info __read_mostly;
+struct pdc_cache_info cache_info __ro_after_init;
 #ifndef CONFIG_PA20
-static struct pdc_btlb_info btlb_info __read_mostly;
+static struct pdc_btlb_info btlb_info __ro_after_init;
 #endif
 
 #ifdef CONFIG_SMP
@@ -381,10 +381,10 @@ EXPORT_SYMBOL(flush_data_cache_local);
 EXPORT_SYMBOL(flush_kernel_icache_range_asm);
 
 #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
-static unsigned long parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
+static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;
 
 #define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
-static unsigned long parisc_tlb_flush_threshold __read_mostly = FLUSH_TLB_THRESHOLD;
+static unsigned long parisc_tlb_flush_threshold __ro_after_init = FLUSH_TLB_THRESHOLD;
 
 void __init parisc_setup_cache_timing(void)
 {
@@ -41,7 +41,7 @@
 #include <asm/ropes.h>
 
 /* See comments in include/asm-parisc/pci.h */
-const struct dma_map_ops *hppa_dma_ops __read_mostly;
+const struct dma_map_ops *hppa_dma_ops __ro_after_init;
 EXPORT_SYMBOL(hppa_dma_ops);
 
 static struct device root = {
@@ -87,7 +87,7 @@ extern unsigned long pdc_result2[NUM_PDC_RESULT];
 
 /* Firmware needs to be initially set to narrow to determine the
  * actual firmware width. */
-int parisc_narrow_firmware __read_mostly = 1;
+int parisc_narrow_firmware __ro_after_init = 1;
 #endif
 
 /* On most currently-supported platforms, IODC I/O calls are 32-bit calls
@@ -376,7 +376,7 @@ smp_slave_stext:
 ENDPROC(parisc_kernel_start)
 
 #ifndef CONFIG_64BIT
-	.section .data..read_mostly
+	.section .data..ro_after_init
 
 	.align 4
 	.export $global$,data
@@ -39,12 +39,12 @@
 */
 #undef DEBUG_PAT
 
-int pdc_type __read_mostly = PDC_TYPE_ILLEGAL;
+int pdc_type __ro_after_init = PDC_TYPE_ILLEGAL;
 
 /* cell number and location (PAT firmware only) */
-unsigned long parisc_cell_num __read_mostly;
-unsigned long parisc_cell_loc __read_mostly;
-unsigned long parisc_pat_pdc_cap __read_mostly;
+unsigned long parisc_cell_num __ro_after_init;
+unsigned long parisc_cell_loc __ro_after_init;
+unsigned long parisc_pat_pdc_cap __ro_after_init;
 
 
 void __init setup_pdc(void)
@@ -45,14 +45,14 @@
 * #define pci_post_reset_delay 50
 */
 
-struct pci_port_ops *pci_port __read_mostly;
-struct pci_bios_ops *pci_bios __read_mostly;
+struct pci_port_ops *pci_port __ro_after_init;
+struct pci_bios_ops *pci_bios __ro_after_init;
 
-static int pci_hba_count __read_mostly;
+static int pci_hba_count __ro_after_init;
 
 /* parisc_pci_hba used by pci_port->in/out() ops to lookup bus data. */
 #define PCI_HBA_MAX 32
-static struct pci_hba_data *parisc_pci_hba[PCI_HBA_MAX] __read_mostly;
+static struct pci_hba_data *parisc_pci_hba[PCI_HBA_MAX] __ro_after_init;
 
 
 /********************************************************************
@@ -25,7 +25,7 @@
 
 #define PCXU_IMAGE_SIZE 584
 
-static uint32_t onyx_images[][PCXU_IMAGE_SIZE/sizeof(uint32_t)] __read_mostly = {
+static uint32_t onyx_images[][PCXU_IMAGE_SIZE/sizeof(uint32_t)] __ro_after_init = {
 /*
  * CPI:
  *
@@ -2093,7 +2093,7 @@ static uint32_t onyx_images[][PCXU_IMAGE_SIZE/sizeof(uint32_t)] __read_mostly =
 };
 #define PCXW_IMAGE_SIZE 576
 
-static uint32_t cuda_images[][PCXW_IMAGE_SIZE/sizeof(uint32_t)] __read_mostly = {
+static uint32_t cuda_images[][PCXW_IMAGE_SIZE/sizeof(uint32_t)] __ro_after_init = {
 /*
  * CPI: FROM CPI.IDF (Image 0)
  *
@@ -192,7 +192,7 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
  * QEMU idle the host too.
  */
 
-int running_on_qemu __read_mostly;
+int running_on_qemu __ro_after_init;
 EXPORT_SYMBOL(running_on_qemu);
 
 void __cpuidle arch_cpu_idle_dead(void)
@@ -43,10 +43,10 @@
 #include <asm/irq.h> /* for struct irq_region */
 #include <asm/parisc-device.h>
 
-struct system_cpuinfo_parisc boot_cpu_data __read_mostly;
+struct system_cpuinfo_parisc boot_cpu_data __ro_after_init;
 EXPORT_SYMBOL(boot_cpu_data);
 #ifdef CONFIG_PA8X00
-int _parisc_requires_coherency __read_mostly;
+int _parisc_requires_coherency __ro_after_init;
 EXPORT_SYMBOL(_parisc_requires_coherency);
 #endif
 
@@ -641,7 +641,8 @@ cas_action:
 2:	stw %r24, 0(%r26)
 	/* Free lock */
 #ifdef CONFIG_SMP
-	LDCW 0(%sr2,%r20), %r1	/* Barrier */
+98:	LDCW 0(%sr2,%r20), %r1	/* Barrier */
+99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
 #endif
 	stw %r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
@@ -658,7 +659,8 @@ cas_action:
 	/* Error occurred on load or store */
 	/* Free lock */
 #ifdef CONFIG_SMP
-	LDCW 0(%sr2,%r20), %r1	/* Barrier */
+98:	LDCW 0(%sr2,%r20), %r1	/* Barrier */
+99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
 #endif
 	stw %r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
@@ -862,7 +864,8 @@ cas2_action:
 cas2_end:
 	/* Free lock */
 #ifdef CONFIG_SMP
-	LDCW 0(%sr2,%r20), %r1	/* Barrier */
+98:	LDCW 0(%sr2,%r20), %r1	/* Barrier */
+99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
 #endif
 	stw %r20, 0(%sr2,%r20)
 	/* Enable interrupts */
@@ -875,7 +878,8 @@ cas2_end:
 	/* Error occurred on load or store */
 	/* Free lock */
 #ifdef CONFIG_SMP
-	LDCW 0(%sr2,%r20), %r1	/* Barrier */
+98:	LDCW 0(%sr2,%r20), %r1	/* Barrier */
+99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
 #endif
 	stw %r20, 0(%sr2,%r20)
 	ssm PSW_SM_I, %r0
@@ -40,7 +40,7 @@
 
 #include <linux/timex.h>
 
-static unsigned long clocktick __read_mostly; /* timer cycles per tick */
+static unsigned long clocktick __ro_after_init; /* timer cycles per tick */
 
 /*
  * We keep time on PA-RISC Linux by using the Interval Timer which is
@@ -40,7 +40,7 @@ static DEFINE_SPINLOCK(unwind_lock);
  * we can call unwind_init as early in the bootup process as
  * possible (before the slab allocator is initialized)
  */
-static struct unwind_table kernel_unwind_table __read_mostly;
+static struct unwind_table kernel_unwind_table __ro_after_init;
 static LIST_HEAD(unwind_tables);
 
 static inline const struct unwind_table_entry *
@@ -18,9 +18,6 @@
 		*(.data..vm0.pgd) \
 		*(.data..vm0.pte)
 
-/* No __ro_after_init data in the .rodata section - which will always be ro */
-#define RO_AFTER_INIT_DATA
-
 #include <asm-generic/vmlinux.lds.h>
 
 /* needed for the processor specific cache alignment size */
@@ -66,7 +66,7 @@ static struct resource pdcdata_resource = {
 	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
 };
 
-static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;
+static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __ro_after_init;
 
 /* The following array is initialized from the firmware specific
  * information retrieved in kernel/inventory.c.
@@ -345,16 +345,7 @@ static void __init setup_bootmem(void)
 	memblock_dump_all();
 }
 
-static int __init parisc_text_address(unsigned long vaddr)
-{
-	static unsigned long head_ptr __initdata;
-
-	if (!head_ptr)
-		head_ptr = PAGE_MASK & (unsigned long)
-			dereference_function_descriptor(&parisc_kernel_start);
-
-	return core_kernel_text(vaddr) || vaddr == head_ptr;
-}
+static bool kernel_set_to_readonly;
 
 static void __init map_pages(unsigned long start_vaddr,
 		unsigned long start_paddr, unsigned long size,
@@ -372,10 +363,11 @@ static void __init map_pages(unsigned long start_vaddr,
 	unsigned long vaddr;
 	unsigned long ro_start;
 	unsigned long ro_end;
-	unsigned long kernel_end;
+	unsigned long kernel_start, kernel_end;
 
 	ro_start = __pa((unsigned long)_text);
 	ro_end = __pa((unsigned long)&data_start);
+	kernel_start = __pa((unsigned long)&__init_begin);
 	kernel_end = __pa((unsigned long)&_end);
 
 	end_paddr = start_paddr + size;
@@ -438,26 +430,30 @@ static void __init map_pages(unsigned long start_vaddr,
 			pg_table = (pte_t *) __va(pg_table) + start_pte;
 			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
 				pte_t pte;
+				pgprot_t prot;
+				bool huge = false;
 
-				if (force)
-					pte = __mk_pte(address, pgprot);
-				else if (parisc_text_address(vaddr)) {
-					pte = __mk_pte(address, PAGE_KERNEL_EXEC);
-					if (address >= ro_start && address < kernel_end)
-						pte = pte_mkhuge(pte);
+				if (force) {
+					prot = pgprot;
+				} else if (address < kernel_start || address >= kernel_end) {
+					/* outside kernel memory */
+					prot = PAGE_KERNEL;
+				} else if (!kernel_set_to_readonly) {
+					/* still initializing, allow writing to RO memory */
+					prot = PAGE_KERNEL_RWX;
+					huge = true;
+				} else if (address >= ro_start) {
+					/* Code (ro) and Data areas */
+					prot = (address < ro_end) ?
+						PAGE_KERNEL_EXEC : PAGE_KERNEL;
+					huge = true;
+				} else {
+					prot = PAGE_KERNEL;
 				}
-				else
-#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
-				if (address >= ro_start && address < ro_end) {
-					pte = __mk_pte(address, PAGE_KERNEL_EXEC);
+
+				pte = __mk_pte(address, prot);
+				if (huge)
 					pte = pte_mkhuge(pte);
-				} else
-#endif
-				{
-					pte = __mk_pte(address, pgprot);
-					if (address >= ro_start && address < kernel_end)
-						pte = pte_mkhuge(pte);
-				}
 
 				if (address >= end_paddr)
 					break;
@@ -493,6 +489,12 @@ void __ref free_initmem(void)
 {
 	unsigned long init_begin = (unsigned long)__init_begin;
 	unsigned long init_end = (unsigned long)__init_end;
+	unsigned long kernel_end = (unsigned long)&_end;
+
+	/* Remap kernel text and data, but do not touch init section yet. */
+	kernel_set_to_readonly = true;
+	map_pages(init_end, __pa(init_end), kernel_end - init_end,
+		  PAGE_KERNEL, 0);
 
 	/* The init text pages are marked R-X. We have to
 	 * flush the icache and mark them RW-
@@ -509,7 +511,7 @@ void __ref free_initmem(void)
 		PAGE_KERNEL, 1);
 
 	/* force the kernel to see the new TLB entries */
-	__flush_tlb_range(0, init_begin, init_end);
+	__flush_tlb_range(0, init_begin, kernel_end);
 
 	/* finally dump all the instructions which were cached, since the
 	 * pages are no-longer executable */
@@ -527,8 +529,9 @@ void mark_rodata_ro(void)
 {
 	/* rodata memory was already mapped with KERNEL_RO access rights by
 	   pagetable_init() and map_pages(). No need to do additional stuff here */
-	printk (KERN_INFO "Write protecting the kernel read-only data: %luk\n",
-		(unsigned long)(__end_rodata - __start_rodata) >> 10);
+	unsigned long roai_size = __end_ro_after_init - __start_ro_after_init;
+
+	pr_info("Write protected read-only-after-init data: %luk\n", roai_size >> 10);
 }
 #endif
 
@@ -554,11 +557,11 @@ void mark_rodata_ro(void)
 #define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
 				     & ~(VM_MAP_OFFSET-1)))
 
-void *parisc_vmalloc_start __read_mostly;
+void *parisc_vmalloc_start __ro_after_init;
 EXPORT_SYMBOL(parisc_vmalloc_start);
 
 #ifdef CONFIG_PA11
-unsigned long pcxl_dma_start __read_mostly;
+unsigned long pcxl_dma_start __ro_after_init;
 #endif
 
 void __init mem_init(void)
@@ -632,7 +635,7 @@ void __init mem_init(void)
 #endif
 }
 
-unsigned long *empty_zero_page __read_mostly;
+unsigned long *empty_zero_page __ro_after_init;
 EXPORT_SYMBOL(empty_zero_page);
 
 /*