RISC-V Fixes for 6.4-rc6
* A fix to avoid ISA-disallowed privilege mappings that can result from
  WRITE+EXEC mmap requests from userspace.
* A fix for kfence to handle huge pages.
* A fix to avoid converting misaligned VAs to huge pages.
* ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE has been selected so kprobe can
  understand user pointers.
-----BEGIN PGP SIGNATURE-----

iQJHBAABCAAxFiEEKzw3R0RoQ7JKlDp6LhMZ81+7GIkFAmSDOpgTHHBhbG1lckBk
YWJiZWx0LmNvbQAKCRAuExnzX7sYie+/D/oCjQbBFZaOEsRtZp2SlMz0COjVurXv
ClQWSqPEvbWg28dZvapZzjRcBc6X7Th6P6ia1FIa/XDTLKTPBdBoVSo1iRfH12bm
1CBsEKP08vMeN3b2nOD82B0XFl9PCB2AEHYo88a8k6ifEYMwfRU6g8ldHZC2HMF1
3z2vT7XR40t9E7MNPBG7Kn+2JHob7iB8bqMAZfoxyth4q8H2s7QEGCCwtwFRWGix
h6NW66WojWnTn+cniX8NbIY+5xV37xH/S4x2cFqGUklHD1/B8rCnXPpJqtmcSb9n
pGV30m7sw8sYdWHPABjMutRVCRv0DpPmqUEHAOThLzAoIqBHv+4e9ov8PmU8pJcz
5em6Cl+5Io/qzNa+uXT3cO1tfAzCid2r91cbpfa8RTBu8ZIf1GPS0SrgrdofU+Mw
X5j90J8Hd7YH+egfI4DOZXxE+79VV8AVtH/aPWJxriOoAFjxzvP6OCckJo5ee4A7
EWhxsdQZVQ+WMga7yWMBknmxFYlabNjZrZ+/bAhfHTseljVGkHxr5dF+78g5dyZt
yvcnHMTiDXHKdRaHknquBh9hAVh2s4xNea00x3h0ybZR0GVJH3ZnWTdz7RLtyop7
tWEcFHngQRtKJeIn33T6yioRkfUq2ODXKmBAJq0OwQCV8S8f42mE72iVw67fJQ9u
XWJdYX0CqM3YPg==
=CaWh
-----END PGP SIGNATURE-----

Merge tag 'riscv-for-linus-6.4-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull RISC-V fixes from Palmer Dabbelt:

 - A fix to avoid ISA-disallowed privilege mappings that can result from
   WRITE+EXEC mmap requests from userspace.

 - A fix for kfence to handle huge pages.

 - A fix to avoid converting misaligned VAs to huge pages.

 - ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE has been selected so kprobe can
   understand user pointers.

* tag 'riscv-for-linus-6.4-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  riscv: fix kprobe __user string arg print fault issue
  riscv: Check the virtual alignment before choosing a map size
  riscv: Fix kfence now that the linear mapping can be backed by PUD/P4D/PGD
  riscv: mm: Ensure prot of VM_WRITE and VM_EXEC must be readable
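For context on the first item, the snippet below is a minimal userspace sketch (editorial illustration, not part of the pull) of the kind of request involved: an anonymous private mapping asked for with PROT_WRITE | PROT_EXEC but no PROT_READ. The RISC-V ISA reserves the write-without-read permission encoding, so the kernel has to pick a readable protection for such a mapping; the pgtable.h and protection_map changes below make that the case.

/* Illustrative only: a WRITE+EXEC (no READ) anonymous private mapping. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        unsigned char *p = mmap(NULL, 4096, PROT_WRITE | PROT_EXEC,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        memset(p, 0xaa, 4096);                  /* first write faults the page in */
        printf("read back: %#x\n", (unsigned)p[0]);

        munmap(p, 4096);
        return 0;
}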
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
@@ -26,6 +26,7 @@ config RISCV
 	select ARCH_HAS_GIGANTIC_PAGE
 	select ARCH_HAS_KCOV
 	select ARCH_HAS_MMIOWB
+	select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
 	select ARCH_HAS_PMEM_API
 	select ARCH_HAS_PTE_SPECIAL
 	select ARCH_HAS_SET_DIRECT_MAP if MMU
diff --git a/arch/riscv/include/asm/kfence.h b/arch/riscv/include/asm/kfence.h
@@ -8,41 +8,8 @@
 #include <asm-generic/pgalloc.h>
 #include <asm/pgtable.h>
 
-static inline int split_pmd_page(unsigned long addr)
-{
-	int i;
-	unsigned long pfn = PFN_DOWN(__pa((addr & PMD_MASK)));
-	pmd_t *pmd = pmd_off_k(addr);
-	pte_t *pte = pte_alloc_one_kernel(&init_mm);
-
-	if (!pte)
-		return -ENOMEM;
-
-	for (i = 0; i < PTRS_PER_PTE; i++)
-		set_pte(pte + i, pfn_pte(pfn + i, PAGE_KERNEL));
-	set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(pte)), PAGE_TABLE));
-
-	flush_tlb_kernel_range(addr, addr + PMD_SIZE);
-	return 0;
-}
-
 static inline bool arch_kfence_init_pool(void)
 {
-	int ret;
-	unsigned long addr;
-	pmd_t *pmd;
-
-	for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
-	     addr += PAGE_SIZE) {
-		pmd = pmd_off_k(addr);
-
-		if (pmd_leaf(*pmd)) {
-			ret = split_pmd_page(addr);
-			if (ret)
-				return false;
-		}
-	}
-
 	return true;
 }
 
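Why arch_kfence_init_pool() can now be a no-op: KFENCE works by flipping the protection of individual pages of its pool, which only works if those pages are backed by PAGE_SIZE PTEs. Previously the pool landed somewhere inside the PMD-mapped linear mapping and had to be split up lazily here; with the init.c changes further down, the pool is mapped at page granularity from the start. The per-page toggle looks roughly like the sketch below (an approximation of the unchanged remainder of this header, shown only for orientation; it is not part of this diff, and relies on the header's existing includes):

/* Rough sketch of per-page KFENCE protection on riscv: clear or set the
 * PRESENT bit of the single PTE backing addr.  This only works when addr
 * is covered by a PTE-level (PAGE_SIZE) mapping, not a huge leaf entry. */
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
        pte_t *pte = virt_to_kpte(addr);

        if (protect)
                set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
        else
                set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));

        flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

        return true;
}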
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
@@ -165,8 +165,7 @@ extern struct pt_alloc_ops pt_ops __initdata;
 					 _PAGE_EXEC | _PAGE_WRITE)
 
 #define PAGE_COPY		PAGE_READ
-#define PAGE_COPY_EXEC		PAGE_EXEC
-#define PAGE_COPY_READ_EXEC	PAGE_READ_EXEC
+#define PAGE_COPY_EXEC		PAGE_READ_EXEC
 #define PAGE_SHARED		PAGE_WRITE
 #define PAGE_SHARED_EXEC	PAGE_WRITE_EXEC
 
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
@@ -23,6 +23,7 @@
 #ifdef CONFIG_RELOCATABLE
 #include <linux/elf.h>
 #endif
+#include <linux/kfence.h>
 
 #include <asm/fixmap.h>
 #include <asm/tlbflush.h>
@@ -293,7 +294,7 @@ static const pgprot_t protection_map[16] = {
 	[VM_EXEC]					= PAGE_EXEC,
 	[VM_EXEC | VM_READ]				= PAGE_READ_EXEC,
 	[VM_EXEC | VM_WRITE]				= PAGE_COPY_EXEC,
-	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_READ_EXEC,
+	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_EXEC,
 	[VM_SHARED]					= PAGE_NONE,
 	[VM_SHARED | VM_READ]				= PAGE_READ,
 	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
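How this table is consumed (a simplified model, not code from this patch): the core mm derives a VMA's page protection by indexing protection_map with the low read/write/exec/shared vm_flags bits, so the VM_EXEC | VM_WRITE entries above are exactly what a WRITE+EXEC mmap request ends up using.

/* Simplified model of the generic helper that consumes protection_map;
 * the real kernel version lives in core mm code and is declared per-arch. */
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
        return protection_map[vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}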
@@ -659,18 +660,19 @@ void __init create_pgd_mapping(pgd_t *pgdp,
 	create_pgd_next_mapping(nextp, va, pa, sz, prot);
 }
 
-static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
+static uintptr_t __init best_map_size(phys_addr_t pa, uintptr_t va,
+				      phys_addr_t size)
 {
-	if (!(base & (PGDIR_SIZE - 1)) && size >= PGDIR_SIZE)
+	if (!(pa & (PGDIR_SIZE - 1)) && !(va & (PGDIR_SIZE - 1)) && size >= PGDIR_SIZE)
 		return PGDIR_SIZE;
 
-	if (!(base & (P4D_SIZE - 1)) && size >= P4D_SIZE)
+	if (!(pa & (P4D_SIZE - 1)) && !(va & (P4D_SIZE - 1)) && size >= P4D_SIZE)
 		return P4D_SIZE;
 
-	if (!(base & (PUD_SIZE - 1)) && size >= PUD_SIZE)
+	if (!(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE)
 		return PUD_SIZE;
 
-	if (!(base & (PMD_SIZE - 1)) && size >= PMD_SIZE)
+	if (!(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE)
 		return PMD_SIZE;
 
 	return PAGE_SIZE;
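The rule the new best_map_size() enforces, shown in isolation (a standalone userspace sketch with illustrative constants and names, not kernel code, assuming a 64-bit build): a leaf mapping of size S is only usable when the physical address, the virtual address, and the remaining length are all S-aligned. Checking only the physical side, as before, could pick a huge mapping for a virtual address that is not aligned to it.

#include <stdio.h>

#define SZ_4K	0x1000UL
#define SZ_2M	0x200000UL
#define SZ_1G	0x40000000UL

/* Same shape as the fixed check: pa, va and size must all be S-aligned. */
static unsigned long pick_map_size(unsigned long pa, unsigned long va,
                                   unsigned long size)
{
        if (!(pa & (SZ_1G - 1)) && !(va & (SZ_1G - 1)) && size >= SZ_1G)
                return SZ_1G;
        if (!(pa & (SZ_2M - 1)) && !(va & (SZ_2M - 1)) && size >= SZ_2M)
                return SZ_2M;
        return SZ_4K;
}

int main(void)
{
        /* pa is 1 GiB aligned, but the VA it must be mapped at is only
         * 2 MiB aligned, so a 1 GiB leaf cannot be used: the result is 2 MiB. */
        unsigned long pa = 0x80000000UL;
        unsigned long va = 0xffffffd800200000UL;

        printf("map size: %#lx\n", pick_map_size(pa, va, SZ_1G));
        return 0;
}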
@@ -1167,14 +1169,16 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 }
 
 static void __init create_linear_mapping_range(phys_addr_t start,
-					       phys_addr_t end)
+					       phys_addr_t end,
+					       uintptr_t fixed_map_size)
 {
 	phys_addr_t pa;
 	uintptr_t va, map_size;
 
 	for (pa = start; pa < end; pa += map_size) {
 		va = (uintptr_t)__va(pa);
-		map_size = best_map_size(pa, end - pa);
+		map_size = fixed_map_size ? fixed_map_size :
+					    best_map_size(pa, va, end - pa);
 
 		create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
 				   pgprot_from_va(va));
@@ -1184,6 +1188,7 @@ static void __init create_linear_mapping_range(phys_addr_t start,
 static void __init create_linear_mapping_page_table(void)
 {
 	phys_addr_t start, end;
+	phys_addr_t kfence_pool __maybe_unused;
 	u64 i;
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
@@ -1197,6 +1202,19 @@ static void __init create_linear_mapping_page_table(void)
 	memblock_mark_nomap(krodata_start, krodata_size);
 #endif
 
+#ifdef CONFIG_KFENCE
+	/*
+	 *  kfence pool must be backed by PAGE_SIZE mappings, so allocate it
+	 *  before we setup the linear mapping so that we avoid using hugepages
+	 *  for this region.
+	 */
+	kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
+	BUG_ON(!kfence_pool);
+
+	memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
+	__kfence_pool = __va(kfence_pool);
+#endif
+
 	/* Map all memory banks in the linear mapping */
 	for_each_mem_range(i, &start, &end) {
 		if (start >= end)
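For scale, the worked arithmetic below assumes the generic definition of KFENCE_POOL_SIZE and the default CONFIG_KFENCE_NUM_OBJECTS with 4 KiB pages (assumptions, not stated in this diff): the reserved pool is small enough to fit in a single huge mapping, which is exactly what allocating and nomap-marking it before the linear mapping is built avoids.

/*
 * KFENCE_POOL_SIZE = (CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE
 *                  = (255 + 1) * 2 * 4096     (default config, 4 KiB pages)
 *                  = 2 MiB
 *
 * i.e. one PMD-sized chunk of the linear mapping, which would otherwise be
 * eligible to be covered by a single huge leaf entry.
 */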
@@ -1207,17 +1225,25 @@ static void __init create_linear_mapping_page_table(void)
 		if (end >= __pa(PAGE_OFFSET) + memory_limit)
 			end = __pa(PAGE_OFFSET) + memory_limit;
 
-		create_linear_mapping_range(start, end);
+		create_linear_mapping_range(start, end, 0);
 	}
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
-	create_linear_mapping_range(ktext_start, ktext_start + ktext_size);
+	create_linear_mapping_range(ktext_start, ktext_start + ktext_size, 0);
 	create_linear_mapping_range(krodata_start,
-				    krodata_start + krodata_size);
+				    krodata_start + krodata_size, 0);
 
 	memblock_clear_nomap(ktext_start, ktext_size);
 	memblock_clear_nomap(krodata_start, krodata_size);
 #endif
 
+#ifdef CONFIG_KFENCE
+	create_linear_mapping_range(kfence_pool,
+				    kfence_pool + KFENCE_POOL_SIZE,
+				    PAGE_SIZE);
+
+	memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
+#endif
 }
 
 static void __init setup_vm_final(void)