86d0c16427
iomap_max_page_shift is expected to contain a page shift, so it can't be a
'bool'; it has to be an 'unsigned int'.

And fix the default values: P4D_SHIFT is the default when huge iomap is
allowed.

However, on some architectures (e.g. powerpc book3s/64), P4D_SHIFT is not a
constant, so it can't be used to initialise a static variable. So,
initialise iomap_max_page_shift with the maximum shift supported by the
architecture; it is gated by P4D_SHIFT in vmap_try_huge_p4d() anyway.
Link: https://lkml.kernel.org/r/ad2d366015794a9f21320dcbdd0a8eb98979e9df.1620898113.git.christophe.leroy@csgroup.eu
Fixes: bbc180a5ad ("mm: HUGE_VMAP arch support cleanup")
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
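For reference, the gate the explanation above points to lives in vmap_try_huge_p4d() in mm/vmalloc.c. A minimal sketch, assuming the helper keeps the shape introduced by the HUGE_VMAP cleanup series; the body is abbreviated and illustrative, not the exact upstream code:

static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
                             phys_addr_t phys_addr, pgprot_t prot,
                             unsigned int max_page_shift)
{
        /*
         * Illustrative sketch: no mapping larger than max_page_shift is ever
         * attempted, so a P4D-sized huge mapping is only tried once the limit
         * reaches P4D_SHIFT.  An over-large default such as BITS_PER_LONG - 1
         * just means "no additional limit" and is clamped here at map time.
         */
        if (max_page_shift < P4D_SHIFT)
                return 0;

        if (!arch_vmap_p4d_supported(prot))
                return 0;

        /* ... alignment and size checks elided ... */
        return p4d_set_huge(p4d, phys_addr, prot);
}

This is why initialising iomap_max_page_shift to BITS_PER_LONG - 1 is safe even where P4D_SHIFT is not a compile-time constant: the shift is bounded when the mapping is built, not when the variable is initialised.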
75 lines
1.9 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/export.h>
#include <asm/cacheflush.h>

#include "pgalloc-track.h"

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static unsigned int __ro_after_init iomap_max_page_shift = BITS_PER_LONG - 1;

static int __init set_nohugeiomap(char *str)
{
        iomap_max_page_shift = PAGE_SHIFT;
        return 0;
}
early_param("nohugeiomap", set_nohugeiomap);
#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
static const unsigned int iomap_max_page_shift = PAGE_SHIFT;
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

int ioremap_page_range(unsigned long addr,
                       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
        return vmap_range(addr, end, phys_addr, prot, iomap_max_page_shift);
}

#ifdef CONFIG_GENERIC_IOREMAP
void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
{
        unsigned long offset, vaddr;
        phys_addr_t last_addr;
        struct vm_struct *area;

        /* Disallow wrap-around or zero size */
        last_addr = addr + size - 1;
        if (!size || last_addr < addr)
                return NULL;

        /* Page-align mappings */
        offset = addr & (~PAGE_MASK);
        addr -= offset;
        size = PAGE_ALIGN(size + offset);

        area = get_vm_area_caller(size, VM_IOREMAP,
                        __builtin_return_address(0));
        if (!area)
                return NULL;
        vaddr = (unsigned long)area->addr;

        if (ioremap_page_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
                free_vm_area(area);
                return NULL;
        }

        return (void __iomem *)(vaddr + offset);
}
EXPORT_SYMBOL(ioremap_prot);

void iounmap(volatile void __iomem *addr)
{
        vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);
#endif /* CONFIG_GENERIC_IOREMAP */