5d260625b1
This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports the standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently, all __SXXX and __PXXX macros, which are no longer needed, can be dropped.

Link: https://lkml.kernel.org/r/20220711070600.2378316-23-anshuman.khandual@arm.com
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Vineet Gupta <vgupta@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Brian Cain <bcain@quicinc.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chris Zankel <chris@zankel.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Guo Ren <guoren@kernel.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Sam Ravnborg <sam@ravnborg.org>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: WANG Xuerui <kernel@xen0n.name>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
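For reference, on architectures that select ARCH_HAS_VM_GET_PAGE_PROT, the generic DECLARE_VM_GET_PAGE_PROT helper in include/linux/pgtable.h expands to roughly the following; this is why protection_map[] can become private and static to this file (a sketch of the generic helper, not part of this patch):

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
        return protection_map[vm_flags &
                        (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}
EXPORT_SYMBOL(vm_get_page_prot);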
97 lines
2.4 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARC700 mmap
 *
 * (started from arm version - for VIPT alias handling)
 *
 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/mm.h>

#include <asm/cacheflush.h>

#define COLOUR_ALIGN(addr, pgoff)                       \
        ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) +      \
         (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
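/*
 * COLOUR_ALIGN() rounds 'addr' up to an SHMLBA boundary and then adds
 * the offset of 'pgoff' within an SHMLBA window, so the returned
 * address has the same cache colour as the file offset.
 *
 * Worked example with hypothetical values (4 KiB pages, SHMLBA = 16 KiB):
 *   COLOUR_ALIGN(0x5000, 3) = 0x8000 + 0x3000 = 0xB000,
 *   and 0xB000 % SHMLBA == (3 << PAGE_SHIFT) % SHMLBA == 0x3000.
 */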

/*
 * Ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.
 * We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int do_align = 0;
        int aliasing = cache_is_vipt_aliasing();
        struct vm_unmapped_area_info info;

        /*
         * We only need to do colour alignment if D cache aliases.
         */
        if (aliasing)
                do_align = filp || (flags & MAP_SHARED);

        /*
         * We enforce the MAP_FIXED case.
         */
        if (flags & MAP_FIXED) {
                if (aliasing && flags & MAP_SHARED &&
                    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
                        return -EINVAL;
                return addr;
        }

        if (len > TASK_SIZE)
                return -ENOMEM;

        if (addr) {
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }

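        /*
         * Fall back to the generic gap search: when colouring is
         * needed, constrain vm_unmapped_area() so the returned address
         * matches the file offset (pgoff) in its SHMLBA colour bits.
         */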
        info.flags = 0;
        info.length = len;
        info.low_limit = mm->mmap_base;
        info.high_limit = TASK_SIZE;
        info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        return vm_unmapped_area(&info);
}

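/*
 * vm_get_page_prot(), declared below via DECLARE_VM_GET_PAGE_PROT,
 * indexes this table with the low VM_READ/VM_WRITE/VM_EXEC/VM_SHARED
 * bits of vm_flags. Private writable entries deliberately map to the
 * read-only PAGE_U_R so that writes fault and take the copy-on-write
 * path.
 */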
static const pgprot_t protection_map[16] = {
        [VM_NONE]                                       = PAGE_U_NONE,
        [VM_READ]                                       = PAGE_U_R,
        [VM_WRITE]                                      = PAGE_U_R,
        [VM_WRITE | VM_READ]                            = PAGE_U_R,
        [VM_EXEC]                                       = PAGE_U_X_R,
        [VM_EXEC | VM_READ]                             = PAGE_U_X_R,
        [VM_EXEC | VM_WRITE]                            = PAGE_U_X_R,
        [VM_EXEC | VM_WRITE | VM_READ]                  = PAGE_U_X_R,
        [VM_SHARED]                                     = PAGE_U_NONE,
        [VM_SHARED | VM_READ]                           = PAGE_U_R,
        [VM_SHARED | VM_WRITE]                          = PAGE_U_W_R,
        [VM_SHARED | VM_WRITE | VM_READ]                = PAGE_U_W_R,
        [VM_SHARED | VM_EXEC]                           = PAGE_U_X_R,
        [VM_SHARED | VM_EXEC | VM_READ]                 = PAGE_U_X_R,
        [VM_SHARED | VM_EXEC | VM_WRITE]                = PAGE_U_X_W_R,
        [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]      = PAGE_U_X_W_R
};
DECLARE_VM_GET_PAGE_PROT