// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/mmap.c
 *
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/types.h>

#include <asm/cpufeature.h>
#include <asm/page.h>
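
/*
 * Base page protections, indexed by the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED
 * bits of vm_flags. Private writable mappings deliberately map to read-only
 * protections: write access is granted later via copy-on-write faults.
 * vm_get_page_prot() looks entries up here, and adjust_protection_map()
 * upgrades the execute-only entries at boot when Enhanced PAN is present.
 */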
static pgprot_t protection_map[16] __ro_after_init = {
	[VM_NONE] = PAGE_NONE,
	[VM_READ] = PAGE_READONLY,
	[VM_WRITE] = PAGE_READONLY,
	[VM_WRITE | VM_READ] = PAGE_READONLY,
	/* PAGE_EXECONLY if Enhanced PAN */
	[VM_EXEC] = PAGE_READONLY_EXEC,
	[VM_EXEC | VM_READ] = PAGE_READONLY_EXEC,
	[VM_EXEC | VM_WRITE] = PAGE_READONLY_EXEC,
	[VM_EXEC | VM_WRITE | VM_READ] = PAGE_READONLY_EXEC,
	[VM_SHARED] = PAGE_NONE,
	[VM_SHARED | VM_READ] = PAGE_READONLY,
	[VM_SHARED | VM_WRITE] = PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
	/* PAGE_EXECONLY if Enhanced PAN */
	[VM_SHARED | VM_EXEC] = PAGE_READONLY_EXEC,
	[VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_EXEC,
	[VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_EXEC,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC
};

/*
 * You really shouldn't be using read() or write() on /dev/mem. This might go
 * away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	/*
	 * Check whether addr is covered by a memory region without the
	 * MEMBLOCK_NOMAP attribute, and whether that region covers the
	 * entire range. In theory, this could lead to false negatives
	 * if the range is covered by distinct but adjacent memory regions
	 * that only differ in other attributes. However, few such
	 * attributes have been defined, and it is debatable whether it
	 * follows that /dev/mem read() calls should be able to traverse
	 * such boundaries.
	 */
	return memblock_is_region_memory(addr, size) &&
	       memblock_is_map_memory(addr);
}

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
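	/* Anything above PHYS_MASK is outside the supported physical address space. */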
	return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK);
}
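
/*
 * Runs as an arch_initcall(), by which point the system-wide CPU features
 * have been finalised, so the EPAN capability check below is meaningful.
 */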
static int __init adjust_protection_map(void)
{
	/*
	 * With Enhanced PAN we can honour the execute-only permissions as
	 * there is no PAN override with such mappings.
	 */
	if (cpus_have_const_cap(ARM64_HAS_EPAN)) {
		protection_map[VM_EXEC] = PAGE_EXECONLY;
		protection_map[VM_EXEC | VM_SHARED] = PAGE_EXECONLY;
	}

	return 0;
}
arch_initcall(adjust_protection_map);

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	pteval_t prot = pgprot_val(protection_map[vm_flags &
				   (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]);

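	/* Guarded Page bit: enables BTI checking for this mapping. */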
	if (vm_flags & VM_ARM64_BTI)
		prot |= PTE_GP;

	/*
	 * There are two conditions required for returning a Normal Tagged
	 * memory type: (1) the user requested it via PROT_MTE passed to
	 * mmap() or mprotect() and (2) the corresponding vma supports MTE. We
	 * register (1) as VM_MTE in the vma->vm_flags and (2) as
	 * VM_MTE_ALLOWED. Note that the latter can only be set during the
	 * mmap() call since mprotect() does not accept MAP_* flags.
	 * Checking for VM_MTE only is sufficient since arch_validate_flags()
	 * does not permit (VM_MTE & !VM_MTE_ALLOWED).
	 */
	if (vm_flags & VM_MTE)
		prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);

	return __pgprot(prot);
}
EXPORT_SYMBOL(vm_get_page_prot);