/*
 * Copyright (C) 2013 Altera Corporation
 * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 2009 Wind River Systems Inc
 *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 * Copyright (C) 2004 Microtronix Datacom Ltd
 *
 * based on arch/m68k/mm/init.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/binfmts.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/cpuinfo.h>
#include <asm/processor.h>

pgd_t *pgd_current;

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S. It takes no parameters: the
 * zone limits are derived from max_mapnr, and the boot memory is
 * handed over to the buddy allocator via free_area_init().
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };

	pagetable_init();
	pgd_current = swapper_pg_dir;

	max_zone_pfn[ZONE_NORMAL] = max_mapnr;

	/* pass the memory from the bootmem allocator to the main allocator */
	free_area_init(max_zone_pfn);

	flush_dcache_range((unsigned long)empty_zero_page,
			(unsigned long)empty_zero_page + PAGE_SIZE);
}
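
/*
 * mem_init() releases all boot-time memory to the buddy allocator and
 * records the top of usable RAM in high_memory.
 */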
void __init mem_init(void)
{
	unsigned long end_mem = memory_end; /* this must not include
						kernel stack at top */

	pr_debug("mem_init: start=%lx, end=%lx\n", memory_start, memory_end);

	end_mem &= PAGE_MASK;
	high_memory = __va(end_mem);

	/* this will put all memory onto the freelists */
	memblock_free_all();
}
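
/*
 * mmu_init() runs once during boot; flushing the TLB discards any
 * stale entries left behind by the early boot code.
 */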
void __init mmu_init(void)
{
	flush_tlb_all();
}
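
/*
 * swapper_pg_dir is the kernel's reference page directory; every mm
 * shares its kernel half. invalid_pte_table serves as the always-empty
 * PTE table that unused page directory entries can point at, so page
 * table walks never dereference a NULL pointer.
 */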
pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
pte_t invalid_pte_table[PTRS_PER_PTE] __aligned(PAGE_SIZE);

static struct page *kuser_page[1];
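
/*
 * Allocate a single page and copy the kuser helper code into it. The
 * page is later mapped read/exec-only into every user process at
 * KUSER_BASE by arch_setup_additional_pages() below.
 */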
static int alloc_kuser_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
	unsigned long vpage;

	vpage = get_zeroed_page(GFP_ATOMIC);
	if (!vpage)
		return -ENOMEM;

	/* Copy kuser helpers */
	memcpy((void *)vpage, __kuser_helper_start, kuser_sz);
	flush_icache_range(vpage, vpage + KUSER_SIZE);

	kuser_page[0] = virt_to_page(vpage);

	return 0;
}
arch_initcall(alloc_kuser_page);
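
/*
 * Called during exec to map the kuser helper page into the new process
 * at the fixed address KUSER_BASE.
 */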
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	mmap_write_lock(mm);

	/* Map kuser helpers to user space address */
	ret = install_special_mapping(mm, KUSER_BASE, KUSER_SIZE,
				      VM_READ | VM_EXEC | VM_MAYREAD |
				      VM_MAYEXEC, kuser_page);
	mmap_write_unlock(mm);

	return ret;
}
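
/* Report the kuser helper mapping as "[kuser]" in /proc/<pid>/maps. */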
const char *arch_vma_name(struct vm_area_struct *vma)
{
	return (vma->vm_start == KUSER_BASE) ? "[kuser]" : NULL;
}
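
/*
 * MKP(x, w, r) (from <asm/pgtable.h>) builds a pgprot_t with the given
 * execute/write/read permissions. Note that private (non-shared)
 * mappings never receive hardware write permission here, so the first
 * write faults and is handled as copy-on-write.
 */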
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= MKP(0, 0, 0),
	[VM_READ]					= MKP(0, 0, 1),
	[VM_WRITE]					= MKP(0, 0, 0),
	[VM_WRITE | VM_READ]				= MKP(0, 0, 1),
	[VM_EXEC]					= MKP(1, 0, 0),
	[VM_EXEC | VM_READ]				= MKP(1, 0, 1),
	[VM_EXEC | VM_WRITE]				= MKP(1, 0, 0),
	[VM_EXEC | VM_WRITE | VM_READ]			= MKP(1, 0, 1),
	[VM_SHARED]					= MKP(0, 0, 0),
	[VM_SHARED | VM_READ]				= MKP(0, 0, 1),
	[VM_SHARED | VM_WRITE]				= MKP(0, 1, 0),
	[VM_SHARED | VM_WRITE | VM_READ]		= MKP(0, 1, 1),
	[VM_SHARED | VM_EXEC]				= MKP(1, 0, 0),
	[VM_SHARED | VM_EXEC | VM_READ]			= MKP(1, 0, 1),
	[VM_SHARED | VM_EXEC | VM_WRITE]		= MKP(1, 1, 0),
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= MKP(1, 1, 1)
};
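
/* Generate the generic vm_get_page_prot() from the table above. */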
DECLARE_VM_GET_PAGE_PROT