/*
 *  arch/arm/include/asm/pgtable.h
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <linux/const.h>
#include <asm/proc-fns.h>

#ifndef CONFIG_MMU

#include <asm-generic/4level-fixup.h>
#include <asm/pgtable-nommu.h>

#else

#include <asm-generic/pgtable-nopud.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

#include <asm/tlbflush.h>

#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET		(8*1024*1024)
#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END		0xff800000UL
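
/*
 * Worked example (illustrative only; high_memory is platform dependent):
 * with high_memory at 0xd0000000 the rounding above yields
 *	VMALLOC_START = (0xd0000000 + 0x00800000) & ~0x007fffff = 0xd0800000
 * i.e. the full 8MB hole, while high_memory at 0xd0123000 also yields
 * 0xd0800000, leaving a hole of just under 7MB.
 */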

#define LIBRARY_TEXT_START	0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, pte_t);
extern void __pmd_error(const char *file, int line, pmd_t);
extern void __pgd_error(const char *file, int line, pgd_t);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte)
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd)
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd)

/*
 * This is the lowest virtual address we can permit any user space
 * mapping to be mapped at.  This is particularly important for
 * non-high vector CPUs.
 */
#define FIRST_USER_ADDRESS	(PAGE_SIZE * 2)

/*
 * Use TASK_SIZE as the ceiling argument for free_pgtables() and
 * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd
 * page shared between user and kernel).
 */
#ifdef CONFIG_ARM_LPAE
#define USER_PGTABLES_CEILING	TASK_SIZE
#endif

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable and bufferable bits based on memory policy,
 * as well as any architecture dependent bits like global/ASID and SMP
 * shared mapping bits.
 */
#define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG

extern pgprot_t		pgprot_user;
extern pgprot_t		pgprot_kernel;
extern pgprot_t		pgprot_hyp_device;
extern pgprot_t		pgprot_s2;
extern pgprot_t		pgprot_s2_device;

#define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))

#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE)
#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC	pgprot_kernel
#define PAGE_HYP		_MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_XN)
#define PAGE_HYP_EXEC		_MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY)
#define PAGE_HYP_RO		_MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_HYP_DEVICE		_MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
#define PAGE_S2			_MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
#define PAGE_S2_DEVICE		_MOD_PROT(pgprot_s2_device, L_PTE_S2_RDONLY)

#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)

#define __pgprot_modify(prot,mask,bits)	\
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_noncached(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)

#define pgprot_stronglyordered(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
#endif
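
/*
 * Illustrative (hypothetical driver code, not part of this header): a
 * driver's mmap() handler would typically apply one of the modifiers
 * above to the vma's protection before remapping, e.g.
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 */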

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform (a worked lookup example follows the table).
 * Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000  __PAGE_NONE
#define __P001  __PAGE_READONLY
#define __P010  __PAGE_COPY
#define __P011  __PAGE_COPY
#define __P100  __PAGE_READONLY_EXEC
#define __P101  __PAGE_READONLY_EXEC
#define __P110  __PAGE_COPY_EXEC
#define __P111  __PAGE_COPY_EXEC

#define __S000  __PAGE_NONE
#define __S001  __PAGE_READONLY
#define __S010  __PAGE_SHARED
#define __S011  __PAGE_SHARED
#define __S100  __PAGE_READONLY_EXEC
#define __S101  __PAGE_READONLY_EXEC
#define __S110  __PAGE_SHARED_EXEC
#define __S111  __PAGE_SHARED_EXEC
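
/*
 * Worked example (illustrative only; the actual lookup lives in the core
 * mm's protection_map[], not in this header): a private PROT_READ|PROT_WRITE
 * mapping has vm_flags VM_READ|VM_WRITE and so selects __P011 == __PAGE_COPY,
 * whereas the MAP_SHARED equivalent selects __S011 == __PAGE_SHARED; that is
 * what makes shared mappings writable in place while private ones are COWed.
 */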

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
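
/*
 * For illustration (classic 2-level layout, where PGDIR_SHIFT is 21):
 * pgd_index(0xc0000000) == 0xc0000000 >> 21 == 0x600, i.e. the kernel's
 * linear mapping starts at entry 1536 of swapper_pg_dir.  With LPAE the
 * shift, and therefore the index, differs.
 */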

#define pmd_none(pmd)		(!pmd_val(pmd))

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

#ifndef CONFIG_HIGHPTE
#define __pte_map(pmd)		pmd_page_vaddr(*(pmd))
#define __pte_unmap(pte)	do { } while (0)
#else
#define __pte_map(pmd)		(pte_t *)kmap_atomic(pmd_page(*(pmd)))
#define __pte_unmap(pte)	kunmap_atomic(pte)
#endif

#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(pmd,addr)	(pmd_page_vaddr(*(pmd)) + pte_index(addr))

#define pte_offset_map(pmd,addr)	(__pte_map(pmd) + pte_index(addr))
#define pte_unmap(pte)			__pte_unmap(pte)

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))

#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page), prot)

#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)
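
/*
 * Illustrative sketch only (the function name is made up and this is not
 * an API provided by this header): how the macros above compose into a
 * software walk from a kernel virtual address down to its pte.  Walking
 * user addresses would use pte_offset_map()/pte_unmap() instead of
 * pte_offset_kernel().
 */
static inline pte_t *example_kernel_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);	/* top-level entry */
	pud_t *pud = pud_offset(pgd, addr);	/* folded away on ARM */
	pmd_t *pmd = pmd_offset(pud, addr);

	if (pmd_none(*pmd) || pmd_bad(*pmd))	/* empty or section mapping */
		return NULL;
	return pte_offset_kernel(pmd, addr);
}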

#define pte_isset(pte, val)	((u32)(val) == (val) ? pte_val(pte) & (val) \
						: !!(pte_val(pte) & (val)))
#define pte_isclear(pte, val)	(!(pte_val(pte) & (val)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_present(pte)	(pte_isset((pte), L_PTE_PRESENT))
#define pte_valid(pte)		(pte_isset((pte), L_PTE_VALID))

/*
 * pte_accessible() identifies ptes that may be cached in the TLB.  When a
 * TLB invalidation is pending for the mm (e.g. a page being migrated while
 * it is simultaneously mprotect()ed to PROT_NONE), present-but-invalid
 * entries must still be treated as accessible; otherwise a valid pte is
 * sufficient.
 */
#define pte_accessible(mm, pte)	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))

#define pte_write(pte)		(pte_isclear((pte), L_PTE_RDONLY))
#define pte_dirty(pte)		(pte_isset((pte), L_PTE_DIRTY))
#define pte_young(pte)		(pte_isset((pte), L_PTE_YOUNG))
#define pte_exec(pte)		(pte_isclear((pte), L_PTE_XN))

#define pte_valid_user(pte)	\
	(pte_valid(pte) && pte_isset((pte), L_PTE_USER) && pte_young(pte))

#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
}
#else
extern void __sync_icache_dcache(pte_t pteval);
#endif

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	unsigned long ext = 0;

	if (addr < TASK_SIZE && pte_valid_user(pteval)) {
		if (!pte_special(pteval))
			__sync_icache_dcache(pteval);
		/* user mappings are non-global, i.e. tagged with the ASID */
		ext |= PTE_EXT_NG;
	}

	set_pte_ext(ptep, pteval, ext);
}

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_DIRTY));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_DIRTY));
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_YOUNG));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_YOUNG));
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_XN));
}

static inline pte_t pte_mknexec(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_XN));
}
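
/*
 * Illustrative composition of the helpers above (not an API of its own):
 *	pte = pte_wrprotect(pte_mkold(pte));
 * yields an entry that is read-only and "old", so the next write (or, with
 * the young bit clear, any access) will fault and let the core mm track
 * dirty/young state in software.
 */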

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
		L_PTE_NONE | L_PTE_VALID;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
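
/*
 * For illustration: pte_modify(pte, PAGE_READONLY) keeps the pfn, the
 * young/dirty state and the memory-type bits of the old entry and takes
 * only the bits in 'mask' above (XN, RDONLY, USER, NONE, VALID) from the
 * new protection, which is what mprotect()-style updates need.
 */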

/*
 * Encode and decode a swap entry.  Swap entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset ------------------------> < type -> 0 0
 *
 * This gives us up to 31 swap files and 128GB per swap file.  Note that
 * the offset field is always non-zero.
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
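
/*
 * Worked example (values chosen purely for illustration): __swp_entry(1, 0x100)
 * gives (1 << 2) | (0x100 << 7) == 0x8004, from which __swp_type() recovers 1
 * and __swp_offset() recovers 0x100.
 */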

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs.  This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pgtable_cache_init() do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */