#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H

/* asm/pgtable.h:  Defines and functions used to work
 * with Sparc page tables.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/const.h>

#ifndef __ASSEMBLY__
#include <asm-generic/4level-fixup.h>

#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <asm/types.h>
#include <asm/pgtsrmmu.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/cpu_type.h>

struct vm_area_struct;
struct page;

void load_mmu(void);
unsigned long calc_highpages(void);
unsigned long __init bootmem_init(unsigned long *pages_avail);

#define pte_ERROR(e)	__builtin_trap()
#define pmd_ERROR(e)	__builtin_trap()
#define pgd_ERROR(e)	__builtin_trap()

#define PMD_SHIFT		22
#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PMD_ALIGN(__addr)	(((__addr) + ~PMD_MASK) & PMD_MASK)
#define PGDIR_SHIFT		SRMMU_PGDIR_SHIFT
#define PGDIR_SIZE		SRMMU_PGDIR_SIZE
#define PGDIR_MASK		SRMMU_PGDIR_MASK
#define PTRS_PER_PTE		1024
#define PTRS_PER_PMD		SRMMU_PTRS_PER_PMD
#define PTRS_PER_PGD		SRMMU_PTRS_PER_PGD
#define USER_PTRS_PER_PGD	(PAGE_OFFSET / SRMMU_PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL
#define PTE_SIZE		(PTRS_PER_PTE*4)

#define PAGE_NONE	SRMMU_PAGE_NONE
#define PAGE_SHARED	SRMMU_PAGE_SHARED
#define PAGE_COPY	SRMMU_PAGE_COPY
#define PAGE_READONLY	SRMMU_PAGE_RDONLY
#define PAGE_KERNEL	SRMMU_PAGE_KERNEL

/* Top-level page directory - dummy used by init_mm.
 * srmmu.c will assign the real one (which is dynamically sized). */
#define swapper_pg_dir NULL

void paging_init(void);

extern unsigned long ptr_in_current_pgd;

/*         xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED

/* First physical page can be anywhere; the following is needed so that
 * va-->pa and vice versa conversions work properly without a performance
 * hit for all __pa()/__va() operations.
 */
extern unsigned long phys_base;
extern unsigned long pfn_base;
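
/* A minimal sketch (editorial illustration only; the real definitions
 * live in asm/page.h) of how phys_base makes the linear-mapping
 * arithmetic work:
 *
 *	__pa(vaddr) ~ (unsigned long)(vaddr) - PAGE_OFFSET + phys_base
 *	__va(paddr) ~ (void *)((unsigned long)(paddr) - phys_base + PAGE_OFFSET)
 */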

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page;

#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))

/*
 * In general all page table modifications should use the V8 atomic
 * swap instruction.  This ensures the mmu and the cpu are in sync
 * with respect to ref/mod bits in the page tables.
 */
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
	__asm__ __volatile__("swap [%2], %0" :
			"=&r" (value) : "0" (value), "r" (addr) : "memory");
	return value;
}
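
/* Illustration (hypothetical, not part of the API): a plain store such as
 *
 *	*addr = value;
 *
 * could lose a ref/mod bit that the MMU sets between the CPU's read and
 * write of the word; the swap above exchanges the whole word atomically
 * instead.
 */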

/* Certain architectures need to do special things when ptes
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	srmmu_swap((unsigned long *) ptep, pte_val(pteval));
}

#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

static inline int srmmu_device_memory(unsigned long x)
{
	return ((x & 0xF0000000) != 0);
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();
	return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT - 4));
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	if (srmmu_device_memory(pgd_val(pgd))) {
		return ~0;
	} else {
		unsigned long v = pgd_val(pgd) & SRMMU_PTD_PMASK;
		return (unsigned long)__nocache_va(v << 4);
	}
}

static inline int pte_present(pte_t pte)
{
	return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE);
}

static inline int pte_none(pte_t pte)
{
	return !pte_val(pte);
}

static inline void __pte_clear(pte_t *ptep)
{
	set_pte(ptep, __pte(0));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	__pte_clear(ptep);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pmd_present(pmd_t pmd)
{
	return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline int pmd_none(pmd_t pmd)
{
	return !pmd_val(pmd);
}

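/* Editorial note: on SRMMU the software PMD is wider than the hardware
 * page-table pointer, so a single pmd_t carries
 * PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE hardware PTD words (pmdv[]);
 * pmd_clear() below therefore has to wipe each of them.
 */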
static inline void pmd_clear(pmd_t *pmdp)
{
	int i;

	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
		set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
}

static inline int pgd_none(pgd_t pgd)
{
	return !(pgd_val(pgd) & 0xFFFFFFF);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pgd_present(pgd_t pgd)
{
	return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pte((pte_t *)pgdp, __pte(0));
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & SRMMU_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & SRMMU_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & SRMMU_REF;
}

static inline int pte_special(pte_t pte)
{
	return 0;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_WRITE);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_REF);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_WRITE);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_REF);
}

#define pte_mkspecial(pte)	(pte)

#define pfn_pte(pfn, prot)	mk_pte(pfn_to_page(pfn), prot)

static inline unsigned long pte_pfn(pte_t pte)
{
	if (srmmu_device_memory(pte_val(pte))) {
		/* Just return something that will cause
		 * pfn_valid() to return false.  This makes
		 * copy_one_pte() just copy the PTE over
		 * directly.
		 */
		return ~0UL;
	}
	return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT - 4);
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	return __pte((page_to_pfn(page) << (PAGE_SHIFT - 4)) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{
	return __pte(((page) >> 4) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
	return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
}

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	prot &= ~__pgprot(SRMMU_CACHE);
	return prot;
}
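
/* Typical use (sketch, assuming a driver mmap() handler):
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return io_remap_pfn_range(vma, ...);
 */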

static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & SRMMU_CHG_MASK) |
		pgprot_val(newprot));
}
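
/* Sketch: a protection change keeps the page number and the ref/mod
 * state (everything under SRMMU_CHG_MASK), e.g.:
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 *	set_pte_at(mm, addr, ptep, pte);
 */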

#define pgd_index(address) ((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table. */
static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
{
	return (pmd_t *) pgd_page_vaddr(*dir) +
		((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
}

/* Find an entry in the third-level page table. */
pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address);
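
/* A minimal three-level walk sketch (editorial illustration only; no
 * locking shown, and 'addr' is assumed to be a mapped kernel address):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *
 *	if (!pgd_none(*pgd) && !pgd_bad(*pgd)) {
 *		pmd_t *pmd = pmd_offset(pgd, addr);
 *		if (pmd_present(*pmd)) {
 *			pte_t *pte = pte_offset_kernel(pmd, addr);
 *			if (pte_present(*pte))
 *				... use pte_pfn(*pte) ...
 *		}
 *	}
 */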

/*
 * This shortcut works on sun4m (and sun4d) because the nocache area is static.
 */
#define pte_offset_map(d, a)	pte_offset_kernel(d, a)
#define pte_unmap(pte)		do { } while (0)

struct seq_file;
void mmu_info(struct seq_file *m);

/* Fault handler stuff... */
#define FAULT_CODE_PROT		0x1
#define FAULT_CODE_WRITE	0x2
#define FAULT_CODE_USER		0x4

#define update_mmu_cache(vma, address, ptep) do { } while (0)

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
		      unsigned long xva, unsigned int len);
void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len);

/* Encode and de-code a swap entry */
static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) {
		(type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
		| (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}
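
/* Round-trip sketch (illustration only): provided 'type' and 'offset'
 * fit within their masks, an entry built here decodes back unchanged:
 *
 *	swp_entry_t e = __swp_entry(type, offset);
 *	BUG_ON(__swp_type(e) != type || __swp_offset(e) != offset);
 */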

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

static inline unsigned long
__get_phys(unsigned long addr)
{
	switch (sparc_cpu_model) {
	case sun4m:
	case sun4d:
		return ((srmmu_get_pte(addr) & 0xffffff00) << 4);
	default:
		return 0;
	}
}

static inline int
__get_iospace(unsigned long addr)
{
	switch (sparc_cpu_model) {
	case sun4m:
	case sun4d:
		return (srmmu_get_pte(addr) >> 28);
	default:
		return -1;
	}
}

extern unsigned long *sparc_valid_addr_bitmap;

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) \
	(test_bit(__pa((unsigned long)(addr)) >> 20, sparc_valid_addr_bitmap))
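
/* (Each bit in sparc_valid_addr_bitmap covers one megabyte of physical
 * address space, hence the >> 20.)
 */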

/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits.  These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			(pfn & 0x0fffffffUL)

int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
		    unsigned long, pgprot_t);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	unsigned long long offset, space, phys_base;

	offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
	space = GET_IOSPACE(pfn);
	phys_base = offset | (space << 32ULL);

	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
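
/* Usage sketch (hypothetical driver code): map 'len' bytes of I/O
 * space 'space' at physical offset 'paddr' into userspace:
 *
 *	unsigned long pfn = MK_IOSPACE_PFN(space, paddr >> PAGE_SHIFT);
 *	err = io_remap_pfn_range(vma, vma->vm_start, pfn, len,
 *				 pgprot_noncached(vma->vm_page_prot));
 */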

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({									  \
	int __changed = !pte_same(*(__ptep), __entry);			  \
	if (__changed) {						  \
		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
		flush_tlb_page(__vma, __address);			  \
	}								  \
	__changed;							  \
})

#include <asm-generic/pgtable.h>

#endif /* !(__ASSEMBLY__) */

#define VMALLOC_START		_AC(0xfe600000,UL)
#define VMALLOC_END		_AC(0xffc00000,UL)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* !(_SPARC_PGTABLE_H) */