/*
 *  include/asm-s390/pgalloc.h
 *
 *  S390 version
 *    Copyright (C) 1999, 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/gfp.h>
#include <linux/mm.h>

#define check_pgt_cache()       do {} while (0)

extern void diag10(unsigned long addr);

/*
 * Page allocation orders.
 */
#ifndef __s390x__
#define PTE_ALLOC_ORDER         0
#define PMD_ALLOC_ORDER         0
#define PGD_ALLOC_ORDER         1
#else /* __s390x__ */
#define PTE_ALLOC_ORDER         0
#define PMD_ALLOC_ORDER         2
#define PGD_ALLOC_ORDER         2
#endif /* __s390x__ */

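/*
 * Rough rationale for the orders (assuming 4KB pages, as usual on s390):
 * the 31-bit pgd is 8KB (two pages, order 1), the 64-bit pgd and pmd
 * tables are 16KB each (four pages, order 2), and a pte table always
 * fits into a single page (order 0).
 */
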
/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */

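/*
 * When the no-execute emulation is active (s390_noexec), every table
 * allocated here gets a "shadow" table of the same order allocated
 * alongside it.  The shadow is linked through page->lru.next of the
 * primary table's struct page and is retrieved again through the
 * get_shadow_pgd/pmd/pte/page() helpers; it is populated and freed in
 * lockstep with the primary table in the routines below.
 */
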
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
        int i;

        if (!pgd)
                return NULL;
        if (s390_noexec) {
                pgd_t *shadow_pgd = (pgd_t *)
                        __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
                struct page *page = virt_to_page(pgd);

                if (!shadow_pgd) {
                        free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
                        return NULL;
                }
                page->lru.next = (void *) shadow_pgd;
        }
        for (i = 0; i < PTRS_PER_PGD; i++)
#ifndef __s390x__
                pmd_clear(pmd_offset(pgd + i, i * PGDIR_SIZE));
#else
                pgd_clear(pgd + i);
#endif
        return pgd;
}

static inline void pgd_free(pgd_t *pgd)
{
        pgd_t *shadow_pgd = get_shadow_pgd(pgd);

        if (shadow_pgd)
                free_pages((unsigned long) shadow_pgd, PGD_ALLOC_ORDER);
        free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
}

#ifndef __s390x__
/*
 * page middle directory allocation/free routines.
 * We use pmd cache only on s390x, so these are dummy routines. This
 * code never triggers because the pgd will always be present.
 */
#define pmd_alloc_one(mm,address)       ({ BUG(); ((pmd_t *)2); })
#define pmd_free(x)                     do { } while (0)
#define __pmd_free_tlb(tlb,x)           do { } while (0)
#define pgd_populate(mm, pmd, pte)      BUG()
#define pgd_populate_kernel(mm, pmd, pte)       BUG()

#else /* __s390x__ */
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
        pmd_t *pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
        int i;

        if (!pmd)
                return NULL;
        if (s390_noexec) {
                pmd_t *shadow_pmd = (pmd_t *)
                        __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
                struct page *page = virt_to_page(pmd);

                if (!shadow_pmd) {
                        free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
                        return NULL;
                }
                page->lru.next = (void *) shadow_pmd;
        }
        for (i = 0; i < PTRS_PER_PMD; i++)
                pmd_clear(pmd + i);
        return pmd;
}

static inline void pmd_free(pmd_t *pmd)
{
        pmd_t *shadow_pmd = get_shadow_pmd(pmd);

        if (shadow_pmd)
                free_pages((unsigned long) shadow_pmd, PMD_ALLOC_ORDER);
        free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
}

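/*
 * Freeing a pmd goes through the mmu_gather: the gather is flushed
 * first so that no stale translations can still reference the table
 * once its pages are released.
 */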
#define __pmd_free_tlb(tlb,pmd)                 \
        do {                                    \
                tlb_flush_mmu(tlb, 0, 0);       \
                pmd_free(pmd);                  \
        } while (0)

static inline void
pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
        pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
        pgd_t *shadow_pgd = get_shadow_pgd(pgd);
        pmd_t *shadow_pmd = get_shadow_pmd(pmd);

        if (shadow_pgd && shadow_pmd)
                pgd_populate_kernel(mm, shadow_pgd, shadow_pmd);
        pgd_populate_kernel(mm, pgd, pmd);
}

#endif /* __s390x__ */

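/*
 * The offsets below match 256-entry s390 hardware page tables: on
 * 31-bit an entry is 4 bytes, so one 4KB page holds four page tables
 * and a Linux pmd spans four segment-table entries (pte, pte+256,
 * pte+512, pte+768); on 64-bit an entry is 8 bytes, so a page holds
 * two tables and a pmd carries two values (pmd_val and pmd_val1).
 */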
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#ifndef __s390x__
        pmd_val(pmd[0]) = _PAGE_TABLE + __pa(pte);
        pmd_val(pmd[1]) = _PAGE_TABLE + __pa(pte + 256);
        pmd_val(pmd[2]) = _PAGE_TABLE + __pa(pte + 512);
        pmd_val(pmd[3]) = _PAGE_TABLE + __pa(pte + 768);
#else /* __s390x__ */
        pmd_val(*pmd) = _PMD_ENTRY + __pa(pte);
        pmd_val1(*pmd) = _PMD_ENTRY + __pa(pte + 256);
#endif /* __s390x__ */
}

static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
{
        pte_t *pte = (pte_t *) page_to_phys(page);
        pmd_t *shadow_pmd = get_shadow_pmd(pmd);
        pte_t *shadow_pte = get_shadow_pte(pte);

        pmd_populate_kernel(mm, pmd, pte);
        if (shadow_pmd && shadow_pte)
                pmd_populate_kernel(mm, shadow_pmd, shadow_pte);
}

/*
 * page table entry allocation/free routines.
 */

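/*
 * A newly allocated pte page is cleared entry by entry with pte_clear(),
 * advancing vmaddr by one page per entry so each clear is given the
 * virtual address that entry will map.  __GFP_REPEAT asks the allocator
 * to retry rather than fail quickly.
 */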
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
{
        pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_REPEAT);
        int i;

        if (!pte)
                return NULL;
        if (s390_noexec) {
                pte_t *shadow_pte = (pte_t *)
                        __get_free_page(GFP_KERNEL | __GFP_REPEAT);
                struct page *page = virt_to_page(pte);

                if (!shadow_pte) {
                        free_page((unsigned long) pte);
                        return NULL;
                }
                page->lru.next = (void *) shadow_pte;
        }
        for (i = 0; i < PTRS_PER_PTE; i++) {
                pte_clear(mm, vmaddr, pte + i);
                vmaddr += PAGE_SIZE;
        }
        return pte;
}

static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
        pte_t *pte = pte_alloc_one_kernel(mm, vmaddr);

        if (pte)
                return virt_to_page(pte);
        return NULL;
}

static inline void pte_free_kernel(pte_t *pte)
{
        pte_t *shadow_pte = get_shadow_pte(pte);

        if (shadow_pte)
                free_page((unsigned long) shadow_pte);
        free_page((unsigned long) pte);
}

static inline void pte_free(struct page *pte)
{
        struct page *shadow_page = get_shadow_page(pte);

        if (shadow_page)
                __free_page(shadow_page);
        __free_page(pte);
}

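/*
 * __pte_free_tlb queues the pte page (and its shadow, if one exists) on
 * the mmu_gather instead of freeing it directly, so the page is only
 * released after the corresponding TLB entries have been flushed.
 */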
#define __pte_free_tlb(tlb, pte)                                \
({                                                              \
        struct mmu_gather *__tlb = (tlb);                       \
        struct page *__pte = (pte);                             \
        struct page *shadow_page = get_shadow_page(__pte);      \
        if (shadow_page)                                        \
                tlb_remove_page(__tlb, shadow_page);            \
        tlb_remove_page(__tlb, __pte);                          \
})

#endif /* _S390_PGALLOC_H */