2005-04-16 15:20:36 -07:00
/*
 *  include/asm-s390/pgalloc.h
 *
 *  S390 version
 *    Copyright (C) 1999, 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */
# ifndef _S390_PGALLOC_H
# define _S390_PGALLOC_H
# include <linux/threads.h>
# include <linux/gfp.h>
# include <linux/mm.h>
# define check_pgt_cache() do {} while (0)
/*
 * Allocation and release of region/segment (crst) tables and of
 * page tables; implemented out of line.  The int argument is a
 * "noexec" flag (callers below pass s390_noexec).
 */
unsigned long *crst_table_alloc(struct mm_struct *, int);
void crst_table_free(unsigned long *);

unsigned long *page_table_alloc(int);
void page_table_free(unsigned long *);
static inline void clear_table ( unsigned long * s , unsigned long val , size_t n )
2005-04-16 15:20:36 -07:00
{
2007-10-22 12:52:47 +02:00
* s = val ;
n = ( n / 256 ) - 1 ;
asm volatile (
# ifdef CONFIG_64BIT
" mvc 8(248,%0),0(%0) \n "
2006-09-20 15:59:37 +02:00
# else
2007-10-22 12:52:47 +02:00
" mvc 4(252,%0),0(%0) \n "
2006-09-20 15:59:37 +02:00
# endif
2007-10-22 12:52:47 +02:00
" 0: mvc 256(256,%0),0(%0) \n "
" la %0,256(%0) \n "
" brct %1,0b \n "
: " +a " ( s ) , " +d " ( n ) ) ;
2005-04-16 15:20:36 -07:00
}
2007-10-22 12:52:47 +02:00
/*
 * Initialize all 2048 entries of a crst table to the empty-entry
 * value; when a shadow table exists (noexec support) initialize
 * that one as well.
 */
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
	clear_table(crst, entry, sizeof(unsigned long) * 2048);
	crst = get_shadow_table(crst);
	if (crst)
		clear_table(crst, entry, sizeof(unsigned long) * 2048);
}
# ifndef __s390x__
2007-10-22 12:52:47 +02:00
static inline unsigned long pgd_entry_type ( struct mm_struct * mm )
{
return _SEGMENT_ENTRY_EMPTY ;
}
# define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
# define pmd_free(x) do { } while (0)
# define pgd_populate(mm, pmd, pte) BUG()
2007-02-05 21:18:17 +01:00
# define pgd_populate_kernel(mm, pmd, pte) BUG()
2007-10-22 12:52:47 +02:00
2005-04-16 15:20:36 -07:00
# else /* __s390x__ */
2007-10-22 12:52:47 +02:00
static inline unsigned long pgd_entry_type ( struct mm_struct * mm )
2005-04-16 15:20:36 -07:00
{
2007-10-22 12:52:47 +02:00
return _REGION3_ENTRY_EMPTY ;
2005-04-16 15:20:36 -07:00
}
2007-10-22 12:52:47 +02:00
static inline pmd_t * pmd_alloc_one ( struct mm_struct * mm , unsigned long vmaddr )
2005-04-16 15:20:36 -07:00
{
2007-10-22 12:52:47 +02:00
unsigned long * crst = crst_table_alloc ( mm , s390_noexec ) ;
if ( crst )
crst_table_init ( crst , _SEGMENT_ENTRY_EMPTY ) ;
return ( pmd_t * ) crst ;
2005-04-16 15:20:36 -07:00
}
2007-10-22 12:52:47 +02:00
# define pmd_free(pmd) crst_table_free((unsigned long *) pmd)
2005-04-16 15:20:36 -07:00
2007-10-22 12:52:47 +02:00
/* Point the pgd entry at the segment table pmd; no shadow handling. */
static inline void pgd_populate_kernel(struct mm_struct *mm,
				       pgd_t *pgd, pmd_t *pmd)
{
	pgd_val(*pgd) = _REGION3_ENTRY | __pa(pmd);
}
/*
 * Point the pgd entry at the segment table pmd, and mirror the link
 * in the shadow (noexec) tables when both shadows exist.
 */
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
	pgd_t *shadow_pgd;
	pmd_t *shadow_pmd;

	shadow_pgd = get_shadow_table(pgd);
	shadow_pmd = get_shadow_table(pmd);
	if (shadow_pgd && shadow_pmd)
		pgd_populate_kernel(mm, shadow_pgd, shadow_pmd);
	pgd_populate_kernel(mm, pgd, pmd);
}
# endif /* __s390x__ */
2007-10-22 12:52:47 +02:00
static inline pgd_t * pgd_alloc ( struct mm_struct * mm )
{
unsigned long * crst = crst_table_alloc ( mm , s390_noexec ) ;
if ( crst )
crst_table_init ( crst , pgd_entry_type ( mm ) ) ;
return ( pgd_t * ) crst ;
}
# define pgd_free(pgd) crst_table_free((unsigned long *) pgd)
2005-04-16 15:20:36 -07:00
/*
 * Link the page table pte into the segment table entries at pmd.
 * 31 bit: four consecutive pmd slots are filled, each covering the
 * next 256 pte entries.  64 bit: the entry pair of one pmd slot is
 * filled (pte and pte+256).
 */
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#ifndef __s390x__
	pmd_val(pmd[0]) = _SEGMENT_ENTRY + __pa(pte);
	pmd_val(pmd[1]) = _SEGMENT_ENTRY + __pa(pte + 256);
	pmd_val(pmd[2]) = _SEGMENT_ENTRY + __pa(pte + 512);
	pmd_val(pmd[3]) = _SEGMENT_ENTRY + __pa(pte + 768);
#else /* __s390x__ */
	pmd_val(*pmd)  = _SEGMENT_ENTRY + __pa(pte);
	pmd_val1(*pmd) = _SEGMENT_ENTRY + __pa(pte + 256);
#endif /* __s390x__ */
}
/*
 * Link a page table page into pmd, propagating the link into the
 * shadow (noexec) tables when both shadows are present.
 */
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
{
	pte_t *pte = (pte_t *) page_to_phys(page);
	pmd_t *shadow_pmd = get_shadow_table(pmd);
	pte_t *shadow_pte = get_shadow_pte(pte);

	pmd_populate_kernel(mm, pmd, pte);
	if (shadow_pmd && shadow_pte)
		pmd_populate_kernel(mm, shadow_pmd, shadow_pte);
}
/*
 * page table entry allocation/free routines.
 */
#define pte_alloc_one_kernel(mm, vmaddr) \
	((pte_t *) page_table_alloc(s390_noexec))
#define pte_alloc_one(mm, vmaddr) \
	virt_to_page(page_table_alloc(s390_noexec))

#define pte_free_kernel(pte) \
	page_table_free((unsigned long *) pte)
#define pte_free(pte) \
	page_table_free((unsigned long *) page_to_phys((struct page *) pte))
# endif /* _S390_PGALLOC_H */