#ifndef _ASM_IA64_PGALLOC_H
#define _ASM_IA64_PGALLOC_H

/*
 * This file contains the functions and defines necessary to allocate
 * page tables.
 *
 * This hopefully works with any (fixed) IA-64 page size, as defined
 * in <asm/page.h> (currently 8192).
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2000, Goutham Rao <goutham.rao@intel.com>
 */

#include <linux/compiler.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/threads.h>

#include <asm/mmu_context.h>
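
/*
 * Page-table pages are cached on per-CPU "quicklists": singly linked
 * lists of free, already-zeroed pages.  The list head lives in a
 * per-CPU variable and each free page stores the pointer to the next
 * free page in its first word.  __pgtable_quicklist_size tracks the
 * per-CPU list length so check_pgt_cache() (declared below) can trim
 * an oversized cache.
 */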
DECLARE_PER_CPU(unsigned long *, __pgtable_quicklist);
#define pgtable_quicklist __ia64_per_cpu_var(__pgtable_quicklist)
DECLARE_PER_CPU(long, __pgtable_quicklist_size);
#define pgtable_quicklist_size __ia64_per_cpu_var(__pgtable_quicklist_size)

static inline long pgtable_quicklist_total_size(void)
{
	long ql_size = 0;
	int cpuid;

	for_each_online_cpu(cpuid) {
		ql_size += per_cpu(__pgtable_quicklist_size, cpuid);
	}
	return ql_size;
}
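
/*
 * Pop a zeroed page off this CPU's quicklist; fall back to the page
 * allocator if the list is empty.  Preemption is disabled while the
 * per-CPU list is manipulated.
 */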
static inline void *pgtable_quicklist_alloc(void)
{
	unsigned long *ret = NULL;

	preempt_disable();

	ret = pgtable_quicklist;
	if (likely(ret != NULL)) {
		pgtable_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		--pgtable_quicklist_size;
		preempt_enable();
	} else {
		preempt_enable();
		ret = (unsigned long *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	}

	return ret;
}
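
/*
 * Return a page-table page to this CPU's quicklist.  On NUMA, pages
 * belonging to a remote node are handed straight back to the page
 * allocator so the cache only ever holds node-local pages.
 */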
static inline void pgtable_quicklist_free(void *pgtable_entry)
{
#ifdef CONFIG_NUMA
	unsigned long nid = page_to_nid(virt_to_page(pgtable_entry));

	if (unlikely(nid != numa_node_id())) {
		free_page((unsigned long)pgtable_entry);
		return;
	}
#endif

	preempt_disable();
	*(unsigned long *)pgtable_entry = (unsigned long)pgtable_quicklist;
	pgtable_quicklist = (unsigned long *)pgtable_entry;
	++pgtable_quicklist_size;
	preempt_enable();
}
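
/*
 * Every page-table level (pgd, pud, pmd, pte) occupies a single page,
 * so all of the allocators below share the quicklist above.
 */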

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return pgtable_quicklist_alloc();
}

static inline void pgd_free(pgd_t *pgd)
{
	pgtable_quicklist_free(pgd);
}

#ifdef CONFIG_PGTABLE_4
static inline void
pgd_populate(struct mm_struct *mm, pgd_t *pgd_entry, pud_t *pud)
{
	pgd_val(*pgd_entry) = __pa(pud);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return pgtable_quicklist_alloc();
}

static inline void pud_free(pud_t *pud)
{
	pgtable_quicklist_free(pud);
}
#define __pud_free_tlb(tlb, pud)	pud_free(pud)
#endif /* CONFIG_PGTABLE_4 */

static inline void
pud_populate(struct mm_struct *mm, pud_t *pud_entry, pmd_t *pmd)
{
	pud_val(*pud_entry) = __pa(pmd);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return pgtable_quicklist_alloc();
}

static inline void pmd_free(pmd_t *pmd)
{
	pgtable_quicklist_free(pmd);
}

#define __pmd_free_tlb(tlb, pmd)	pmd_free(pmd)
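
/*
 * Both helpers below store the physical address of a pte page in the
 * pmd entry: pmd_populate() takes a user pte page as a struct page,
 * pmd_populate_kernel() takes a kernel pte page by virtual address.
 */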
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd_entry, struct page *pte)
{
	pmd_val(*pmd_entry) = page_to_phys(pte);
}

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd_entry, pte_t *pte)
{
	pmd_val(*pmd_entry) = __pa(pte);
}

static inline struct page *pte_alloc_one(struct mm_struct *mm,
					 unsigned long addr)
{
	return virt_to_page(pgtable_quicklist_alloc());
}

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long addr)
{
	return pgtable_quicklist_alloc();
}

static inline void pte_free(struct page *pte)
{
	pgtable_quicklist_free(page_address(pte));
}

static inline void pte_free_kernel(pte_t *pte)
{
	pgtable_quicklist_free(pte);
}

#define __pte_free_tlb(tlb, pte)	pte_free(pte)

extern void check_pgt_cache(void);

#endif /* _ASM_IA64_PGALLOC_H */