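/*
 * Page table allocation and freeing for x86-64.
 */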
#ifndef _X86_64_PGALLOC_H
#define _X86_64_PGALLOC_H

#include <asm/pda.h>
#include <linux/threads.h>
#include <linux/mm.h>
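/*
 * The populate helpers wire one page table level into the next: each
 * stores the physical address of the lower-level table, or'ed with the
 * _PAGE_TABLE permission bits, into the given entry.
 */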
#define pmd_populate_kernel(mm, pmd, pte) \
		set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
#define pud_populate(mm, pud, pmd) \
		set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)))
#define pgd_populate(mm, pgd, pud) \
		set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)))
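/*
 * Unlike pmd_populate_kernel(), this takes the pte page as a struct
 * page, so the physical address is derived from its pfn.
 */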
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
{
	set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
}
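/*
 * pmd and pud tables each occupy one zeroed page; the BUG_ON()s below
 * catch attempts to free through a pointer that is not page-aligned.
 */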
static inline void pmd_free(pmd_t *pmd)
{
	BUG_ON((unsigned long)pmd & (PAGE_SIZE - 1));
	free_page((unsigned long)pmd);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}

static inline void pud_free(pud_t *pud)
{
	BUG_ON((unsigned long)pud & (PAGE_SIZE - 1));
	free_page((unsigned long)pud);
}
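/*
 * Every pgd is kept on a global list, chained through the lru field of
 * its struct page and protected by pgd_lock, so that code which must
 * visit all top-level page tables can walk them.
 */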
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	spin_lock(&pgd_lock);
	list_add(&page->lru, &pgd_list);
	spin_unlock(&pgd_lock);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	spin_lock(&pgd_lock);
	list_del(&page->lru);
	spin_unlock(&pgd_lock);
}
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	unsigned boundary;
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);

	if (!pgd)
		return NULL;
	pgd_list_add(pgd);
	/*
	 * Copy kernel pointers in from init.
	 * Could keep a freelist or slab cache of those because the kernel
	 * part never changes.
	 */
	boundary = pgd_index(__PAGE_OFFSET);
	memset(pgd, 0, boundary * sizeof(pgd_t));
	memcpy(pgd + boundary,
	       init_level4_pgt + boundary,
	       (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
	return pgd;
}

static inline void pgd_free(pgd_t *pgd)
{
	BUG_ON((unsigned long)pgd & (PAGE_SIZE - 1));
	pgd_list_del(pgd);
	free_page((unsigned long)pgd);
}
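/*
 * pte pages also come zeroed from the allocator. The kernel variant
 * returns the virtual address, the user variant the struct page.
 */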
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}

static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	void *p = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);

	if (!p)
		return NULL;
	return virt_to_page(p);
}
/* Should really implement gc for free page table pages. This could be
   done with a reference count in struct page. */
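/*
 * A minimal sketch of that idea, assuming the generic page refcount
 * were reused for page table pages (hypothetical, not what this file
 * does): each sharer would get_page() the table, and freeing would
 * reduce to dropping a reference:
 *
 *	static inline void pte_free(struct page *pte)
 *	{
 *		put_page(pte);	// page is freed when the last ref drops
 *	}
 */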
static inline void pte_free_kernel(pte_t *pte)
{
	BUG_ON((unsigned long)pte & (PAGE_SIZE - 1));
	free_page((unsigned long)pte);
}

static inline void pte_free(struct page *pte)
{
	__free_page(pte);
}
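/*
 * The tlb variants hand the page to the mmu_gather batch instead of
 * freeing it immediately, so it is only released after the TLB entries
 * pointing at it have been flushed.
 */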
#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
#define __pmd_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
#define __pud_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))

#endif /* _X86_64_PGALLOC_H */