/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2001, 2003 by Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <linux/config.h>
#include <linux/highmem.h>
#include <linux/mm.h>
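
/* Point a pmd entry at a kernel pte table, given by its virtual address. */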
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
	pte_t *pte)
{
	set_pmd(pmd, __pmd((unsigned long)pte));
}
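
/* Point a pmd entry at a user pte page, using the page's virtual address. */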
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
	struct page *pte)
{
	set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
}

/*
 * Initialize a new pmd table with invalid pointers.
 */
extern void pmd_init(unsigned long page, unsigned long pagetable);

#ifdef CONFIG_64BIT
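
/* Point a pud entry at a pmd table (64-bit builds only). */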
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	set_pud(pud, __pud((unsigned long)pmd));
}
#endif

/*
 * Initialize a new pgd / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);
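
/*
 * Allocate a fresh pgd: pgd_init() fills it with invalid pointers, then the
 * kernel entries (from USER_PTRS_PER_PGD up) are copied from init_mm so that
 * kernel mappings are shared by every address space.
 */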
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *ret, *init;

	ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
	if (ret) {
		init = pgd_offset(&init_mm, 0);
		pgd_init((unsigned long)ret);
		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}

	return ret;
}

static inline void pgd_free(pgd_t *pgd)
{
	free_pages((unsigned long)pgd, PGD_ORDER);
}
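
/* Allocate a zeroed pte table for kernel mappings; returns its virtual address. */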
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
	unsigned long address)
{
	pte_t *pte;

	pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, PTE_ORDER);

	return pte;
}
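
/* Allocate and clear a pte page for user mappings, returned as a struct page. */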
static inline struct page *pte_alloc_one(struct mm_struct *mm,
	unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
	if (pte)
		clear_highpage(pte);

	return pte;
}

static inline void pte_free_kernel(pte_t *pte)
{
	free_pages((unsigned long)pte, PTE_ORDER);
}

static inline void pte_free(struct page *pte)
{
	__free_pages(pte, PTE_ORDER);
}

#define __pte_free_tlb(tlb,pte)	tlb_remove_page((tlb),(pte))

#ifdef CONFIG_32BIT

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */
#define pmd_free(x)			do { } while (0)
#define __pmd_free_tlb(tlb,x)		do { } while (0)

#endif

#ifdef CONFIG_64BIT
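
/*
 * Allocate a pmd table and let pmd_init() point every entry at
 * invalid_pte_table, so unused slots resolve to the shared invalid pte table.
 */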
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *pmd;

	pmd = (pmd_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PMD_ORDER);
	if (pmd)
		pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
	return pmd;
}

static inline void pmd_free(pmd_t *pmd)
{
	free_pages((unsigned long)pmd, PMD_ORDER);
}

#define __pmd_free_tlb(tlb,x)	pmd_free(x)

#endif

#define check_pgt_cache()	do { } while (0)

#endif /* _ASM_PGALLOC_H */