/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2001, 2003 by Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/sched.h>
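
/*
 * pmd_populate{,_kernel} hook a freshly allocated PTE table into a pmd
 * entry: the _kernel variant takes a raw pte_t * for kernel page tables,
 * the other takes a pgtable_t page used for user mappings.
 */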
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
	pte_t *pte)
{
	set_pmd(pmd, __pmd((unsigned long)pte));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
	pgtable_t pte)
{
	set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
}

#define pmd_pgtable(pmd)	pmd_page(pmd)

/*
 * Initialize a new pmd table with invalid pointers.
 */
extern void pmd_init(unsigned long page, unsigned long pagetable);

#ifdef CONFIG_64BIT

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	set_pud(pud, __pud((unsigned long)pmd));
}
#endif

/*
 * Initialize a new pgd / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);
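
/*
 * pgd_alloc() hands out a new page directory: the table is first filled
 * with invalid pointers by pgd_init(), then the kernel entries (from
 * USER_PTRS_PER_PGD up) are copied from init_mm's pgd so kernel mappings
 * stay shared across processes.
 */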
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *ret, *init;

	ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
	if (ret) {
		init = pgd_offset(&init_mm, 0UL);
		pgd_init((unsigned long)ret);
		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}

	return ret;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_pages((unsigned long)pgd, PGD_ORDER);
}
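
/* Allocate a zeroed PTE table for kernel mappings (an order-PTE_ORDER block). */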
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
	unsigned long address)
{
	pte_t *pte;

	pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, PTE_ORDER);

	return pte;
}
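
/*
 * Allocate a PTE table for user mappings as a struct page; the page is
 * cleared with clear_highpage() and passed through pgtable_page_ctor()
 * before it is handed back as a pgtable_t.
 */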
static inline struct page *pte_alloc_one(struct mm_struct *mm,
	unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
	if (pte) {
		clear_highpage(pte);
		pgtable_page_ctor(pte);
	}
	return pte;
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_pages((unsigned long)pte, PTE_ORDER);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	pgtable_page_dtor(pte);
	__free_pages(pte, PTE_ORDER);
}
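
/*
 * __pte_free_tlb() is the mmu_gather path: run the pgtable destructor and
 * let the TLB batching code free the page once stale entries are flushed.
 */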

#define __pte_free_tlb(tlb,pte)				\
do {							\
	pgtable_page_dtor(pte);				\
	tlb_remove_page((tlb), pte);			\
} while (0)

#ifdef CONFIG_32BIT

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */
#define pmd_free(mm, x)			do { } while (0)
#define __pmd_free_tlb(tlb, x)		do { } while (0)

#endif

#ifdef CONFIG_64BIT

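/*
 * On 64-bit kernels the pmd level is a real table: allocate it and point
 * every entry at invalid_pte_table until it gets populated.
 */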
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *pmd;

	pmd = (pmd_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PMD_ORDER);
	if (pmd)
		pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
	return pmd;
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	free_pages((unsigned long)pmd, PMD_ORDER);
}

#define __pmd_free_tlb(tlb, x)	pmd_free((tlb)->mm, x)

#endif

#define check_pgt_cache()	do { } while (0)

extern void pagetable_init(void);

#endif /* _ASM_PGALLOC_H */