#ifndef __PARISC_MMU_CONTEXT_H
#define __PARISC_MMU_CONTEXT_H

#include <linux/mm.h>
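/* mm.h no longer pulls in sched.h, so it has to be included explicitly. */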
#include <linux/sched.h>
#include <linux/atomic.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>

#include <asm-generic/mm_hooks.h>

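/*
 * PA-RISC keeps no per-CPU lazy-TLB state, so entering lazy TLB mode
 * needs no work.
 */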
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/* on PA-RISC, we actually have enough contexts to justify an allocator
 * for them.  prumpf */

extern unsigned long alloc_sid(void);
extern void free_sid(unsigned long);

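/*
 * A new mm gets its space ID here.  The BUG_ON() catches callers that
 * hand us an mm which is already shared (and so may already own an ID).
 */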
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        BUG_ON(atomic_read(&mm->mm_users) != 1);

        mm->context = alloc_sid();
        return 0;
}

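/* Hand the space ID back to the allocator and leave the mm without one. */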
static inline void
destroy_context(struct mm_struct *mm)
{
        free_sid(mm->context);
        mm->context = 0;
}

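/*
 * Convert a space ID into the value expected by the protection ID
 * registers.  Bit 0 of a PA-RISC PID register is the write-disable bit,
 * so the ID proper sits one bit higher; which way to shift depends on
 * how the space ID itself is packed (SPACEID_SHIFT).
 */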
static inline unsigned long __space_to_prot(mm_context_t context)
{
#if SPACEID_SHIFT == 0
        return context << 1;
#else
        return context >> (SPACEID_SHIFT - 1);
#endif
}

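/*
 * Install a context: the space ID goes into %sr3 (the space register
 * used for user-space accesses) and the matching protection ID into
 * %cr8 (PIDR1).
 */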
static inline void load_context(mm_context_t context)
{
        mtsp(context, 3);
        mtctl(__space_to_prot(context), 8);
}

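/*
 * %cr25 caches the physical address of the page directory for the TLB
 * miss handlers; reloading it together with the new context is all an
 * address-space switch takes.
 */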
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
{
        if (prev != next) {
                mtctl(__pa(next->pgd), 25);
                load_context(next->context);
        }
}

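/* Deactivating an mm needs no work; the space ID is freed in destroy_context(). */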
#define deactivate_mm(tsk,mm)	do { } while (0)

static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
        /*
         * Activate_mm is our one chance to allocate a space id
         * for a new mm created in the exec path.  There's also
         * some lazy tlb stuff, which is currently dead code, but
         * we only allocate a space id if one hasn't been allocated
         * already, so we should be OK.
         */
        BUG_ON(next == &init_mm);	/* Should never happen */

        if (next->context == 0)
                next->context = alloc_sid();

        switch_mm(prev, next, current);
}
#endif