2005-04-16 15:20:36 -07:00
#ifndef _X8664_TLBFLUSH_H
#define _X8664_TLBFLUSH_H

#include <linux/mm.h>
Detach sched.h from mm.h

The first thing mm.h does is include sched.h, solely for the can_do_mlock()
inline function, which dereferences "current". By dealing with can_do_mlock(),
mm.h can be detached from sched.h, which is good. See below for why.

This patch
a) removes the unconditional inclusion of sched.h from mm.h
b) makes can_do_mlock() a normal function in mm/mlock.c (sketched just below)
c) exports can_do_mlock() so compilation doesn't break
d) adds sched.h inclusions back to files that were getting it indirectly
e) adds less bloated headers (asm/signal.h, jiffies.h) to some files that were
   getting them indirectly
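A minimal sketch of what (b) and (c) amount to, assuming the usual
rlimit/capability check; the exact body in mm/mlock.c may differ:

/* mm/mlock.c (sketch): can_do_mlock() as an ordinary exported function.
 * The check below is an approximation of the real body; "current" is
 * dereferenced here, which is what forced mm.h to pull in sched.h. */
#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>

int can_do_mlock(void)
{
	if (current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur != 0)
		return 1;
	if (capable(CAP_IPC_LOCK))
		return 1;
	return 0;
}
EXPORT_SYMBOL(can_do_mlock);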
Net result is:
a) mm.h users get less code to open, read, preprocess, parse, ... if
   they don't need sched.h
b) sched.h stops being a dependency for a significant number of files:
   on x86_64 allmodconfig, touching sched.h results in a recompile of 4083
   files; after the patch it's only 3744 (-8.3%).
Cross-compile tested on
all arm defconfigs, all mips defconfigs, all powerpc defconfigs,
alpha alpha-up
arm
i386 i386-up i386-defconfig i386-allnoconfig
ia64 ia64-up
m68k
mips
parisc parisc-up
powerpc powerpc-up
s390 s390-up
sparc sparc-up
sparc64 sparc64-up
um-x86_64
x86_64 x86_64-up x86_64-defconfig x86_64-allnoconfig
as well as my two usual configs.
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-05-21 01:22:52 +04:00
#include <linux/sched.h>
2005-04-16 15:20:36 -07:00
#include <asm/processor.h>
[PATCH] x86-64: Remove duplicated code for reading control registers
On Tue, Mar 13, 2007 at 05:33:09AM -0700, Randy.Dunlap wrote:
> On Tue, 13 Mar 2007, Glauber de Oliveira Costa wrote:
>
> > Tiny cleanup:
> >
> > In x86_64, the same functions for reading cr3 and writing cr{3,4} are
> > defined in tlbflush.h and system.h, with just a name change.
> > The only difference is the clobbering of memory, which seems a safe, and
> > even needed, change for write_cr4(). This patch removes the duplicate.
> > write_cr3() is moved to system.h for consistency.
>
> missing patch.....
>
Thanks. Attached now.
--
Glauber de Oliveira Costa
Red Hat Inc.
"Free as in Freedom"
Signed-off-by: Andi Kleen <ak@suse.de>
2007-05-02 19:27:06 +02:00
#include <asm/system.h>
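For reference, the accessors being deduplicated are thin inline-asm wrappers
along these lines (a sketch in the system.h style, with the memory clobber the
commit message mentions; not necessarily the exact upstream text):

static inline unsigned long read_cr3(void)
{
	unsigned long cr3;

	asm volatile("movq %%cr3,%0" : "=r" (cr3));
	return cr3;
}

static inline void write_cr3(unsigned long val)
{
	asm volatile("movq %0,%%cr3" : : "r" (val) : "memory");
}

static inline void write_cr4(unsigned long val)
{
	asm volatile("movq %0,%%cr4" : : "r" (val) : "memory");
}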
2006-09-26 10:52:29 +02:00
static inline void __flush_tlb(void)
{
2007-05-02 19:27:06 +02:00
	write_cr3(read_cr3());
2006-09-26 10:52:29 +02:00
}
static inline void __flush_tlb_all(void)
{
2007-05-02 19:27:06 +02:00
	unsigned long cr4 = read_cr4();

	write_cr4(cr4 & ~X86_CR4_PGE);	/* clear PGE */
	write_cr4(cr4);			/* write old PGE again and flush TLBs */
2006-09-26 10:52:29 +02:00
}
2005-04-16 15:20:36 -07:00
#define __flush_tlb_one(addr) \
2006-09-26 10:52:29 +02:00
	__asm__ __volatile__("invlpg (%0)" : : "r" (addr) : "memory")
2005-04-16 15:20:36 -07:00
/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *
2005-07-28 21:15:35 -07:00
 * x86-64 can only flush individual pages or full VMs. For a range flush
 * we always do the full VM. Might be worth trying if for a small
 * range a few INVLPGs in a row are a win.
2005-04-16 15:20:36 -07:00
*/
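As an illustration of that "few INVLPGs in a row" idea (purely hypothetical,
not part of this header; the 32-page cutoff is an invented tuning value):

static inline void __flush_tlb_range_sketch(unsigned long start,
					    unsigned long end)
{
	unsigned long addr;

	if (((end - start) >> PAGE_SHIFT) > 32) {
		__flush_tlb();		/* range too big: flush the whole context */
		return;
	}
	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE)
		__flush_tlb_one(addr);	/* one INVLPG per page */
}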
#ifndef CONFIG_SMP

#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb_all()
#define local_flush_tlb() __flush_tlb()
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
	unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb();
}
#else

#include <asm/smp.h>

#define local_flush_tlb() \
	__flush_tlb()

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);

#define flush_tlb() flush_tlb_current_task()

static inline void flush_tlb_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2
2005-09-12 18:49:24 +02:00
/* Roughly an IPI every 20MB with 4k pages for freeing page table
   ranges. Cost is about 42k of memory for each CPU. */
#define ARCH_FREE_PTE_NR 5350
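The figures in that comment presumably work out as follows (my arithmetic,
not stated in the source):

/* 5350 entries * 4 KiB/page      ~= 20.9 MB of pages freed between IPIs;
 * 5350 * 8-byte page pointers    ~= 42 KB of batch storage per CPU.      */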
2005-04-16 15:20:36 -07:00
#endif
2007-07-21 17:11:24 +02:00
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}
2005-04-16 15:20:36 -07:00
#endif /* _X8664_TLBFLUSH_H */