#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/special_insns.h>

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif
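
/*
 * Reloading CR3 with its current value flushes all non-global TLB
 * entries for the current address space.
 */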
static inline void __native_flush_tlb(void)
{
	native_write_cr3(native_read_cr3());
}
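
/*
 * Toggling CR4.PGE flushes the entire TLB, including global pages,
 * which a plain CR3 reload would leave in place.
 */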
static inline void __native_flush_tlb_global(void)
{
	unsigned long flags;
	unsigned long cr4;

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts.  (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	cr4 = native_read_cr4();
	/* clear PGE */
	native_write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);

	raw_local_irq_restore(flags);
}
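
/* INVLPG invalidates the TLB entry for the single page containing @addr. */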
static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}
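
/*
 * Prefer the global flush when the CPU supports global pages (PGE);
 * without PGE no global TLB entries exist, so a CR3 reload is enough.
 */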
static inline void __flush_tlb_all(void)
{
	if (cpu_has_pge)
		__flush_tlb_global();
	else
		__flush_tlb();
}
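
/*
 * Flush a single page with INVLPG where available; CPUs without it
 * (pre-i486) have to flush the whole TLB instead.
 */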
static inline void __flush_tlb_one(unsigned long addr)
{
	if (cpu_has_invlpg)
		__flush_tlb_single(addr);
	else
		__flush_tlb();
}
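
/*
 * TLB_FLUSH_ALL is the sentinel "address" used to request a flush of
 * the whole address space rather than a single page or range.
 */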
#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
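
/*
 * Typical (illustrative) usage from generic MM code: after page table
 * entries for a user mapping have been changed, the stale translations
 * are dropped with, e.g.,
 *
 *	flush_tlb_page(vma, addr);
 * or
 *	flush_tlb_range(vma, start, end);
 *
 * so that no CPU keeps using a cached translation for the old PTEs.
 */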

#ifndef CONFIG_SMP

#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb_all()
#define local_flush_tlb() __flush_tlb()

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb();
}

static inline void native_flush_tlb_others(const struct cpumask *cpumask,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
}

static inline void reset_lazy_tlbstate(void)
{
}

#else  /* SMP */

#include <asm/smp.h>

#define local_flush_tlb() __flush_tlb()

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_range(struct vm_area_struct *vma,
			    unsigned long start, unsigned long end);

#define flush_tlb()	flush_tlb_current_task()

void native_flush_tlb_others(const struct cpumask *cpumask,
			     struct mm_struct *mm,
			     unsigned long start, unsigned long end);

#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2
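
/*
 * Per-CPU record of which mm is live in this CPU's TLB and whether the
 * CPU is in lazy TLB mode (TLBSTATE_LAZY, e.g. while running a kernel
 * thread), which lets subsequent remote TLB-flush IPIs for that mm be
 * skipped.
 */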
struct tlb_state {
	struct mm_struct *active_mm;
	int state;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

static inline void reset_lazy_tlbstate(void)
{
	this_cpu_write(cpu_tlbstate.state, 0);
	this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
}

#endif	/* SMP */
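
/*
 * Without CONFIG_PARAVIRT, flush_tlb_others() maps directly to the
 * native IPI-based implementation; with paravirt, asm/paravirt.h
 * supplies a hook so a hypervisor can flush remote TLBs on our behalf.
 */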
#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, mm, start, end)	\
	native_flush_tlb_others(mask, mm, start, end)
#endif

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}

#endif /* _ASM_X86_TLBFLUSH_H */