/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009, Wind River Systems Inc
 * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>

#include <asm/cacheflush.h>
#include <asm/cpuinfo.h>
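/*
 * Write back (flush) the data cache lines covering [start, end).  The range
 * is rounded out to whole cache lines and capped at one cache size worth of
 * lines.
 */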
static void __flush_dcache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.dcache_line_size - 1);
	end += (cpuinfo.dcache_line_size - 1);
	end &= ~(cpuinfo.dcache_line_size - 1);

	if (end > start + cpuinfo.dcache_size)
		end = start + cpuinfo.dcache_size;

	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
		__asm__ __volatile__ (" flushd 0(%0)\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
}
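/*
 * Invalidate (discard without writeback) the data cache lines covering
 * [start, end), rounded out to whole cache lines.
 */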
static void __invalidate_dcache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.dcache_line_size - 1);
	end += (cpuinfo.dcache_line_size - 1);
	end &= ~(cpuinfo.dcache_line_size - 1);

	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
		__asm__ __volatile__ (" initda 0(%0)\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
}
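/*
 * Flush the instruction cache lines covering [start, end), capped at one
 * cache size worth of lines, then flush the pipeline so that subsequently
 * fetched instructions see the updated memory.
 */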
static void __flush_icache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.icache_line_size - 1);
	end += (cpuinfo.icache_line_size - 1);
	end &= ~(cpuinfo.icache_line_size - 1);

	if (end > start + cpuinfo.icache_size)
		end = start + cpuinfo.icache_size;

	for (addr = start; addr < end; addr += cpuinfo.icache_line_size) {
		__asm__ __volatile__ (" flushi %0\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
	__asm__ __volatile(" flushp\n");
}
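/*
 * Flush any user-space aliases of this page in the current mm: walk the
 * shared mappings of its address_space and flush the cache page for each
 * VMA that maps it.
 */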
static void flush_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	pgoff_t pgoff;

	pgoff = page->index;

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;

		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset,
			page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}
void flush_cache_all(void)
{
	__flush_dcache(0, cpuinfo.dcache_size);
	__flush_icache(0, cpuinfo.icache_size);
}
void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
	flush_cache_all();
}
void flush_icache_range(unsigned long start, unsigned long end)
{
	__flush_dcache(start, end);
	__flush_icache(start, end);
}
void flush_dcache_range(unsigned long start, unsigned long end)
{
	__flush_dcache(start, end);
	__flush_icache(start, end);
}
EXPORT_SYMBOL(flush_dcache_range);
void invalidate_dcache_range(unsigned long start, unsigned long end)
{
	__invalidate_dcache(start, end);
}
EXPORT_SYMBOL(invalidate_dcache_range);
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	__flush_dcache(start, end);
	if (vma == NULL || (vma->vm_flags & VM_EXEC))
		__flush_icache(start, end);
}
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned long end = start + PAGE_SIZE;

	__flush_dcache(start, end);
	__flush_icache(start, end);
}
void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
	unsigned long pfn)
{
	unsigned long start = vmaddr;
	unsigned long end = start + PAGE_SIZE;

	__flush_dcache(start, end);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache(start, end);
}
void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	unsigned long start = (unsigned long)page_address(page);

	__flush_dcache(start, start + PAGE_SIZE);
}
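/*
 * Called when the kernel has dirtied a page cache page.  Either defer the
 * flush by clearing PG_dcache_clean (if the page has a mapping but is not
 * currently mapped into user space), or write back the kernel mapping and
 * any user-space aliases immediately.
 */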
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;
	mapping = page_mapping_file(page);
	/* Flush this page if there are aliases. */
	if (mapping && !mapping_mapped(mapping)) {
		clear_bit(PG_dcache_clean, &page->flags);
	} else {
		__flush_dcache_page(mapping, page);
		if (mapping) {
			unsigned long start = (unsigned long)page_address(page);

			flush_aliases(mapping, page);
			flush_icache_range(start, start + PAGE_SIZE);
		}
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);
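/*
 * Called by the generic MM code after a PTE has been installed for a user
 * address: reload the TLB with the new translation and bring the data and
 * instruction caches into sync with the newly mapped page.
 */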
void update_mmu_cache(struct vm_area_struct *vma,
		      unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	unsigned long pfn = pte_pfn(pte);
	struct page *page;
	struct address_space *mapping;

	reload_tlb_page(vma, address, pte);

	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;
	mapping = page_mapping_file(page);
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);

	if (mapping) {
		flush_aliases(mapping, page);
		if (vma->vm_flags & VM_EXEC)
			flush_icache_page(vma, page);
	}
}
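/*
 * copy_user_page() and clear_user_page() operate on the kernel mapping of a
 * user page: flush the user-space alias before touching the data, then flush
 * the kernel mapping afterwards so both views of the page stay coherent.
 */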
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *to)
{
	__flush_dcache(vaddr, vaddr + PAGE_SIZE);
	__flush_icache(vaddr, vaddr + PAGE_SIZE);
	copy_page(vto, vfrom);
	__flush_dcache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
	__flush_icache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
}
void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{
	__flush_dcache(vaddr, vaddr + PAGE_SIZE);
	__flush_icache(vaddr, vaddr + PAGE_SIZE);
	clear_page(addr);
	__flush_dcache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
	__flush_icache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
}
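/*
 * copy_from_user_page() and copy_to_user_page() are used when the kernel
 * accesses another process's pages (e.g. via ptrace): flush the target page
 * first, do the copy, then write back the touched range and, for executable
 * VMAs, flush the instruction cache as well.
 */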
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long user_vaddr,
			 void *dst, void *src, int len)
{
	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	__flush_dcache((unsigned long)src, (unsigned long)src + len);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache((unsigned long)src, (unsigned long)src + len);
}
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long user_vaddr,
		       void *dst, void *src, int len)
{
	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	__flush_dcache((unsigned long)dst, (unsigned long)dst + len);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache((unsigned long)dst, (unsigned long)dst + len);
}