/*
 *  linux/arch/arm/lib/copypage-xscale.S
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This handles the mini data cache, as found on SA11x0 and XScale
 * processors.  When we copy a user page, we map it in such a way
 * that accesses to this page will not touch the main data cache, but
 * will be cached in the mini data cache.  This prevents us thrashing
 * the main data cache on page faults.
 */
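
/*
 * Concretely, the copy path below maps the source page at
 * COPYPAGE_MINICACHE with minicache_pgprot, so the source is read through
 * the mini data cache while the destination is written through its normal
 * kernel mapping.
 */
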
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "mm.h"

#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				  L_PTE_MT_MINICACHE)

static DEFINE_RAW_SPINLOCK(minicache_lock);

/*
 * XScale mini-dcache optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 */
static void __naked
mc_copy_user_page(void *from, void *to)
{
	/*
	 * Strangely enough, best performance is achieved
	 * when prefetching destination as well.  (NP)
	 */
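	/*
	 * Register usage (the function is __naked, so the arguments arrive
	 * untouched): r0 = source, r1 = destination, lr counts the remaining
	 * 64-byte chunks, and ip remembers the start of each 32-byte
	 * destination line so it can be cleaned and invalidated once filled.
	 */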
	asm volatile(
	"stmfd	sp!, {r4, r5, lr}		\n\
	mov	lr, %2				\n\
	pld	[r0, #0]			\n\
	pld	[r0, #32]			\n\
	pld	[r1, #0]			\n\
	pld	[r1, #32]			\n\
1:	pld	[r0, #64]			\n\
	pld	[r0, #96]			\n\
	pld	[r1, #64]			\n\
	pld	[r1, #96]			\n\
2:	ldrd	r2, [r0], #8			\n\
	ldrd	r4, [r0], #8			\n\
	mov	ip, r1				\n\
	strd	r2, [r1], #8			\n\
	ldrd	r2, [r0], #8			\n\
	strd	r4, [r1], #8			\n\
	ldrd	r4, [r0], #8			\n\
	strd	r2, [r1], #8			\n\
	strd	r4, [r1], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	ldrd	r2, [r0], #8			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	ldrd	r4, [r0], #8			\n\
	mov	ip, r1				\n\
	strd	r2, [r1], #8			\n\
	ldrd	r2, [r0], #8			\n\
	strd	r4, [r1], #8			\n\
	ldrd	r4, [r0], #8			\n\
	strd	r2, [r1], #8			\n\
	strd	r4, [r1], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	lr, lr, #1			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bgt	1b				\n\
	beq	2b				\n\
	ldmfd	sp!, {r4, r5, pc}		"
	:
	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1));
}

void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
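	/*
	 * If the source page may still be dirty in the main D-cache, write it
	 * back first so that the read through the mini-cache alias sees
	 * current data; the copy itself runs under minicache_lock, which
	 * serialises use of the single COPYPAGE_MINICACHE window.
	 */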
	void *kto = kmap_atomic(to);

	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
		__flush_dcache_page(page_mapping_file(from), from);

	raw_spin_lock(&minicache_lock);

	set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));

	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);

	raw_spin_unlock(&minicache_lock);

	kunmap_atomic(kto);
}
/*
* XScale optimised clear_user_page
*/
void
xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page);
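	/*
	 * r1 counts the PAGE_SIZE / 32 cache lines to clear, r2/r3 hold the
	 * zero doubleword, and ip keeps the start of each 32-byte line so it
	 * can be cleaned and invalidated straight after being written.
	 */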
	asm volatile(
	"mov	r1, %2				\n\
	mov	r2, #0				\n\
	mov	r3, #0				\n\
1:	mov	ip, %0				\n\
	strd	r2, [%0], #8			\n\
	strd	r2, [%0], #8			\n\
	strd	r2, [%0], #8			\n\
	strd	r2, [%0], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	r1, r1, #1			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bne	1b"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 32)
	: "r1", "r2", "r3", "ip");
	kunmap_atomic(kaddr);
}
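
/*
 * Nothing calls these routines directly.  The generic ARM copypage code
 * dispatches copy_user_highpage()/clear_user_highpage() through the
 * cpu_user function table, which is initialised from this structure at
 * boot on XScale-based CPUs.  A rough sketch of the MULTI_USER case (not
 * verbatim):
 *
 *	#define __cpu_copy_user_highpage	cpu_user.cpu_copy_user_highpage
 *	#define copy_user_highpage(to, from, vaddr, vma) \
 *		__cpu_copy_user_highpage(to, from, vaddr, vma)
 */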
struct cpu_user_fns xscale_mc_user_fns __initdata = {
	.cpu_clear_user_highpage = xscale_mc_clear_user_highpage,
	.cpu_copy_user_highpage	= xscale_mc_copy_user_highpage,
};