/*
 *  linux/arch/arm/lib/copypage-armv4mc.S
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This handles the mini data cache, as found on SA11x0 and XScale
 * processors.  When we copy a user page, we map it in such a way
 * that accesses to this page will not touch the main data cache, but
 * will be cached in the mini data cache.  This prevents us thrashing
 * the main data cache on page faults.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "mm.h"

#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				  L_PTE_MT_MINICACHE)

static DEFINE_RAW_SPINLOCK(minicache_lock);

/*
 * ARMv4 mini-dcache optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 *
 * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
 * instruction.  If your processor does not supply this, you have to write your
 * own copy_user_highpage that does the right thing.
 */
static void mc_copy_user_page(void *from, void *to)
{
	int tmp;

	asm volatile ("\
	.syntax unified\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
1:	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4+1\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	subs	%2, %2, #1			@ 1\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmiane	%0!, {r2, r3, ip, lr}		@ 4\n\
	bne	1b				@ "
	: "+&r" (from), "+&r" (to), "=&r" (tmp)
	: "2" (PAGE_SIZE / 64)
	: "r2", "r3", "ip", "lr");
}
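
/*
 * For illustration only: a plain-C sketch of what the assembly above
 * does, minus the cache maintenance (the "invalidate D line" mcr has
 * no C equivalent).  mc_copy_user_page_c() is a hypothetical helper,
 * not part of the kernel; the real copy must stay in assembly so the
 * invalidate hits exactly the destination lines about to be written.
 */
static void __maybe_unused mc_copy_user_page_c(void *from, void *to)
{
	const unsigned long *src = from;
	unsigned long *dst = to;
	int blocks, i;

	/* One iteration per 64-byte block, as in the asm loop above. */
	for (blocks = PAGE_SIZE / 64; blocks != 0; blocks--) {
		for (i = 0; i < 16; i++)	/* 16 words == 64 bytes on 32-bit ARM */
			*dst++ = *src++;
	}
}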

void v4_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto = kmap_atomic(to);

	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
		__flush_dcache_page(page_mapping_file(from), from);

	raw_spin_lock(&minicache_lock);

	set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));

	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);

	raw_spin_unlock(&minicache_lock);

	kunmap_atomic(kto);
}
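
/*
 * Note the asymmetry above: only the source page is read through the
 * minicache window, so pulling its data in does not displace lines in
 * the main D-cache; the destination is written through its ordinary
 * kmap_atomic() mapping, with each D line invalidated just before it
 * is stored to.
 */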

/*
 * ARMv4 optimised clear_user_page
 */
void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page);
	asm volatile("\
	mov	r1, %2				@ 1\n\
	mov	r2, #0				@ 1\n\
	mov	r3, #0				@ 1\n\
	mov	ip, #0				@ 1\n\
	mov	lr, #0				@ 1\n\
1:	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	subs	r1, r1, #1			@ 1\n\
	bne	1b				@ 1"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 64)
	: "r1", "r2", "r3", "ip", "lr");
	kunmap_atomic(kaddr);
}

struct cpu_user_fns v4_mc_user_fns __initdata = {
	.cpu_clear_user_highpage = v4_mc_clear_user_highpage,
	.cpu_copy_user_highpage	= v4_mc_copy_user_highpage,
};
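
/*
 * These hooks are not called directly.  Assuming the usual multi-user
 * build, the proc_info entry for the CPU points at v4_mc_user_fns, the
 * boot code copies it into cpu_user, and the copy_user_highpage() /
 * clear_user_highpage() wrappers in <asm/page.h> dispatch through
 * cpu_user to the functions above.
 */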