/*
 *  linux/arch/arm/mm/copypage-v6.c
 *
 *  Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/shmparam.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>

#include "mm.h"

#if SHMLBA > 16384
#error FIX ME
#endif

static DEFINE_RAW_SPINLOCK(v6_lock);
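
/*
 * v6_lock serialises use of the shared COPYPAGE_V6_FROM/COPYPAGE_V6_TO
 * alias windows set up by the aliasing copy/clear routines below, so that
 * two CPUs cannot install conflicting temporary mappings for the same
 * cache colour at the same time.
 */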

/*
 * Copy the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of these pages.
 */
static void v6_copy_user_highpage_nonaliasing(struct page *to,
        struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
        void *kto, *kfrom;

        kfrom = kmap_atomic(from);
        kto = kmap_atomic(to);
        copy_page(kto, kfrom);
        kunmap_atomic(kto);
        kunmap_atomic(kfrom);
}

/*
 * Clear the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of this page.
 */
static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
{
        void *kaddr = kmap_atomic(page);
        clear_page(kaddr);
        kunmap_atomic(kaddr);
}
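
/*
 * Note: the non-aliasing variants above are sufficient because, with a
 * non-aliasing cache, the kernel mapping returned by kmap_atomic() hits
 * the same cache lines as any user mapping of the page.  The routines
 * below handle VIPT aliasing caches, where that is not the case.
 */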

/*
 * Discard data in the kernel mapping for the new page.
 * FIXME: relies on the MCRR cache range operation being supported.
 */
static void discard_old_kernel_data(void *kto)
{
        /* Invalidate the D-cache over [kto, kto + PAGE_SIZE - 1]. */
        __asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
           :
           : "r" (kto),
             "r" ((unsigned long)kto + PAGE_SIZE - 1)
           : "cc");
}
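
/*
 * In the aliasing routines below, the destination page is written through
 * a temporary kernel mapping placed at the same cache colour as the user
 * address, so the data lands in the cache sets the user mapping will use.
 * For example (assuming 16K SHMLBA and 4K pages, i.e. four colours), a
 * page faulted in at a user address ending in 0x5000 has colour 1 and is
 * copied or cleared through alias window 1.
 */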

/*
 * Copy the page, taking account of the cache colour.
 */
static void v6_copy_user_highpage_aliasing(struct page *to,
        struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
        unsigned int offset = CACHE_COLOUR(vaddr);
        unsigned long kfrom, kto;

        if (!test_and_set_bit(PG_dcache_clean, &from->flags))
                __flush_dcache_page(page_mapping_file(from), from);

        /* FIXME: not highmem safe */
        discard_old_kernel_data(page_address(to));

        /*
         * Now copy the page using the same cache colour as the
         * page's ultimate destination.
         */
        raw_spin_lock(&v6_lock);

        kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT);
        kto   = COPYPAGE_V6_TO + (offset << PAGE_SHIFT);

        set_top_pte(kfrom, mk_pte(from, PAGE_KERNEL));
        set_top_pte(kto, mk_pte(to, PAGE_KERNEL));

        copy_page((void *)kto, (void *)kfrom);

        raw_spin_unlock(&v6_lock);
}

/*
 * Clear the user page.  We need to deal with the aliasing issues,
 * so remap the kernel page into the same cache colour as the
 * user page.
 */
static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
{
        unsigned long to = COPYPAGE_V6_TO + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);

        /* FIXME: not highmem safe */
        discard_old_kernel_data(page_address(page));

        /*
         * Now clear the page using the same cache colour as
         * the page's ultimate destination.
         */
        raw_spin_lock(&v6_lock);

        set_top_pte(to, mk_pte(page, PAGE_KERNEL));
        clear_page((void *)to);

        raw_spin_unlock(&v6_lock);
}
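
/*
 * The non-aliasing handlers are installed by default; v6_userpage_init()
 * below switches to the aliasing variants at boot when the data cache is
 * found to be VIPT aliasing.
 */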
struct cpu_user_fns v6_user_fns __initdata = {
        .cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
        .cpu_copy_user_highpage = v6_copy_user_highpage_nonaliasing,
};

static int __init v6_userpage_init(void)
{
        if (cache_is_vipt_aliasing()) {
                cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing;
                cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
        }

        return 0;
}

core_initcall(v6_userpage_init);