/*
 *  linux/arch/arm/mm/copypage-v6.c
 *
 *  Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/shmparam.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>

#include "mm.h"

#if SHMLBA > 16384
#error FIX ME
#endif

#define from_address	(0xffff8000)
#define to_address	(0xffffc000)

static DEFINE_SPINLOCK(v6_lock);
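
/*
 * Note: on an aliasing VIPT D-cache the same physical page can sit in
 * different cache sets depending on the virtual address used to reach
 * it (its cache "colour").  from_address and to_address are two fixed
 * kernel virtual windows near the top of the address space; the
 * aliasing paths below map the source and destination pages into them
 * at the colour of the user mapping, and v6_lock serialises all users
 * of the shared windows.
 */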

/*
 * Copy the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of these pages.
 */
static void v6_copy_user_highpage_nonaliasing(struct page *to,
	struct page *from, unsigned long vaddr)
{
	void *kto, *kfrom;

	kfrom = kmap_atomic(from, KM_USER0);
	kto = kmap_atomic(to, KM_USER1);
	copy_page(kto, kfrom);
	kunmap_atomic(kto, KM_USER1);
	kunmap_atomic(kfrom, KM_USER0);
}
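
/*
 * Note: with a non-aliasing cache, the kernel mapping set up by
 * kmap_atomic() hits the same cache lines as any user mapping of the
 * page, so a plain copy_page()/clear_page() needs no extra flushing.
 */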

/*
 * Clear the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of this page.
 */
static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);
	clear_page(kaddr);
	kunmap_atomic(kaddr, KM_USER0);
}

/*
 * Discard data in the kernel mapping for the new page.
 * FIXME: needs this MCRR to be supported.
 */
static void discard_old_kernel_data(void *kto)
{
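	/*
	 * Note: mcrr p15, 0, <end>, <start>, c6 should be the ARMv6
	 * "invalidate data cache range" operation: it drops any D-cache
	 * lines covering the page without writing them back, which is
	 * fine here because the page is about to be overwritten anyway.
	 */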
	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
	   :
	   : "r" (kto),
	     "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
	   : "cc");
}

/*
 * Copy the page, taking account of the cache colour.
 */
static void v6_copy_user_highpage_aliasing(struct page *to,
	struct page *from, unsigned long vaddr)
{
	unsigned int offset = CACHE_COLOUR(vaddr);
	unsigned long kfrom, kto;

	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
		__flush_dcache_page(page_mapping(from), from);

	/* FIXME: not highmem safe */
	discard_old_kernel_data(page_address(to));

	/*
	 * Now copy the page using the same cache colour as the
	 * page's ultimate destination.
	 */
	spin_lock(&v6_lock);
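
	/*
	 * Map both pages into the fixed windows.  TOP_PTE() (defined in
	 * mm.h) gives the PTE slot for a virtual address; adding "offset"
	 * steps to the slot whose virtual address has the same colour as
	 * the user's mapping of the page.
	 */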
	set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);

	kfrom = from_address + (offset << PAGE_SHIFT);
	kto = to_address + (offset << PAGE_SHIFT);
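
	/* The windows are reused, so kill any stale TLB entries for them. */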
	flush_tlb_kernel_page(kfrom);
	flush_tlb_kernel_page(kto);

	copy_page((void *)kto, (void *)kfrom);

	spin_unlock(&v6_lock);
}

/*
 * Clear the user page.  We need to deal with the aliasing issues,
 * so remap the kernel page into the same cache colour as the
 * user page.
 */
static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
{
	unsigned int offset = CACHE_COLOUR(vaddr);
	unsigned long to = to_address + (offset << PAGE_SHIFT);

	/* FIXME: not highmem safe */
	discard_old_kernel_data(page_address(page));

	/*
	 * Now clear the page using the same cache colour as
	 * the page's ultimate destination.
	 */
	spin_lock(&v6_lock);
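
	/* Map the page at the matching colour, then clear through that alias. */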
	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
	flush_tlb_kernel_page(to);
	clear_page((void *)to);
	spin_unlock(&v6_lock);
}

struct cpu_user_fns v6_user_fns __initdata = {
	.cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
	.cpu_copy_user_highpage	= v6_copy_user_highpage_nonaliasing,
};
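
/*
 * Note: the non-aliasing handlers above are the compile-time defaults;
 * the initcall below swaps in the colour-aware versions once the cache
 * has been probed and found to be VIPT aliasing.
 */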

static int __init v6_userpage_init(void)
{
	if (cache_is_vipt_aliasing()) {
		cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing;
		cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
	}

	return 0;
}

core_initcall(v6_userpage_init);