/*
 *  linux/arch/arm/mm/copypage-v4wb.c
 *
 *  Copyright (C) 1995-1999 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/highmem.h>

/*
 * ARMv4 optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 *
 * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
 * instruction.  If your processor does not supply this, you have to write your
 * own copy_user_highpage that does the right thing.
 */
static void __naked
v4wb_copy_user_page(void *kto, const void *kfrom)
{
	asm("\
	stmfd	sp!, {r4, lr}			@ 2\n\
	mov	r2, %2				@ 1\n\
	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
1:	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
	ldmia	r1!, {r3, r4, ip, lr}		@ 4+1\n\
	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
	subs	r2, r2, #1			@ 1\n\
	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
	ldmneia	r1!, {r3, r4, ip, lr}		@ 4\n\
	bne	1b				@ 1\n\
	mcr	p15, 0, r1, c7, c10, 4		@ 1   drain WB\n\
	ldmfd	sp!, {r4, pc}			@ 3"
	:
	: "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64));
}
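
/*
 * For illustration only: a rough C rendering of the loop above, assuming
 * 32-byte D-cache lines, so each iteration handles 64 bytes and invalidates
 * two destination lines just before overwriting them.  The helper names are
 * hypothetical, not kernel APIs; the real routine must stay in assembly so
 * the register usage and the ordering of the cache maintenance MCRs against
 * the stores stay exactly as scheduled.
 *
 *	for (i = 0; i < PAGE_SIZE / 64; i++) {
 *		invalidate_dcache_line(dst);		// MCR p15, 0, c7, c6, 1
 *		copy_32_bytes(dst, src);		// two ldmia/stmia pairs
 *		invalidate_dcache_line(dst + 32);
 *		copy_32_bytes(dst + 32, src + 32);
 *		dst += 64;
 *		src += 64;
 *	}
 *	drain_write_buffer();				// MCR p15, 0, c7, c10, 4
 */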
void v4wb_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kto = kmap_atomic(to, KM_USER0);
	kfrom = kmap_atomic(from, KM_USER1);
	/* write back the user-space alias of the source page first, so the
	   kernel mapping reads up-to-date data on this VIVT cache */
	flush_cache_page(vma, vaddr, page_to_pfn(from));
	v4wb_copy_user_page(kto, kfrom);
	kunmap_atomic(kfrom, KM_USER1);
	kunmap_atomic(kto, KM_USER0);
}

/*
 * ARMv4 optimised clear_user_page
 *
 * Same story as above.
 */
void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
	asm volatile("\
	mov	r1, %2				@ 1\n\
	mov	r2, #0				@ 1\n\
	mov	r3, #0				@ 1\n\
	mov	ip, #0				@ 1\n\
	mov	lr, #0				@ 1\n\
1:	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	subs	r1, r1, #1			@ 1\n\
	bne	1b				@ 1\n\
	mcr	p15, 0, r1, c7, c10, 4		@ 1   drain WB"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 64)
	: "r1", "r2", "r3", "ip", "lr");
	kunmap_atomic(kaddr, KM_USER0);
}

struct cpu_user_fns v4wb_user_fns __initdata = {
	.cpu_clear_user_highpage = v4wb_clear_user_highpage,
	.cpu_copy_user_highpage	= v4wb_copy_user_highpage,
};
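
/*
 * For reference, a minimal sketch of how this table is consumed.  On a
 * kernel built for multiple CPU types, the ARM mm code selects the matching
 * cpu_user_fns at boot, and the generic highmem helpers dispatch through it
 * roughly as below (macro names as in asm/page.h of this era; they may
 * differ between kernel versions):
 *
 *	extern struct cpu_user_fns cpu_user_fns;
 *
 *	#define __cpu_copy_user_highpage  cpu_user_fns.cpu_copy_user_highpage
 *
 *	#define copy_user_highpage(to, from, vaddr, vma) \
 *		__cpu_copy_user_highpage(to, from, vaddr, vma)
 */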