/*
 *  linux/arch/arm/mm/copypage-xsc3.c
 *
 *  Copyright (C) 2004 Intel Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Adapted for 3rd gen XScale core, no more mini-dcache
 * Author: Matt Gilbert (matthew.m.gilbert@intel.com)
 */
#include <linux/init.h>
#include <linux/highmem.h>
/*
 * General note:
 *  We don't really want write-allocate cache behaviour for these functions
 *  since that will just eat through 8K of the cache.
 */
/*
 * XSC3 optimised copy_user_highpage
 *  r0 = destination
 *  r1 = source
 *
 * The source page may have some clean entries in the cache already, but we
 * can safely ignore them - break_cow() will flush them out of the cache
 * if we eventually end up using our copied page.
 */
static void __naked
2008-10-31 18:08:35 +03:00
xsc3_mc_copy_user_page ( void * kto , const void * kfrom )
2008-10-31 16:08:02 +03:00
{
asm ( " \
stmfd sp ! , { r4 , r5 , lr } \ n \
2010-06-04 07:05:15 +04:00
mov lr , % 2 \ n \
2008-10-31 16:08:02 +03:00
\ n \
pld [ r1 , # 0 ] \ n \
pld [ r1 , # 32 ] \ n \
1 : pld [ r1 , # 64 ] \ n \
pld [ r1 , # 96 ] \ n \
\ n \
2 : ldrd r2 , [ r1 ] , # 8 \ n \
mov ip , r0 \ n \
ldrd r4 , [ r1 ] , # 8 \ n \
mcr p15 , 0 , ip , c7 , c6 , 1 @ invalidate \ n \
strd r2 , [ r0 ] , # 8 \ n \
ldrd r2 , [ r1 ] , # 8 \ n \
strd r4 , [ r0 ] , # 8 \ n \
ldrd r4 , [ r1 ] , # 8 \ n \
strd r2 , [ r0 ] , # 8 \ n \
strd r4 , [ r0 ] , # 8 \ n \
ldrd r2 , [ r1 ] , # 8 \ n \
mov ip , r0 \ n \
ldrd r4 , [ r1 ] , # 8 \ n \
mcr p15 , 0 , ip , c7 , c6 , 1 @ invalidate \ n \
strd r2 , [ r0 ] , # 8 \ n \
ldrd r2 , [ r1 ] , # 8 \ n \
subs lr , lr , # 1 \ n \
strd r4 , [ r0 ] , # 8 \ n \
ldrd r4 , [ r1 ] , # 8 \ n \
strd r2 , [ r0 ] , # 8 \ n \
strd r4 , [ r0 ] , # 8 \ n \
bgt 1 b \ n \
beq 2 b \ n \
\ n \
ldmfd sp ! , { r4 , r5 , pc } "
:
2010-06-04 07:05:15 +04:00
: " r " ( kto ) , " r " ( kfrom ) , " I " ( PAGE_SIZE / 64 - 1 ) ) ;
2008-10-31 16:08:02 +03:00
}
2008-10-31 18:08:35 +03:00
void xsc3_mc_copy_user_highpage ( struct page * to , struct page * from ,
2009-10-05 18:17:45 +04:00
unsigned long vaddr , struct vm_area_struct * vma )
2008-10-31 18:08:35 +03:00
{
void * kto , * kfrom ;
kto = kmap_atomic ( to , KM_USER0 ) ;
kfrom = kmap_atomic ( from , KM_USER1 ) ;
2009-10-05 18:34:22 +04:00
flush_cache_page ( vma , vaddr , page_to_pfn ( from ) ) ;
2008-10-31 18:08:35 +03:00
xsc3_mc_copy_user_page ( kto , kfrom ) ;
kunmap_atomic ( kfrom , KM_USER1 ) ;
kunmap_atomic ( kto , KM_USER0 ) ;
}
/*
 * XScale optimised clear_user_page
 *  r0 = destination
 *  r1 = virtual user address of ultimate destination page
 */
void xsc3_mc_clear_user_highpage ( struct page * page , unsigned long vaddr )
2008-10-31 16:08:02 +03:00
{
2008-11-04 10:42:27 +03:00
void * ptr , * kaddr = kmap_atomic ( page , KM_USER0 ) ;
asm volatile ( " \
mov r1 , % 2 \ n \
2008-10-31 16:08:02 +03:00
mov r2 , # 0 \ n \
mov r3 , # 0 \ n \
2008-10-31 19:32:19 +03:00
1 : mcr p15 , 0 , % 0 , c7 , c6 , 1 @ invalidate line \ n \
strd r2 , [ % 0 ] , # 8 \ n \
strd r2 , [ % 0 ] , # 8 \ n \
strd r2 , [ % 0 ] , # 8 \ n \
strd r2 , [ % 0 ] , # 8 \ n \
2008-10-31 16:08:02 +03:00
subs r1 , r1 , # 1 \ n \
2008-10-31 19:32:19 +03:00
bne 1 b "
2008-11-04 10:42:27 +03:00
: " =r " ( ptr )
: " 0 " ( kaddr ) , " I " ( PAGE_SIZE / 32 )
2008-10-31 19:32:19 +03:00
: " r1 " , " r2 " , " r3 " ) ;
kunmap_atomic ( kaddr , KM_USER0 ) ;
2008-10-31 16:08:02 +03:00
}
struct cpu_user_fns xsc3_mc_user_fns __initdata = {
2008-10-31 19:32:19 +03:00
. cpu_clear_user_highpage = xsc3_mc_clear_user_highpage ,
2008-10-31 18:08:35 +03:00
. cpu_copy_user_highpage = xsc3_mc_copy_user_highpage ,
2008-10-31 16:08:02 +03:00
} ;