/*
 *  linux/arch/arm/lib/uaccess_with_memcpy.c
 *
 *  Written by: Lennert Buytenhek and Nicolas Pitre
 *  Copyright (C) 2009 Marvell Semiconductor
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/rwsem.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/hardirq.h> /* for in_atomic() */
#include <asm/current.h>
#include <asm/page.h>
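
/*
 * Walk the page tables for the given user address and check, with the
 * page table lock held, that the page is present, young, writable and
 * dirty, i.e. that a plain memcpy() through that address cannot fault.
 * On success the pte is left mapped and locked, and both are handed
 * back to the caller.
 */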
static int
pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
	unsigned long addr = (unsigned long)_addr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(current->mm, addr);
	if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
		return 0;

	pmd = pmd_offset(pgd, addr);
	if (unlikely(pmd_none(*pmd) || pmd_bad(*pmd)))
		return 0;

	pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
	if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
	    !pte_write(*pte) || !pte_dirty(*pte))) {
		pte_unmap_unlock(pte, ptl);
		return 0;
	}

	*ptep = pte;
	*ptlp = ptl;

	return 1;
}
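
/*
 * Copy to user space by first pinning each destination page, so that a
 * plain memcpy() through the user address cannot fault.  Returns the
 * number of bytes that could not be copied, like the standard
 * __copy_to_user().
 */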
static unsigned long noinline
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
	int atomic;

	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
		memcpy((void *)to, from, n);
		return 0;
	}

	/* the mmap semaphore is taken only if not in an atomic context */
	atomic = in_atomic();

	if (!atomic)
		down_read(&current->mm->mmap_sem);

	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;
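
		/*
		 * The page isn't pinnable yet (not present, read-only,
		 * or still clean/old), so fault it in by writing a
		 * single byte with __put_user().  The mmap semaphore is
		 * dropped around the access (when held) since the fault
		 * handler needs to take it; a failing __put_user() means
		 * a genuine fault, so give up and return the number of
		 * bytes left.
		 */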
		while (!pin_page_for_write(to, &pte, &ptl)) {
			if (!atomic)
				up_read(&current->mm->mmap_sem);
			if (__put_user(0, (char __user *)to))
				goto out;
			if (!atomic)
				down_read(&current->mm->mmap_sem);
		}
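
		/* bytes from 'to' up to the next page boundary */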
		tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		memcpy((void *)to, from, tocopy);
		to += tocopy;
		from += tocopy;
		n -= tocopy;

		pte_unmap_unlock(pte, ptl);
	}
	if (!atomic)
		up_read(&current->mm->mmap_sem);

out:
	return n;
}

unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	/*
	 * This test is stubbed out of the main function above to keep
	 * the overhead for small copies low by avoiding a large
	 * register dump on the stack just to reload them right away.
	 * With frame pointer disabled, tail call optimization kicks in
	 * as well, making this test almost invisible.
	 */
	if (n < 64)
		return __copy_to_user_std(to, from, n);
	return __copy_to_user_memcpy(to, from, n);
}

static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
{
	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
		memset((void *)addr, 0, n);
		return 0;
	}

	down_read(&current->mm->mmap_sem);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;
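
		/* fault the page in first, same trick as in __copy_to_user_memcpy() */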
		while (!pin_page_for_write(addr, &pte, &ptl)) {
			up_read(&current->mm->mmap_sem);
			if (__put_user(0, (char __user *)addr))
				goto out;
			down_read(&current->mm->mmap_sem);
		}

		tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		memset((void *)addr, 0, tocopy);
		addr += tocopy;
		n -= tocopy;

		pte_unmap_unlock(pte, ptl);
	}
	up_read(&current->mm->mmap_sem);

out:
	return n;
}

unsigned long __clear_user(void __user *addr, unsigned long n)
{
	/* See rationale for this in __copy_to_user() above. */
	if (n < 64)
		return __clear_user_std(addr, n);
	return __clear_user_memset(addr, n);
}

#if 0

/*
 * This code is disabled by default, but kept around in case the chosen
 * thresholds need to be revalidated.  A runtime-determined variable
 * threshold would imply some overhead (small, but still overhead), and
 * so far measurements on the targets concerned didn't show a
 * worthwhile variation.
 *
 * Note that a fairly precise sched_clock() implementation is needed
 * for the results to make sense.
 */

#include <linux/vmalloc.h>

static int __init test_size_treshold(void)
{
	struct page *src_page, *dst_page;
	void *user_ptr, *kernel_ptr;
	unsigned long long t0, t1, t2;
	int size, ret;

	ret = -ENOMEM;
	src_page = alloc_page(GFP_KERNEL);
	if (!src_page)
		goto no_src;
	dst_page = alloc_page(GFP_KERNEL);
	if (!dst_page)
		goto no_dst;
	kernel_ptr = page_address(src_page);
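
	/*
	 * Map the destination page at a kernel virtual address, with
	 * user-style write-only protections (__P010), presumably so the
	 * target of the copies looks as much like a user mapping as a
	 * kernel-side test can manage.
	 */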
	user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010));
	if (!user_ptr)
		goto no_vmap;

	/* warm up the src page dcache */
	ret = __copy_to_user_memcpy(user_ptr, kernel_ptr, PAGE_SIZE);
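
	/*
	 * For each power-of-two size from PAGE_SIZE down to 4 bytes,
	 * time the memcpy-based path back to back against the standard
	 * path and print both durations in sched_clock() units.
	 */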
	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __copy_to_user_memcpy(user_ptr, kernel_ptr, size);
		t1 = sched_clock();
		ret |= __copy_to_user_std(user_ptr, kernel_ptr, size);
		t2 = sched_clock();
		printk("copy_to_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __clear_user_memset(user_ptr, size);
		t1 = sched_clock();
		ret |= __clear_user_std(user_ptr, size);
		t2 = sched_clock();
		printk("clear_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	if (ret)
		ret = -EFAULT;

	vunmap(user_ptr);
no_vmap:
	put_page(dst_page);
no_dst:
	put_page(src_page);
no_src:
	return ret;
}

subsys_initcall(test_size_treshold);

#endif