2008-10-22 22:26:29 -07:00
# ifndef _ASM_X86_UACCESS_32_H
# define _ASM_X86_UACCESS_32_H
2005-04-16 15:20:36 -07:00
/*
* User space memory access functions
*/
# include <linux/errno.h>
# include <linux/thread_info.h>
# include <linux/string.h>
2008-02-04 16:47:59 +01:00
# include <asm/asm.h>
2005-04-16 15:20:36 -07:00
# include <asm/page.h>
2008-03-23 01:03:48 -07:00
/*
 * Low-level copy routines implemented in arch/x86/lib; the inline
 * wrappers below select among them.  Each returns the number of bytes
 * that could NOT be copied (0 on full success).
 */
unsigned long __must_check __copy_to_user_ll(void __user *to,
				const void *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll(void *to,
				const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nozero(void *to,
				const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache(void *to,
				const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache_nozero(void *to,
				const void __user *from, unsigned long n);
2005-04-16 15:20:36 -07:00
2007-05-02 19:27:06 +02:00
/**
* __copy_to_user_inatomic : - Copy a block of data into user space , with less checking .
* @ to : Destination address , in user space .
* @ from : Source address , in kernel space .
* @ n : Number of bytes to copy .
*
* Context : User context only .
*
* Copy data from kernel space to user space . Caller must check
* the specified block with access_ok ( ) before calling this function .
* The caller should also make sure he pins the user space address
2009-09-17 15:54:01 +03:00
* so that we don ' t result in page fault and sleep .
2005-04-16 15:20:36 -07:00
*/
2006-01-14 13:21:30 -08:00
static __always_inline unsigned long __must_check
2005-04-16 15:20:36 -07:00
__copy_to_user_inatomic ( void __user * to , const void * from , unsigned long n )
{
2016-06-23 15:04:01 -07:00
check_object_size ( from , n , true ) ;
2005-04-16 15:20:36 -07:00
return __copy_to_user_ll ( to , from , n ) ;
}
/**
2006-10-11 01:22:10 -07:00
* __copy_to_user : - Copy a block of data into user space , with less checking .
* @ to : Destination address , in user space .
* @ from : Source address , in kernel space .
2005-04-16 15:20:36 -07:00
* @ n : Number of bytes to copy .
*
2015-05-11 17:52:08 +02:00
* Context : User context only . This function may sleep if pagefaults are
* enabled .
2005-04-16 15:20:36 -07:00
*
2006-10-11 01:22:10 -07:00
* Copy data from kernel space to user space . Caller must check
2005-04-16 15:20:36 -07:00
* the specified block with access_ok ( ) before calling this function .
*
* Returns number of bytes that could not be copied .
* On success , this will be zero .
*/
2006-10-11 01:22:10 -07:00
static __always_inline unsigned long __must_check
__copy_to_user ( void __user * to , const void * from , unsigned long n )
{
2008-09-10 13:37:17 +02:00
might_fault ( ) ;
2008-09-10 13:37:17 +02:00
return __copy_to_user_inatomic ( to , from , n ) ;
2006-10-11 01:22:10 -07:00
}
2006-01-14 13:21:30 -08:00
static __always_inline unsigned long
2005-04-16 15:20:36 -07:00
__copy_from_user_inatomic ( void * to , const void __user * from , unsigned long n )
{
2006-06-25 05:48:02 -07:00
return __copy_from_user_ll_nozero ( to , from , n ) ;
}
2006-10-11 01:22:10 -07:00
/**
* __copy_from_user : - Copy a block of data from user space , with less checking .
* @ to : Destination address , in kernel space .
* @ from : Source address , in user space .
* @ n : Number of bytes to copy .
*
2015-05-11 17:52:08 +02:00
* Context : User context only . This function may sleep if pagefaults are
* enabled .
2006-10-11 01:22:10 -07:00
*
* Copy data from user space to kernel space . Caller must check
* the specified block with access_ok ( ) before calling this function .
*
* Returns number of bytes that could not be copied .
* On success , this will be zero .
*
* If some data could not be copied , this function will pad the copied
* data to the requested size using zero bytes .
*
* An alternate version - __copy_from_user_inatomic ( ) - may be called from
* atomic context and will fail rather than sleep . In this case the
* uncopied bytes will * NOT * be padded with zeros . See fs / filemap . h
* for explanation of why this is needed .
*/
2006-06-25 05:48:02 -07:00
static __always_inline unsigned long
__copy_from_user ( void * to , const void __user * from , unsigned long n )
{
2008-09-10 13:37:17 +02:00
might_fault ( ) ;
2016-06-23 15:04:01 -07:00
check_object_size ( to , n , false ) ;
2005-04-16 15:20:36 -07:00
if ( __builtin_constant_p ( n ) ) {
unsigned long ret ;
switch ( n ) {
case 1 :
2016-02-23 14:58:52 -08:00
__uaccess_begin ( ) ;
2005-04-16 15:20:36 -07:00
__get_user_size ( * ( u8 * ) to , from , 1 , ret , 1 ) ;
2016-02-23 14:58:52 -08:00
__uaccess_end ( ) ;
2005-04-16 15:20:36 -07:00
return ret ;
case 2 :
2016-02-23 14:58:52 -08:00
__uaccess_begin ( ) ;
2005-04-16 15:20:36 -07:00
__get_user_size ( * ( u16 * ) to , from , 2 , ret , 2 ) ;
2016-02-23 14:58:52 -08:00
__uaccess_end ( ) ;
2005-04-16 15:20:36 -07:00
return ret ;
case 4 :
2016-02-23 14:58:52 -08:00
__uaccess_begin ( ) ;
2005-04-16 15:20:36 -07:00
__get_user_size ( * ( u32 * ) to , from , 4 , ret , 4 ) ;
2016-02-23 14:58:52 -08:00
__uaccess_end ( ) ;
2005-04-16 15:20:36 -07:00
return ret ;
}
}
return __copy_from_user_ll ( to , from , n ) ;
}
2006-06-25 05:48:02 -07:00
static __always_inline unsigned long __copy_from_user_nocache ( void * to ,
2006-06-23 02:04:16 -07:00
const void __user * from , unsigned long n )
{
2008-09-10 13:37:17 +02:00
might_fault ( ) ;
2006-06-23 02:04:16 -07:00
if ( __builtin_constant_p ( n ) ) {
unsigned long ret ;
switch ( n ) {
case 1 :
2016-02-23 14:58:52 -08:00
__uaccess_begin ( ) ;
2006-06-23 02:04:16 -07:00
__get_user_size ( * ( u8 * ) to , from , 1 , ret , 1 ) ;
2016-02-23 14:58:52 -08:00
__uaccess_end ( ) ;
2006-06-23 02:04:16 -07:00
return ret ;
case 2 :
2016-02-23 14:58:52 -08:00
__uaccess_begin ( ) ;
2006-06-23 02:04:16 -07:00
__get_user_size ( * ( u16 * ) to , from , 2 , ret , 2 ) ;
2016-02-23 14:58:52 -08:00
__uaccess_end ( ) ;
2006-06-23 02:04:16 -07:00
return ret ;
case 4 :
2016-02-23 14:58:52 -08:00
__uaccess_begin ( ) ;
2006-06-23 02:04:16 -07:00
__get_user_size ( * ( u32 * ) to , from , 4 , ret , 4 ) ;
2016-02-23 14:58:52 -08:00
__uaccess_end ( ) ;
2006-06-23 02:04:16 -07:00
return ret ;
}
}
return __copy_from_user_ll_nocache ( to , from , n ) ;
}
2006-01-14 13:21:30 -08:00
static __always_inline unsigned long
2008-03-23 01:03:48 -07:00
__copy_from_user_inatomic_nocache ( void * to , const void __user * from ,
unsigned long n )
2005-04-16 15:20:36 -07:00
{
2006-06-25 05:48:02 -07:00
return __copy_from_user_ll_nocache_nozero ( to , from , n ) ;
2006-06-23 02:04:16 -07:00
}
2008-10-22 22:26:29 -07:00
# endif /* _ASM_X86_UACCESS_32_H */