/*
 *  arch/arm/include/asm/uaccess.h
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
# ifndef _ASMARM_UACCESS_H
# define _ASMARM_UACCESS_H
/*
* User space memory access functions
*/
2008-11-29 17:35:51 +00:00
# include <linux/string.h>
2005-04-16 15:20:36 -07:00
# include <asm/memory.h>
# include <asm/domain.h>
2009-07-24 12:32:57 +01:00
# include <asm/unified.h>
2012-03-28 18:30:01 +01:00
# include <asm/compiler.h>
2005-04-16 15:20:36 -07:00
2016-12-25 03:33:03 -05:00
# include <asm/extable.h>
2005-04-16 15:20:36 -07:00
/*
 * These two functions allow hooking accesses to userspace to increase
 * system integrity by ensuring that the kernel can not inadvertently
 * perform such accesses (eg, via list poison values) which could then
 * be exploited for privilege escalation.
 */
static inline unsigned int uaccess_save_and_enable(void)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	unsigned int orig = get_domain();
	unsigned int opened;

	/* Grant DOMAIN_USER client access so user pointers may be followed */
	opened = (orig & ~domain_mask(DOMAIN_USER)) |
		 domain_val(DOMAIN_USER, DOMAIN_CLIENT);
	set_domain(opened);

	/* Hand the previous domain register back for uaccess_restore() */
	return orig;
#else
	/* No SW PAN: nothing to save, user access is always permitted */
	return 0;
#endif
}
/*
 * Undo uaccess_save_and_enable(): put back the domain access state that
 * it returned, closing the user-access window again.
 */
static inline void uaccess_restore(unsigned int flags)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/* Restore the user access mask saved by uaccess_save_and_enable() */
	set_domain(flags);
#else
	(void)flags;	/* no SW PAN: nothing was saved */
#endif
}
/*
 * These two are intentionally not defined anywhere - if the kernel
 * code generates any references to them, that's a bug.
 */
extern int __get_user_bad(void);
extern int __put_user_bad(void);

/*
 * Note that this is actually 0x1,0000,0000
 */
#define KERNEL_DS	0x00000000
#define get_ds()	(KERNEL_DS)

#ifdef CONFIG_MMU

#define USER_DS		TASK_SIZE
#define get_fs()	(current_thread_info()->addr_limit)

/*
 * Switch the current thread's address limit.  KERNEL_DS (== 0) also
 * opens DOMAIN_KERNEL up to manager access; any other limit keeps it
 * at client access.
 */
static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}

#define segment_eq(a, b)	((a) == (b))

/* We use 33-bit arithmetic here... */
/*
 * Evaluates to 0 when [addr, addr + size) lies entirely below the
 * current addr_limit, non-zero otherwise.  The adds/sbcccs pair does
 * the addr + size and limit comparison in one 33-bit carry chain, so
 * an overflowing addr + size cannot wrap past the check.
 */
#define __range_ok(addr, size) ({ \
	unsigned long flag, roksum; \
	__chk_user_ptr(addr); \
	__asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
		: "=&r" (flag), "=&r" (roksum) \
		: "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
		: "cc"); \
	flag; })
/*
 * Single-value transfer routines.  They automatically use the right
 * size if we just have the right pointer type.  Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value.  This means zeroing out the destination variable
 * or buffer on error.  Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path.  When we only write to user space, there is no
 * problem.
 */
/*
 * Out-of-line helpers; see __get_user_x() below for the register
 * calling convention (pointer in r0, limit in r1, result in r0/r2).
 */
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
/* big-endian: read 8 bytes but deliver only the least significant word */
extern int __get_user_32t_8(void *);
extern int __get_user_8(void *);
/* big-endian: zero-extend a 1/2/4-byte read into a 64-bit destination */
extern int __get_user_64t_1(void *);
extern int __get_user_64t_2(void *);
extern int __get_user_64t_4(void *);
/*
 * Registers clobbered by the out-of-line __get_user_* helpers, per
 * transfer size.  The 2-byte helper additionally needs ip as scratch
 * when CPU domains are in use.
 */
#define __GUP_CLOBBER_1	"lr", "cc"
#ifdef CONFIG_CPU_USE_DOMAINS
#define __GUP_CLOBBER_2	"ip", "lr", "cc"
#else
#define __GUP_CLOBBER_2 "lr", "cc"
#endif
#define __GUP_CLOBBER_4	"lr", "cc"
#define __GUP_CLOBBER_32t_8 "lr", "cc"
#define __GUP_CLOBBER_8	"lr", "cc"
/*
 * Call an out-of-line __get_user_<size> helper: user pointer in r0,
 * address limit in r1; error code returned in r0, loaded value in r2.
 * The __asmeq() checks assert at build time that the compiler really
 * placed the operands in the expected registers.
 */
#define __get_user_x(__r2, __p, __e, __l, __s) \
	__asm__ __volatile__ ( \
		__asmeq("%0", "r0") __asmeq("%1", "r2") \
		__asmeq("%3", "r1") \
		"bl	__get_user_" #__s \
		: "=&r" (__e), "=r" (__r2) \
		: "0" (__p), "r" (__l) \
		: __GUP_CLOBBER_##__s)

/* narrowing a double-word get into a single 32bit word register: */
#ifdef __ARMEB__
/* big-endian: must read all 8 bytes, then keep only the low word */
#define __get_user_x_32t(__r2, __p, __e, __l, __s) \
	__get_user_x(__r2, __p, __e, __l, 32t_8)
#else
/* little-endian: the low word is simply the first 4 bytes */
#define __get_user_x_32t __get_user_x
#endif

/*
 * storing result into proper least significant word of 64 bit target var,
 * different only for big endian case where 64 bit __r2 lsw is r3:
 */
#ifdef __ARMEB__
#define __get_user_x_64t(__r2, __p, __e, __l, __s) \
	__asm__ __volatile__ ( \
		__asmeq("%0", "r0") __asmeq("%1", "r2") \
		__asmeq("%3", "r1") \
		"bl	__get_user_64t_" #__s \
		: "=&r" (__e), "=r" (__r2) \
		: "0" (__p), "r" (__l) \
		: __GUP_CLOBBER_##__s)
#else
#define __get_user_x_64t __get_user_x
#endif
/*
 * Checked get_user core: dispatch on the pointee size to the matching
 * out-of-line helper (which performs the limit check itself, hence
 * __limit = addr_limit - 1 passed in r1).  User access is opened for
 * the duration of the call and closed again before the destination is
 * written.  Evaluates to 0 on success or -EFAULT from the helper.
 */
#define __get_user_check(x, p) \
	({ \
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		register const typeof(*(p)) __user *__p asm("r0") = (p); \
		register typeof(x) __r2 asm("r2"); \
		register unsigned long __l asm("r1") = __limit; \
		register int __e asm("r0"); \
		unsigned int __ua_flags = uaccess_save_and_enable(); \
		switch (sizeof(*(__p))) { \
		case 1: \
			if (sizeof((x)) >= 8) \
				__get_user_x_64t(__r2, __p, __e, __l, 1); \
			else \
				__get_user_x(__r2, __p, __e, __l, 1); \
			break; \
		case 2: \
			if (sizeof((x)) >= 8) \
				__get_user_x_64t(__r2, __p, __e, __l, 2); \
			else \
				__get_user_x(__r2, __p, __e, __l, 2); \
			break; \
		case 4: \
			if (sizeof((x)) >= 8) \
				__get_user_x_64t(__r2, __p, __e, __l, 4); \
			else \
				__get_user_x(__r2, __p, __e, __l, 4); \
			break; \
		case 8: \
			/* narrowing 8-byte source into a smaller destination */ \
			if (sizeof((x)) < 8) \
				__get_user_x_32t(__r2, __p, __e, __l, 4); \
			else \
				__get_user_x(__r2, __p, __e, __l, 8); \
			break; \
		default: __e = __get_user_bad(); break; \
		} \
		uaccess_restore(__ua_flags); \
		x = (typeof(*(p))) __r2; \
		__e; \
	})

/* get_user(): __get_user_check() plus the might_fault() annotation */
#define get_user(x, p) \
	({ \
		might_fault(); \
		__get_user_check(x, p); \
	 })
/*
 * Out-of-line store helpers; pointer in r0, value in r2(/r3), limit in
 * r1, error code returned in r0.
 */
extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);

/*
 * Checked store of one value: call __put_user_<size>, which validates
 * the address against the limit in r1 (hence addr_limit - 1) and
 * stores __err as 0 or -EFAULT.  Caller brackets this with
 * uaccess_save_and_enable()/uaccess_restore() via __put_user_switch().
 */
#define __put_user_check(__pu_val, __ptr, __err, __s) \
	({ \
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		register typeof(__pu_val) __r2 asm("r2") = __pu_val; \
		register const void __user *__p asm("r0") = __ptr; \
		register unsigned long __l asm("r1") = __limit; \
		register int __e asm("r0"); \
		__asm__ __volatile__ ( \
			__asmeq("%0", "r0") __asmeq("%2", "r2") \
			__asmeq("%3", "r1") \
			"bl	__put_user_" #__s \
			: "=&r" (__e) \
			: "0" (__p), "r" (__r2), "r" (__l) \
			: "ip", "lr", "cc"); \
		__err = __e; \
	})
#else /* CONFIG_MMU */

/*
 * uClinux has only one addr space, so has simplified address limits.
 * Every address is "ok" and no limit check can ever fail.
 */
#define USER_DS			KERNEL_DS

#define segment_eq(a, b)	(1)
#define __addr_ok(addr)		((void)(addr), 1)
#define __range_ok(addr, size)	((void)(addr), 0)
#define get_fs()		(KERNEL_DS)

/* Single address space: there is no limit to switch. */
static inline void set_fs(mm_segment_t fs)
{
}

/* No separate checked variants without an MMU. */
#define get_user(x, p)	__get_user(x, p)
#define __put_user_check __put_user_nocheck

#endif /* CONFIG_MMU */
/* Non-zero when [addr, addr+size) is a valid user range (__range_ok == 0). */
#define access_ok(type, addr, size)	(__range_ok(addr, size) == 0)

/* Highest user address: unlimited when running with kernel addressing. */
#define user_addr_max() \
	(uaccess_kernel() ? ~0UL : get_fs())
/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to EFAULT if an
 * error occurs, and leave it unchanged on success.  Note that these
 * versions are void (ie, don't return a value as such).
 */
/* Unchecked get: evaluates to 0 or -EFAULT. */
#define __get_user(x, ptr) \
	({ \
		long __gu_err = 0; \
		__get_user_err((x), (ptr), __gu_err); \
		__gu_err; \
	})

/* Unchecked get, accumulating the error into the caller's variable. */
#define __get_user_error(x, ptr, err) \
	({ \
		__get_user_err((x), (ptr), err); \
		(void) 0; \
	})
/*
 * Core of the unchecked get: open user access, load via the inline
 * fixup-protected asm for the pointee's size, close user access, then
 * write the (possibly zeroed-on-fault) value to the destination.
 */
#define __get_user_err(x, ptr, err) \
do { \
	unsigned long __gu_addr = (unsigned long)(ptr); \
	unsigned long __gu_val; \
	unsigned int __ua_flags; \
	__chk_user_ptr(ptr); \
	might_fault(); \
	__ua_flags = uaccess_save_and_enable(); \
	switch (sizeof(*(ptr))) { \
	case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err); break; \
	case 2:	__get_user_asm_half(__gu_val, __gu_addr, err); break; \
	case 4:	__get_user_asm_word(__gu_val, __gu_addr, err); break; \
	default: (__gu_val) = __get_user_bad(); \
	} \
	uaccess_restore(__ua_flags); \
	(x) = (__typeof__(*(ptr)))__gu_val; \
} while (0)
/*
 * One fixup-protected user load.  On a fault the exception table sends
 * us to label 3, which sets err = -EFAULT and zeroes the destination so
 * no stale kernel data can leak to the caller.
 */
#define __get_user_asm(x, addr, err, instr) \
	__asm__ __volatile__( \
	"1:	" TUSER(instr) " %1, [%2], #0\n" \
	"2:\n" \
	"	.pushsection .text.fixup,\"ax\"\n" \
	"	.align	2\n" \
	"3:	mov	%0, %3\n" \
	"	mov	%1, #0\n" \
	"	b	2b\n" \
	"	.popsection\n" \
	"	.pushsection __ex_table,\"a\"\n" \
	"	.align	3\n" \
	"	.long	1b, 3b\n" \
	"	.popsection" \
	: "+r" (err), "=&r" (x) \
	: "r" (addr), "i" (-EFAULT) \
	: "cc")

#define __get_user_asm_byte(x, addr, err) \
	__get_user_asm(x, addr, err, ldrb)

/*
 * Halfword loads are done as two byte loads so they work on any
 * alignment; the byte order differs between endiannesses.
 */
#ifndef __ARMEB__
#define __get_user_asm_half(x, __gu_addr, err) \
({ \
	unsigned long __b1, __b2; \
	__get_user_asm_byte(__b1, __gu_addr, err); \
	__get_user_asm_byte(__b2, __gu_addr + 1, err); \
	(x) = __b1 | (__b2 << 8); \
})
#else
#define __get_user_asm_half(x, __gu_addr, err) \
({ \
	unsigned long __b1, __b2; \
	__get_user_asm_byte(__b1, __gu_addr, err); \
	__get_user_asm_byte(__b2, __gu_addr + 1, err); \
	(x) = (__b1 << 8) | __b2; \
})
#endif

#define __get_user_asm_word(x, addr, err) \
	__get_user_asm(x, addr, err, ldr)
/*
 * Common put_user driver: evaluate the value and pointer exactly once,
 * open user access, dispatch on the pointee size to __fn (either the
 * checked or the unchecked backend), then close user access again.
 */
#define __put_user_switch(x, ptr, __err, __fn) \
	do { \
		const __typeof__(*(ptr)) __user *__pu_ptr = (ptr); \
		__typeof__(*(ptr)) __pu_val = (x); \
		unsigned int __ua_flags; \
		might_fault(); \
		__ua_flags = uaccess_save_and_enable(); \
		switch (sizeof(*(ptr))) { \
		case 1: __fn(__pu_val, __pu_ptr, __err, 1); break; \
		case 2:	__fn(__pu_val, __pu_ptr, __err, 2); break; \
		case 4:	__fn(__pu_val, __pu_ptr, __err, 4); break; \
		case 8:	__fn(__pu_val, __pu_ptr, __err, 8); break; \
		default: __err = __put_user_bad(); break; \
		} \
		uaccess_restore(__ua_flags); \
	} while (0)

/* Checked store: evaluates to 0 or -EFAULT. */
#define put_user(x, ptr) \
({ \
	int __pu_err = 0; \
	__put_user_switch((x), (ptr), __pu_err, __put_user_check); \
	__pu_err; \
})

/* Unchecked store (caller must have done access_ok()). */
#define __put_user(x, ptr) \
({ \
	long __pu_err = 0; \
	__put_user_switch((x), (ptr), __pu_err, __put_user_nocheck); \
	__pu_err; \
})

/* Unchecked store, accumulating the error into the caller's variable. */
#define __put_user_error(x, ptr, err) \
({ \
	__put_user_switch((x), (ptr), (err), __put_user_nocheck); \
	(void) 0; \
})

/* Unchecked backend: pick the fixup-protected asm for the size. */
#define __put_user_nocheck(x, __pu_ptr, __err, __size) \
	do { \
		unsigned long __pu_addr = (unsigned long)__pu_ptr; \
		__put_user_nocheck_##__size(x, __pu_addr, __err); \
	} while (0)

#define __put_user_nocheck_1 __put_user_asm_byte
#define __put_user_nocheck_2 __put_user_asm_half
#define __put_user_nocheck_4 __put_user_asm_word
#define __put_user_nocheck_8 __put_user_asm_dword
/*
 * One fixup-protected user store.  On a fault the exception table sends
 * us to label 3, which sets err = -EFAULT and resumes after the store.
 */
#define __put_user_asm(x, __pu_addr, err, instr) \
	__asm__ __volatile__( \
	"1:	" TUSER(instr) " %1, [%2], #0\n" \
	"2:\n" \
	"	.pushsection .text.fixup,\"ax\"\n" \
	"	.align	2\n" \
	"3:	mov	%0, %3\n" \
	"	b	2b\n" \
	"	.popsection\n" \
	"	.pushsection __ex_table,\"a\"\n" \
	"	.align	3\n" \
	"	.long	1b, 3b\n" \
	"	.popsection" \
	: "+r" (err) \
	: "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
	: "cc")

#define __put_user_asm_byte(x, __pu_addr, err) \
	__put_user_asm(x, __pu_addr, err, strb)

/*
 * Halfword stores are done as two byte stores so they work on any
 * alignment; the byte order differs between endiannesses.
 */
#ifndef __ARMEB__
#define __put_user_asm_half(x, __pu_addr, err) \
({ \
	unsigned long __temp = (__force unsigned long)(x); \
	__put_user_asm_byte(__temp, __pu_addr, err); \
	__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \
})
#else
#define __put_user_asm_half(x, __pu_addr, err) \
({ \
	unsigned long __temp = (__force unsigned long)(x); \
	__put_user_asm_byte(__temp >> 8, __pu_addr, err); \
	__put_user_asm_byte(__temp, __pu_addr + 1, err); \
})
#endif

#define __put_user_asm_word(x, __pu_addr, err) \
	__put_user_asm(x, __pu_addr, err, str)
/*
 * %Q2/%R2 select the least/most significant word of the 64-bit operand;
 * which one is stored first depends on endianness.
 */
#ifndef __ARMEB__
#define	__reg_oper0	"%R2"
#define	__reg_oper1	"%Q2"
#else
#define	__reg_oper0	"%Q2"
#define	__reg_oper1	"%R2"
#endif

/*
 * Fixup-protected 64-bit user store as two word stores.  Either faulting
 * store branches to label 4, which sets err = -EFAULT.  Note %1 (the
 * address) is an in/out operand: the ARM variant post-increments it.
 */
#define __put_user_asm_dword(x, __pu_addr, err) \
	__asm__ __volatile__( \
 ARM(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1], #4\n"	) \
 ARM(	"2:	" TUSER(str) " " __reg_oper0 ", [%1]\n"	) \
 THUMB(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1]\n"	) \
 THUMB(	"2:	" TUSER(str) " " __reg_oper0 ", [%1, #4]\n"	) \
	"3:\n" \
	"	.pushsection .text.fixup,\"ax\"\n" \
	"	.align	2\n" \
	"4:	mov	%0, %3\n" \
	"	b	3b\n" \
	"	.popsection\n" \
	"	.pushsection __ex_table,\"a\"\n" \
	"	.align	3\n" \
	"	.long	1b, 4b\n" \
	"	.long	2b, 4b\n" \
	"	.popsection" \
	: "+r" (err), "+r" (__pu_addr) \
	: "r" (x), "i" (-EFAULT) \
	: "cc")
#ifdef CONFIG_MMU
extern unsigned long __must_check
arm_copy_from_user(void *to, const void __user *from, unsigned long n);

/*
 * Bulk copy from user space.  Returns the number of bytes NOT copied
 * (0 on complete success).  User access is opened only around the
 * out-of-line copy.
 */
static inline unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned int __ua_flags;

	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_from_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
}

extern unsigned long __must_check
arm_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check
__copy_to_user_std(void __user *to, const void *from, unsigned long n);

/*
 * Bulk copy to user space.  Returns the number of bytes NOT copied.
 * With UACCESS_WITH_MEMCPY the out-of-line code manages the user
 * access window itself, so we must not bracket it here.
 */
static inline unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
#ifndef CONFIG_UACCESS_WITH_MEMCPY
	unsigned int __ua_flags;
	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_to_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
#else
	return arm_copy_to_user(to, from, n);
#endif
}

extern unsigned long __must_check
arm_clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check
__clear_user_std(void __user *addr, unsigned long n);

/*
 * Zero a user-space range without an access_ok() check.  Returns the
 * number of bytes NOT cleared.
 */
static inline unsigned long __must_check
__clear_user(void __user *addr, unsigned long n)
{
	unsigned int __ua_flags = uaccess_save_and_enable();
	n = arm_clear_user(addr, n);
	uaccess_restore(__ua_flags);
	return n;
}
#else
/*
 * No MMU: "user" and kernel share one address space, so the copies are
 * plain memcpy/memset and can never fault (hence the unconditional 0
 * "bytes not copied" result).
 */
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}
#define __clear_user(addr, n)		(memset((void __force *)addr, 0, n), 0)
#endif

/* Let the generic uaccess code inline the copy wrappers above. */
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
2006-10-26 10:27:42 +01:00
static inline unsigned long __must_check clear_user ( void __user * to , unsigned long n )
2005-04-16 15:20:36 -07:00
{
if ( access_ok ( VERIFY_WRITE , to , n ) )
2006-06-21 14:44:52 +01:00
n = __clear_user ( to , n ) ;
2005-04-16 15:20:36 -07:00
return n ;
}
/* These are from lib/ code, and use __get_user() and friends */
extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#endif /* _ASMARM_UACCESS_H */