// SPDX-License-Identifier: GPL-2.0-only
/*
 * Access kernel or user memory without faulting.
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
2008-04-17 22:05:36 +04:00
bool __weak copy_from_kernel_nofault_allowed(const void *unsafe_src,
		size_t size)
{
	return true;
}

#define copy_from_kernel_nofault_loop(dst, src, len, type, err_label)	\
	while (len >= sizeof(type)) {					\
		__get_kernel_nofault(dst, src, type, err_label);	\
		dst += sizeof(type);					\
		src += sizeof(type);					\
		len -= sizeof(type);					\
	}
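
/**
 * copy_from_kernel_nofault(): safely attempt to read from kernel-space
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from
 * @size: size of the data chunk
 *
 * Safely read from kernel address @src to the buffer at @dst.  If a kernel
 * fault happens, handle that and return -EFAULT.  If the architecture
 * rejects reads from this address (copy_from_kernel_nofault_allowed()
 * returns false), return -ERANGE.
 */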
long copy_from_kernel_nofault(void *dst, const void *src, size_t size)
{
	unsigned long align = 0;

	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		align = (unsigned long)dst | (unsigned long)src;

	if (!copy_from_kernel_nofault_allowed(src, size))
		return -ERANGE;

	pagefault_disable();
	if (!(align & 7))
		copy_from_kernel_nofault_loop(dst, src, size, u64, Efault);
	if (!(align & 3))
		copy_from_kernel_nofault_loop(dst, src, size, u32, Efault);
	if (!(align & 1))
		copy_from_kernel_nofault_loop(dst, src, size, u16, Efault);
	copy_from_kernel_nofault_loop(dst, src, size, u8, Efault);
	pagefault_enable();
	return 0;
Efault:
	pagefault_enable();
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(copy_from_kernel_nofault);
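
/*
 * A minimal usage sketch (not part of this file; the helper below is
 * hypothetical): probing an arbitrary kernel pointer from a debugging or
 * tracing path without risking an oops.
 *
 *	static u64 peek_kernel_u64(const void *addr)
 *	{
 *		u64 val;
 *
 *		if (copy_from_kernel_nofault(&val, addr, sizeof(val)))
 *			return 0;
 *		return val;
 *	}
 *
 * A zero return from such a helper is deliberately ambiguous; callers that
 * care can propagate the -EFAULT/-ERANGE error code instead.
 */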

#define copy_to_kernel_nofault_loop(dst, src, len, type, err_label)	\
	while (len >= sizeof(type)) {					\
		__put_kernel_nofault(dst, src, type, err_label);	\
		dst += sizeof(type);					\
		src += sizeof(type);					\
		len -= sizeof(type);					\
	}
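
/**
 * copy_to_kernel_nofault(): safely attempt to write to a kernel-space location
 * @dst: address to write to
 * @src: pointer to the data that shall be written
 * @size: size of the data chunk
 *
 * Safely write to kernel address @dst from the buffer at @src.  If a kernel
 * fault happens, handle that and return -EFAULT.
 */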
long copy_to_kernel_nofault(void *dst, const void *src, size_t size)
{
	unsigned long align = 0;

	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		align = (unsigned long)dst | (unsigned long)src;

	pagefault_disable();
	if (!(align & 7))
		copy_to_kernel_nofault_loop(dst, src, size, u64, Efault);
	if (!(align & 3))
		copy_to_kernel_nofault_loop(dst, src, size, u32, Efault);
	if (!(align & 1))
		copy_to_kernel_nofault_loop(dst, src, size, u16, Efault);
	copy_to_kernel_nofault_loop(dst, src, size, u8, Efault);
	pagefault_enable();
	return 0;
Efault:
	pagefault_enable();
	return -EFAULT;
}
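
/**
 * strncpy_from_kernel_nofault: - Copy a NUL terminated string from unsafe
 *				 kernel address.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @unsafe_addr: Unsafe kernel address.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * On success, returns the length of the string INCLUDING the trailing NUL.
 *
 * If access fails, returns -EFAULT with @dst[0] set to NUL; if the source
 * address is rejected by copy_from_kernel_nofault_allowed(), returns -ERANGE.
 *
 * If @count is smaller than the length of the string, copies @count-1 bytes,
 * sets the last byte of @dst buffer to NUL and returns @count.
 */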
long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count)
{
	const void *src = unsafe_addr;

	if (unlikely(count <= 0))
		return 0;
	if (!copy_from_kernel_nofault_allowed(unsafe_addr, count))
		return -ERANGE;

	pagefault_disable();
	do {
		__get_kernel_nofault(dst, src, u8, Efault);
		dst++;
		src++;
	} while (dst[-1] && src - unsafe_addr < count);
	pagefault_enable();

	dst[-1] = '\0';
	return src - unsafe_addr;

Efault:
	pagefault_enable();
	dst[0] = '\0';
	return -EFAULT;
}
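
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * snapshotting a string that may sit behind a stale or unmapped pointer,
 * e.g. while formatting diagnostics.
 *
 *	char name[64];
 *	long len;
 *
 *	len = strncpy_from_kernel_nofault(name, maybe_bad_ptr, sizeof(name));
 *	if (len < 0)
 *		strscpy(name, "<fault>", sizeof(name));
 *
 * A positive @len includes the trailing NUL; len == sizeof(name) means the
 * string was truncated to fit.
 */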

/**
 * copy_from_user_nofault(): safely attempt to read from a user-space location
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from. This must be a user address.
 * @size: size of the data chunk
 *
 * Safely read from user address @src to the buffer at @dst. If a kernel fault
 * happens, handle that and return -EFAULT.
 */
long copy_from_user_nofault(void *dst, const void __user *src, size_t size)
{
	long ret = -EFAULT;

	if (access_ok(src, size)) {
		pagefault_disable();
		ret = __copy_from_user_inatomic(dst, src, size);
		pagefault_enable();
	}

	if (ret)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(copy_from_user_nofault);
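
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * sampling a user-space word from a context that must not sleep or take a
 * page fault, e.g. an interrupt-driven profiler.
 *
 *	static int sample_user_word(unsigned long __user *uptr, unsigned long *out)
 *	{
 *		return copy_from_user_nofault(out, uptr, sizeof(*out));
 *	}
 *
 * Unlike copy_from_user(), a page that is not resident is reported as
 * -EFAULT instead of being faulted in.
 */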

/**
 * copy_to_user_nofault(): safely attempt to write to a user-space location
 * @dst: address to write to
 * @src: pointer to the data that shall be written
 * @size: size of the data chunk
 *
 * Safely write to address @dst from the buffer at @src. If a kernel fault
 * happens, handle that and return -EFAULT.
 */
long copy_to_user_nofault(void __user *dst, const void *src, size_t size)
{
	long ret = -EFAULT;

	if (access_ok(dst, size)) {
		pagefault_disable();
		ret = __copy_to_user_inatomic(dst, src, size);
		pagefault_enable();
	}

	if (ret)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(copy_to_user_nofault);
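
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * pushing a result to a user-supplied pointer from a non-sleeping context.
 *
 *	static int report_counter(u64 __user *uptr, u64 value)
 *	{
 *		return copy_to_user_nofault(uptr, &value, sizeof(value));
 *	}
 *
 * The write only succeeds if the destination page is already present and
 * writable; otherwise the caller gets -EFAULT and must fall back to a
 * context where faults may be taken.
 */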

/**
 * strncpy_from_user_nofault: - Copy a NUL terminated string from unsafe user
 *				address.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @unsafe_addr: Unsafe user address.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from unsafe user address to kernel buffer.
 *
 * On success, returns the length of the string INCLUDING the trailing NUL.
 *
 * If access fails, returns -EFAULT (some data may have been copied
 * and the trailing NUL added).
 *
 * If @count is smaller than the length of the string, copies @count-1 bytes,
 * sets the last byte of @dst buffer to NUL and returns @count.
 */
long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
			      long count)
{
	long ret;

	if (unlikely(count <= 0))
		return 0;

	pagefault_disable();
	ret = strncpy_from_user(dst, unsafe_addr, count);
	pagefault_enable();

	if (ret >= count) {
		ret = count;
		dst[ret - 1] = '\0';
	} else if (ret > 0) {
		ret++;
	}

	return ret;
}
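
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * capturing a short user string for a trace record where faulting the page
 * in is not an option.
 *
 *	char buf[32];
 *	long len;
 *
 *	len = strncpy_from_user_nofault(buf, user_str, sizeof(buf));
 *	if (len <= 0)
 *		buf[0] = '\0';
 *
 * A positive @len includes the trailing NUL; len == sizeof(buf) means the
 * string was truncated.
 */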

/**
 * strnlen_user_nofault: - Get the size of a user string INCLUDING final NUL.
 * @unsafe_addr: The string to measure.
 * @count: Maximum count (including NUL)
 *
 * Get the size of a NUL-terminated string in user space without pagefault.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 *
 * If the string is too long, returns a number larger than @count. User
 * has to check the return value against "> count".
 * On exception (or invalid count), returns 0.
 *
 * Unlike strnlen_user, this can be used from IRQ handler etc. because
 * it disables pagefaults.
 */
long strnlen_user_nofault(const void __user *unsafe_addr, long count)
{
	int ret;

	pagefault_disable();
	ret = strnlen_user(unsafe_addr, count);
	pagefault_enable();

	return ret;
}
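
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * sizing a user string before deciding whether it fits a fixed-size record.
 *
 *	long n = strnlen_user_nofault(user_str, RECORD_SIZE);
 *
 * n == 0 means a fault or invalid count, n > RECORD_SIZE means the string
 * does not fit, and anything else is the length including the trailing NUL.
 */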

/*
 * Kept deliberately out of line: this is a cold, unconditional warning
 * called from the check_copy_size() error path.  check_copy_size() itself
 * must stay inline to benefit from constant folding, but inlining the
 * WARN() at every call site bloats the kernel when the compiler refuses to
 * honour the inline hint (see "mm: uninline copy_overflow()").
 */
void __copy_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}
EXPORT_SYMBOL(__copy_overflow);
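
/*
 * For reference, the inline wrapper that dispatches here lives in
 * include/linux/thread_info.h and, per the commit referenced above, looks
 * roughly like the sketch below (reconstructed, not verbatim):
 *
 *	static inline void copy_overflow(int size, unsigned long count)
 *	{
 *		if (IS_ENABLED(CONFIG_BUG))
 *			__copy_overflow(size, count);
 *	}
 */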