/*
 * Access kernel memory without faulting.
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

/**
 * probe_kernel_read(): safely attempt to read from a location
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from
 * @size: size of the data chunk
 *
 * Safely read from address @src to the buffer at @dst.  If a kernel fault
 * happens, handle that and return -EFAULT.
 *
 * We ensure that the copy_from_user is executed in atomic context so that
 * do_page_fault() doesn't attempt to take mmap_sem.  This makes
 * probe_kernel_read() suitable for use within regions where the caller
 * already holds mmap_sem, or other locks which nest inside mmap_sem.
 */

long __weak probe_kernel_read(void *dst, const void *src, size_t size)
	__attribute__((alias("__probe_kernel_read")));

long __probe_kernel_read(void *dst, const void *src, size_t size)
{
	long ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	pagefault_disable();
	ret = __copy_from_user_inatomic(dst,
			(__force const void __user *)src, size);
	pagefault_enable();
	set_fs(old_fs);

	return ret ? -EFAULT : 0;
}
EXPORT_SYMBOL_GPL(probe_kernel_read);
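
/*
 * Illustrative sketch, not part of the original file: a typical caller
 * passes probe_kernel_read() a pointer that may be stale or unmapped
 * (for example from a tracer or crash handler) and checks the return
 * value instead of risking an oops. The function and variable names
 * below are hypothetical.
 */
#if 0
static long example_peek_word(const void *maybe_bad_ptr, unsigned long *out)
{
	unsigned long val;

	/* Fails with -EFAULT instead of oopsing if the address is bad. */
	if (probe_kernel_read(&val, maybe_bad_ptr, sizeof(val)))
		return -EFAULT;

	*out = val;
	return 0;
}
#endif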

/**
 * probe_kernel_write(): safely attempt to write to a location
 * @dst: address to write to
 * @src: pointer to the data that shall be written
 * @size: size of the data chunk
 *
 * Safely write to address @dst from the buffer at @src.  If a kernel fault
 * happens, handle that and return -EFAULT.
 */
long __weak probe_kernel_write(void *dst, const void *src, size_t size)
	__attribute__((alias("__probe_kernel_write")));

long __probe_kernel_write(void *dst, const void *src, size_t size)
{
	long ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	pagefault_disable();
	ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
	pagefault_enable();
	set_fs(old_fs);

	return ret ? -EFAULT : 0;
}
EXPORT_SYMBOL_GPL(probe_kernel_write);
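
/*
 * Illustrative sketch, not part of the original file: probe_kernel_write()
 * is used the same way for stores, e.g. poking a value at an address
 * supplied from elsewhere without trusting that it is mapped. The names
 * below are hypothetical.
 */
#if 0
static long example_poke_word(void *maybe_bad_ptr, unsigned long val)
{
	/* Returns -EFAULT if the destination faults; no oops is taken. */
	return probe_kernel_write(maybe_bad_ptr, &val, sizeof(val));
}
#endif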

/**
 * strncpy_from_unsafe: - Copy a NUL terminated string from unsafe address.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Unsafe address.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from unsafe address to kernel buffer.
 *
 * On success, returns the length of the string INCLUDING the trailing NUL.
 *
 * If access fails, returns -EFAULT (some data may have been copied
 * and the trailing NUL added).
 *
 * If @count is smaller than the length of the string, copies @count-1 bytes,
 * sets the last byte of @dst buffer to NUL and returns @count.
 */
long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
{
	mm_segment_t old_fs = get_fs();
	const void *src = unsafe_addr;
	long ret;

	if (unlikely(count <= 0))
		return 0;

	set_fs(KERNEL_DS);
	pagefault_disable();

	do {
		ret = __copy_from_user_inatomic(dst++,
				(const void __user __force *)src++, 1);
	} while (dst[-1] && ret == 0 && src - unsafe_addr < count);

	dst[-1] = '\0';
	pagefault_enable();
	set_fs(old_fs);

	return ret ? -EFAULT : src - unsafe_addr;
}
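
/*
 * Illustrative sketch, not part of the original file: strncpy_from_unsafe()
 * fits cases like dumping a possibly-invalid string pointer from tracing
 * code. The buffer size and names below are hypothetical.
 */
#if 0
static void example_dump_string(const void *maybe_bad_str)
{
	char buf[64];
	long len;

	/* len includes the trailing NUL on success, -EFAULT on a fault. */
	len = strncpy_from_unsafe(buf, maybe_bad_str, sizeof(buf));
	if (len < 0)
		pr_debug("string at %p is not readable\n", maybe_bad_str);
	else
		pr_debug("string at %p: \"%s\" (%ld bytes incl. NUL)\n",
			 maybe_bad_str, buf, len);
}
#endif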