/*
 *  arch/s390/lib/uaccess_pt.c
 *
 *  User access functions based on page table walks for enhanced
 *  system layout without hardware support.
 *
 *  Copyright IBM Corp. 2006
 *  Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"
2007-10-22 12:52:46 +02:00
static inline pte_t * follow_table ( struct mm_struct * mm , unsigned long addr )
{
pgd_t * pgd ;
2007-10-22 12:52:48 +02:00
pud_t * pud ;
2007-10-22 12:52:46 +02:00
pmd_t * pmd ;
pgd = pgd_offset ( mm , addr ) ;
if ( pgd_none ( * pgd ) | | unlikely ( pgd_bad ( * pgd ) ) )
2009-12-07 12:51:47 +01:00
return ( pte_t * ) 0x3a ;
2007-10-22 12:52:46 +02:00
2007-10-22 12:52:48 +02:00
pud = pud_offset ( pgd , addr ) ;
if ( pud_none ( * pud ) | | unlikely ( pud_bad ( * pud ) ) )
2009-12-07 12:51:47 +01:00
return ( pte_t * ) 0x3b ;
2007-10-22 12:52:48 +02:00
pmd = pmd_offset ( pud , addr ) ;
2007-10-22 12:52:46 +02:00
if ( pmd_none ( * pmd ) | | unlikely ( pmd_bad ( * pmd ) ) )
2009-12-07 12:51:47 +01:00
return ( pte_t * ) 0x10 ;
2007-10-22 12:52:46 +02:00
return pte_offset_map ( pmd , addr ) ;
}
2009-12-07 12:51:47 +01:00
static __always_inline size_t __user_copy_pt ( unsigned long uaddr , void * kptr ,
size_t n , int write_user )
2006-12-04 15:40:45 +01:00
{
struct mm_struct * mm = current - > mm ;
unsigned long offset , pfn , done , size ;
pte_t * pte ;
void * from , * to ;
done = 0 ;
retry :
spin_lock ( & mm - > page_table_lock ) ;
do {
2007-10-22 12:52:46 +02:00
pte = follow_table ( mm , uaddr ) ;
2009-12-07 12:51:47 +01:00
if ( ( unsigned long ) pte < 0x1000 )
goto fault ;
if ( ! pte_present ( * pte ) ) {
pte = ( pte_t * ) 0x11 ;
2006-12-04 15:40:45 +01:00
goto fault ;
2009-12-07 12:51:47 +01:00
} else if ( write_user & & ! pte_write ( * pte ) ) {
pte = ( pte_t * ) 0x04 ;
goto fault ;
}
2006-12-04 15:40:45 +01:00
pfn = pte_pfn ( * pte ) ;
offset = uaddr & ( PAGE_SIZE - 1 ) ;
size = min ( n - done , PAGE_SIZE - offset ) ;
if ( write_user ) {
to = ( void * ) ( ( pfn < < PAGE_SHIFT ) + offset ) ;
from = kptr + done ;
} else {
from = ( void * ) ( ( pfn < < PAGE_SHIFT ) + offset ) ;
to = kptr + done ;
}
memcpy ( to , from , size ) ;
done + = size ;
uaddr + = size ;
} while ( done < n ) ;
spin_unlock ( & mm - > page_table_lock ) ;
return n - done ;
fault :
spin_unlock ( & mm - > page_table_lock ) ;
2009-12-07 12:51:47 +01:00
if ( __handle_fault ( uaddr , ( unsigned long ) pte , write_user ) )
2006-12-04 15:40:45 +01:00
return n - done ;
goto retry ;
}
2007-02-05 21:18:17 +01:00
/*
* Do DAT for user address by page table walk , return kernel address .
* This function needs to be called with current - > mm - > page_table_lock held .
*/
2009-12-07 12:51:47 +01:00
static __always_inline unsigned long __dat_user_addr ( unsigned long uaddr )
2007-02-05 21:18:17 +01:00
{
struct mm_struct * mm = current - > mm ;
2009-12-07 12:51:47 +01:00
unsigned long pfn ;
2007-02-05 21:18:17 +01:00
pte_t * pte ;
int rc ;
retry :
2007-10-22 12:52:46 +02:00
pte = follow_table ( mm , uaddr ) ;
2009-12-07 12:51:47 +01:00
if ( ( unsigned long ) pte < 0x1000 )
goto fault ;
if ( ! pte_present ( * pte ) ) {
pte = ( pte_t * ) 0x11 ;
2007-02-05 21:18:17 +01:00
goto fault ;
2009-12-07 12:51:47 +01:00
}
2007-02-05 21:18:17 +01:00
pfn = pte_pfn ( * pte ) ;
2009-12-07 12:51:47 +01:00
return ( pfn < < PAGE_SHIFT ) + ( uaddr & ( PAGE_SIZE - 1 ) ) ;
2007-02-05 21:18:17 +01:00
fault :
spin_unlock ( & mm - > page_table_lock ) ;
2009-12-07 12:51:47 +01:00
rc = __handle_fault ( uaddr , ( unsigned long ) pte , 0 ) ;
2007-02-05 21:18:17 +01:00
spin_lock ( & mm - > page_table_lock ) ;
2009-12-07 12:51:47 +01:00
if ( ! rc )
goto retry ;
return 0 ;
2007-02-05 21:18:17 +01:00
}
2006-12-04 15:40:45 +01:00
size_t copy_from_user_pt ( size_t n , const void __user * from , void * to )
{
size_t rc ;
if ( segment_eq ( get_fs ( ) , KERNEL_DS ) ) {
memcpy ( to , ( void __kernel __force * ) from , n ) ;
return 0 ;
}
rc = __user_copy_pt ( ( unsigned long ) from , to , n , 0 ) ;
if ( unlikely ( rc ) )
memset ( to + n - rc , 0 , rc ) ;
return rc ;
}
/*
 * Copy @n bytes from kernel space to user space.
 * Returns the number of bytes not copied.
 */
size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
	if (segment_eq(get_fs(), KERNEL_DS)) {
		/* kernel address space: a plain memcpy is sufficient */
		memcpy((void __kernel __force *) to, from, n);
		return 0;
	}
	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}
2007-02-05 21:18:17 +01:00
static size_t clear_user_pt ( size_t n , void __user * to )
{
long done , size , ret ;
if ( segment_eq ( get_fs ( ) , KERNEL_DS ) ) {
memset ( ( void __kernel __force * ) to , 0 , n ) ;
return 0 ;
}
done = 0 ;
do {
if ( n - done > PAGE_SIZE )
size = PAGE_SIZE ;
else
size = n - done ;
ret = __user_copy_pt ( ( unsigned long ) to + done ,
& empty_zero_page , size , 1 ) ;
done + = size ;
if ( ret )
return ret + n - done ;
} while ( done < n ) ;
return 0 ;
}
/*
 * Determine the length of a NUL-terminated user string, scanning at
 * most @count bytes.  Returns the string length including the
 * terminator, or 0 on unresolvable fault.
 */
static size_t strnlen_user_pt(size_t count, const char __user *src)
{
	unsigned long uaddr = (unsigned long) src;
	struct mm_struct *mm = current->mm;
	unsigned long off, len, done = 0;
	size_t len_str;
	char *kaddr;
	pte_t *pte;

	if (segment_eq(get_fs(), KERNEL_DS))
		return strnlen((const char __kernel __force *) src, count) + 1;

retry:
	spin_lock(&mm->page_table_lock);
	do {
		pte = follow_table(mm, uaddr);
		/* values below 0x1000 encode a fault code, not a pointer */
		if ((unsigned long) pte < 0x1000)
			goto fault;
		if (!pte_present(*pte)) {
			pte = (pte_t *) 0x11;
			goto fault;
		}
		off = uaddr & (PAGE_SIZE - 1);
		kaddr = (char *)(pte_pfn(*pte) << PAGE_SHIFT) + off;
		len = min(count - done, PAGE_SIZE - off);
		len_str = strnlen(kaddr, len);
		done += len_str;
		uaddr += len_str;
		/* continue only if no NUL was found within this page */
	} while (len_str == len && done < count);
	spin_unlock(&mm->page_table_lock);
	return done + 1;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, (unsigned long) pte, 0))
		return 0;
	goto retry;
}
/*
 * Copy a NUL-terminated string of at most @count bytes from user space
 * into @dst.  Returns the length of the string excluding the
 * terminator if it fit, @count if it was truncated, or -EFAULT.
 */
static size_t strncpy_from_user_pt(size_t count, const char __user *src,
				   char *dst)
{
	size_t n = strnlen_user_pt(count, src);

	if (!n)
		return -EFAULT;
	if (n > count)
		n = count;
	if (segment_eq(get_fs(), KERNEL_DS))
		memcpy(dst, (const char __kernel __force *) src, n);
	else if (__user_copy_pt((unsigned long) src, dst, n, 0))
		return -EFAULT;
	/* do not count the terminating NUL if it was copied */
	return (dst[n - 1] == '\0') ? n - 1 : n;
}
/*
 * Copy @n bytes from one user address to another, resolving both
 * addresses via page table walks and copying at most one page-bounded
 * chunk per iteration.  Returns the number of bytes not copied.
 *
 * Bug fix: offset_to was previously derived from uaddr_from instead of
 * uaddr_to, so whenever source and destination had different offsets
 * within their pages, the destination was written at the wrong place.
 */
static size_t copy_in_user_pt(size_t n, void __user *to,
			      const void __user *from)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
		      uaddr, done, size, error_code;
	unsigned long uaddr_from = (unsigned long) from;
	unsigned long uaddr_to = (unsigned long) to;
	pte_t *pte_from, *pte_to;
	int write_user;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy((void __force *) to, (void __force *) from, n);
		return 0;
	}
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		/* resolve the source page (read access) */
		write_user = 0;
		uaddr = uaddr_from;
		pte_from = follow_table(mm, uaddr_from);
		error_code = (unsigned long) pte_from;
		if (error_code < 0x1000)
			goto fault;
		if (!pte_present(*pte_from)) {
			error_code = 0x11;
			goto fault;
		}

		/* resolve the destination page (write access) */
		write_user = 1;
		uaddr = uaddr_to;
		pte_to = follow_table(mm, uaddr_to);
		error_code = (unsigned long) pte_to;
		if (error_code < 0x1000)
			goto fault;
		if (!pte_present(*pte_to)) {
			error_code = 0x11;
			goto fault;
		} else if (!pte_write(*pte_to)) {
			error_code = 0x04;
			goto fault;
		}

		pfn_from = pte_pfn(*pte_from);
		pfn_to = pte_pfn(*pte_to);
		offset_from = uaddr_from & (PAGE_SIZE - 1);
		offset_to = uaddr_to & (PAGE_SIZE - 1);	/* was uaddr_from */
		offset_max = max(offset_from, offset_to);
		size = min(n - done, PAGE_SIZE - offset_max);

		memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to,
		       (void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
		done += size;
		uaddr_from += size;
		uaddr_to += size;
	} while (done < n);
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, error_code, write_user))
		return n - done;
	goto retry;
}
/*
 * Load the old futex value, apply @insn to compute the new value, and
 * install it with compare-and-swap, retrying until the CS succeeds.
 * Faulting accesses branch to label 4 and leave -EFAULT in @ret.
 * (Extraction-mangled asm template strings reconstructed.)
 */
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
	asm volatile("0: l   %1,0(%6)\n"				\
		     "1: " insn						\
		     "2: cs  %1,%2,0(%6)\n"				\
		     "3: jl  1b\n"					\
		     "   lhi %0,0\n"					\
		     "4:\n"						\
		     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)	\
		     : "=d" (ret), "=&d" (oldval), "=&d" (newval),	\
		       "=m" (*uaddr)					\
		     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
		       "m" (*uaddr) : "cc");
2011-03-10 18:50:58 -08:00
static int __futex_atomic_op_pt ( int op , u32 __user * uaddr , int oparg , int * old )
2007-02-05 21:18:17 +01:00
{
int oldval = 0 , newval , ret ;
switch ( op ) {
case FUTEX_OP_SET :
__futex_atomic_op ( " lr %2,%5 \n " ,
ret , oldval , newval , uaddr , oparg ) ;
break ;
case FUTEX_OP_ADD :
__futex_atomic_op ( " lr %2,%1 \n ar %2,%5 \n " ,
ret , oldval , newval , uaddr , oparg ) ;
break ;
case FUTEX_OP_OR :
__futex_atomic_op ( " lr %2,%1 \n or %2,%5 \n " ,
ret , oldval , newval , uaddr , oparg ) ;
break ;
case FUTEX_OP_ANDN :
__futex_atomic_op ( " lr %2,%1 \n nr %2,%5 \n " ,
ret , oldval , newval , uaddr , oparg ) ;
break ;
case FUTEX_OP_XOR :
__futex_atomic_op ( " lr %2,%1 \n xr %2,%5 \n " ,
ret , oldval , newval , uaddr , oparg ) ;
break ;
default :
ret = - ENOSYS ;
}
2008-04-17 07:46:27 +02:00
if ( ret = = 0 )
* old = oldval ;
2007-02-05 21:18:17 +01:00
return ret ;
}
2011-03-10 18:50:58 -08:00
int futex_atomic_op_pt ( int op , u32 __user * uaddr , int oparg , int * old )
2007-02-05 21:18:17 +01:00
{
int ret ;
2008-04-17 07:46:27 +02:00
if ( segment_eq ( get_fs ( ) , KERNEL_DS ) )
return __futex_atomic_op_pt ( op , uaddr , oparg , old ) ;
2007-02-05 21:18:17 +01:00
spin_lock ( & current - > mm - > page_table_lock ) ;
uaddr = ( int __user * ) __dat_user_addr ( ( unsigned long ) uaddr ) ;
if ( ! uaddr ) {
spin_unlock ( & current - > mm - > page_table_lock ) ;
return - EFAULT ;
}
get_page ( virt_to_page ( uaddr ) ) ;
spin_unlock ( & current - > mm - > page_table_lock ) ;
2008-04-17 07:46:27 +02:00
ret = __futex_atomic_op_pt ( op , uaddr , oparg , old ) ;
put_page ( virt_to_page ( uaddr ) ) ;
return ret ;
}
2011-03-10 18:50:58 -08:00
static int __futex_atomic_cmpxchg_pt ( u32 * uval , u32 __user * uaddr ,
u32 oldval , u32 newval )
2008-04-17 07:46:27 +02:00
{
int ret ;
asm volatile ( " 0: cs %1,%4,0(%5) \n "
2011-03-10 18:48:51 -08:00
" 1: la %0,0 \n "
2008-04-17 07:46:27 +02:00
" 2: \n "
EX_TABLE ( 0 b , 2 b ) EX_TABLE ( 1 b , 2 b )
2007-02-05 21:18:17 +01:00
: " =d " ( ret ) , " +d " ( oldval ) , " =m " ( * uaddr )
: " 0 " ( - EFAULT ) , " d " ( newval ) , " a " ( uaddr ) , " m " ( * uaddr )
: " cc " , " memory " ) ;
2011-03-10 18:48:51 -08:00
* uval = oldval ;
2008-04-17 07:46:27 +02:00
return ret ;
}
2011-03-10 18:50:58 -08:00
int futex_atomic_cmpxchg_pt ( u32 * uval , u32 __user * uaddr ,
u32 oldval , u32 newval )
2008-04-17 07:46:27 +02:00
{
int ret ;
if ( segment_eq ( get_fs ( ) , KERNEL_DS ) )
2011-03-10 18:48:51 -08:00
return __futex_atomic_cmpxchg_pt ( uval , uaddr , oldval , newval ) ;
2008-04-17 07:46:27 +02:00
spin_lock ( & current - > mm - > page_table_lock ) ;
uaddr = ( int __user * ) __dat_user_addr ( ( unsigned long ) uaddr ) ;
if ( ! uaddr ) {
spin_unlock ( & current - > mm - > page_table_lock ) ;
return - EFAULT ;
}
get_page ( virt_to_page ( uaddr ) ) ;
spin_unlock ( & current - > mm - > page_table_lock ) ;
2011-03-10 18:48:51 -08:00
ret = __futex_atomic_cmpxchg_pt ( uval , uaddr , oldval , newval ) ;
2007-02-05 21:18:17 +01:00
put_page ( virt_to_page ( uaddr ) ) ;
return ret ;
}
/*
 * Operation vector selecting the page-table-walk based user access
 * primitives (per the file header: for system layouts without
 * hardware support).
 */
struct uaccess_ops uaccess_pt = {
	.copy_from_user		= copy_from_user_pt,
	.copy_from_user_small	= copy_from_user_pt,
	.copy_to_user		= copy_to_user_pt,
	.copy_to_user_small	= copy_to_user_pt,
	.copy_in_user		= copy_in_user_pt,
	.clear_user		= clear_user_pt,
	.strnlen_user		= strnlen_user_pt,
	.strncpy_from_user	= strncpy_from_user_pt,
	.futex_atomic_op	= futex_atomic_op_pt,
	.futex_atomic_cmpxchg	= futex_atomic_cmpxchg_pt,
};