/*
 * User access functions based on page table walks for enhanced
 * system layout without hardware support.
 *
 * Copyright IBM Corp. 2006, 2012
 * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"

#ifndef CONFIG_64BIT
#define AHI     "ahi"
#define SLR     "slr"
#else
#define AHI     "aghi"
#define SLR     "slgr"
#endif

static size_t strnlen_kernel(size_t count, const char __user *src)
{
        register unsigned long reg0 asm("0") = 0UL;
        unsigned long tmp1, tmp2;

        /* SRST searches for the byte held in r0, i.e. the terminating '\0'. */
        asm volatile(
                "   la    %2,0(%1)\n"
                "   la    %3,0(%0,%1)\n"
                "   " SLR " %0,%0\n"
                "0: srst  %3,%2\n"
                "   jo    0b\n"
                "   la    %0,1(%3)\n"   /* strnlen_kernel result includes \0 */
                "   " SLR " %0,%1\n"
                "1:\n"
                EX_TABLE(0b,1b)
                : "+a" (count), "+a" (src), "=a" (tmp1), "=a" (tmp2)
                : "d" (reg0) : "cc", "memory");
        return count;
}
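
/*
 * Usage sketch (illustrative, not part of the original file): SRST scans
 * for the byte in r0 (zero here), so the returned count includes the
 * terminating '\0'; if no '\0' is found within count bytes, count + 1 is
 * returned:
 *
 *      static const char s[] = "abc";
 *      size_t len = strnlen_kernel(8, (const char __user *) s);
 *      // len == 4: three characters plus the terminating '\0'
 */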

static size_t copy_in_kernel(size_t count, void __user *to,
                             const void __user *from)
{
        unsigned long tmp1;

        asm volatile(
                "   " AHI " %0,-1\n"
                "   jo    5f\n"
                "   bras  %3,3f\n"
                "0: " AHI " %0,257\n"
                "1: mvc   0(1,%1),0(%2)\n"
                "   la    %1,1(%1)\n"
                "   la    %2,1(%2)\n"
                "   " AHI " %0,-1\n"
                "   jnz   1b\n"
                "   j     5f\n"
                "2: mvc   0(256,%1),0(%2)\n"
                "   la    %1,256(%1)\n"
                "   la    %2,256(%2)\n"
                "3: " AHI " %0,-256\n"
                "   jnm   2b\n"
                "4: ex    %0,1b-0b(%3)\n"
                "5: " SLR " %0,%0\n"
                "6:\n"
                EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
                : "+a" (count), "+a" (to), "+a" (from), "=a" (tmp1)
                : : "cc", "memory");
        return count;
}
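
/*
 * A worked trace of the copy loop above (illustrative, not part of the
 * original file), for count == 600:
 *
 *      ahi %0,-1   -> 599 (mvc/ex length codes encode "length - 1")
 *      3:/2:       -> two full 256-byte mvc blocks, 512 bytes copied
 *      ahi %0,-256 -> -169, so jnm falls through; the low byte is 87
 *      4: ex       -> executes the mvc at 1: with length code 87,
 *                     i.e. 88 more bytes, 600 bytes in total
 *
 * The code at 0:/1: is reached only through the exception table: after a
 * fault in a 256-byte block, the remainder is copied byte by byte so that
 * the returned residual count is exact.
 */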

/*
 * Returns kernel address for user virtual address. If the returned address is
 * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the
 * address contains the (negative) exception code.
 */
#ifdef CONFIG_64BIT
static unsigned long follow_table(struct mm_struct *mm,
                                  unsigned long address, int write)
{
        unsigned long *table = (unsigned long *)__pa(mm->pgd);

        switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
        case _ASCE_TYPE_REGION1:
                table = table + ((address >> 53) & 0x7ff);
                if (unlikely(*table & _REGION_ENTRY_INVALID))
                        return -0x39UL;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                /* fallthrough */
        case _ASCE_TYPE_REGION2:
                table = table + ((address >> 42) & 0x7ff);
                if (unlikely(*table & _REGION_ENTRY_INVALID))
                        return -0x3aUL;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                /* fallthrough */
        case _ASCE_TYPE_REGION3:
                table = table + ((address >> 31) & 0x7ff);
                if (unlikely(*table & _REGION_ENTRY_INVALID))
                        return -0x3bUL;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                /* fallthrough */
        case _ASCE_TYPE_SEGMENT:
                table = table + ((address >> 20) & 0x7ff);
                if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
                        return -0x10UL;
                if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) {
                        if (write && (*table & _SEGMENT_ENTRY_PROTECT))
                                return -0x04UL;
                        return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) +
                                (address & ~_SEGMENT_ENTRY_ORIGIN_LARGE);
                }
                table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
        }
        table = table + ((address >> 12) & 0xff);
        if (unlikely(*table & _PAGE_INVALID))
                return -0x11UL;
        if (write && (*table & _PAGE_PROTECT))
                return -0x04UL;
        return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
}

#else /* CONFIG_64BIT */

static unsigned long follow_table(struct mm_struct *mm,
                                  unsigned long address, int write)
{
        unsigned long *table = (unsigned long *)__pa(mm->pgd);

        table = table + ((address >> 20) & 0x7ff);
        if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
                return -0x10UL;
        table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
        table = table + ((address >> 12) & 0xff);
        if (unlikely(*table & _PAGE_INVALID))
                return -0x11UL;
        if (write && (*table & _PAGE_PROTECT))
                return -0x04UL;
        return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
}

#endif /* CONFIG_64BIT */
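
/*
 * A note on the error values above (the codes themselves are standard s390
 * program interruption codes; this summary is an editorial aid): 0x04
 * protection, 0x10 segment translation, 0x11 page translation, and
 * 0x39/0x3a/0x3b region first/second/third translation. Callers pass the
 * negated value straight to the fault handler:
 *
 *      kaddr = follow_table(mm, uaddr, write);
 *      if (IS_ERR_VALUE(kaddr))
 *              rc = __handle_fault(uaddr, -kaddr, write);
 */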

static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
                                             size_t n, int write_user)
{
        struct mm_struct *mm = current->mm;
        unsigned long offset, done, size, kaddr;
        void *from, *to;

        done = 0;
retry:
        spin_lock(&mm->page_table_lock);
        do {
                kaddr = follow_table(mm, uaddr, write_user);
                if (IS_ERR_VALUE(kaddr))
                        goto fault;

                offset = uaddr & ~PAGE_MASK;
                size = min(n - done, PAGE_SIZE - offset);
                if (write_user) {
                        to = (void *) kaddr;
                        from = kptr + done;
                } else {
                        from = (void *) kaddr;
                        to = kptr + done;
                }
                memcpy(to, from, size);
                done += size;
                uaddr += size;
        } while (done < n);
        spin_unlock(&mm->page_table_lock);
        return n - done;
fault:
        spin_unlock(&mm->page_table_lock);
        if (__handle_fault(uaddr, -kaddr, write_user))
                return n - done;
        goto retry;
}
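
/*
 * Usage sketch (illustrative, not part of the original file): the return
 * value is the number of bytes that could NOT be copied, the usual uaccess
 * convention, so 0 means complete success:
 *
 *      size_t left = __user_copy_pt((unsigned long) from, to, n, 0);
 *      // only the first n - left bytes of "to" are valid if left != 0
 */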

/*
 * Do DAT for user address by page table walk, return kernel address.
 * This function needs to be called with current->mm->page_table_lock held.
 */
static __always_inline unsigned long __dat_user_addr(unsigned long uaddr,
                                                     int write)
{
        struct mm_struct *mm = current->mm;
        unsigned long kaddr;
        int rc;

retry:
        kaddr = follow_table(mm, uaddr, write);
        if (IS_ERR_VALUE(kaddr))
                goto fault;

        return kaddr;
fault:
        spin_unlock(&mm->page_table_lock);
        rc = __handle_fault(uaddr, -kaddr, write);
        spin_lock(&mm->page_table_lock);
        if (!rc)
                goto retry;
        return 0;
}

size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
        size_t rc;

        if (segment_eq(get_fs(), KERNEL_DS))
                return copy_in_kernel(n, (void __user *) to, from);
        rc = __user_copy_pt((unsigned long) from, to, n, 0);
        if (unlikely(rc))
                memset(to + n - rc, 0, rc);
        return rc;
}
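
/*
 * Behaviour sketch (follows directly from the code above): on a partial
 * copy the uncopied tail of the kernel buffer is zeroed, matching the
 * generic copy_from_user() contract:
 *
 *      size_t left = copy_from_user_pt(64, ubuf, kbuf);
 *      // fault after 48 bytes: left == 16 and kbuf[48..63] == 0
 */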

size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
        if (segment_eq(get_fs(), KERNEL_DS))
                return copy_in_kernel(n, to, (void __user *) from);
        return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}

static size_t clear_user_pt(size_t n, void __user *to)
{
        void *zpage = (void *) empty_zero_page;
        long done, size, ret;

        done = 0;
        do {
                if (n - done > PAGE_SIZE)
                        size = PAGE_SIZE;
                else
                        size = n - done;
                if (segment_eq(get_fs(), KERNEL_DS))
                        /* clear at most one page per round: zpage is a single page */
                        ret = copy_in_kernel(size, to, (void __user *) zpage);
                else
                        ret = __user_copy_pt((unsigned long) to, zpage, size, 1);
                done += size;
                to += size;
                if (ret)
                        return ret + n - done;
        } while (done < n);
        return 0;
}

static size_t strnlen_user_pt(size_t count, const char __user *src)
{
        unsigned long uaddr = (unsigned long) src;
        struct mm_struct *mm = current->mm;
        unsigned long offset, done, len, kaddr;
        size_t len_str;

        if (unlikely(!count))
                return 0;
        if (segment_eq(get_fs(), KERNEL_DS))
                return strnlen_kernel(count, src);
        done = 0;
retry:
        spin_lock(&mm->page_table_lock);
        do {
                kaddr = follow_table(mm, uaddr, 0);
                if (IS_ERR_VALUE(kaddr))
                        goto fault;

                offset = uaddr & ~PAGE_MASK;
                len = min(count - done, PAGE_SIZE - offset);
                len_str = strnlen((char *) kaddr, len);
                done += len_str;
                uaddr += len_str;
        } while ((len_str == len) && (done < count));
        spin_unlock(&mm->page_table_lock);
        return done + 1;
fault:
        spin_unlock(&mm->page_table_lock);
        if (__handle_fault(uaddr, -kaddr, 0))
                return 0;
        goto retry;
}
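
/*
 * Usage sketch (illustrative): as with strnlen_user(), the returned length
 * includes the terminating '\0', a string longer than count yields
 * count + 1, and 0 signals an unresolvable fault:
 *
 *      len = strnlen_user_pt(16, src);
 *      // "abc" -> 4, sixteen non-zero bytes -> 17, fault -> 0
 */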

static size_t strncpy_from_user_pt(size_t count, const char __user *src,
                                   char *dst)
{
        size_t done, len, offset, len_str;

        if (unlikely(!count))
                return 0;
        done = 0;
        do {
                offset = (size_t) src & ~PAGE_MASK;
                len = min(count - done, PAGE_SIZE - offset);
                if (segment_eq(get_fs(), KERNEL_DS)) {
                        if (copy_in_kernel(len, (void __user *) dst, src))
                                return -EFAULT;
                } else {
                        if (__user_copy_pt((unsigned long) src, dst, len, 0))
                                return -EFAULT;
                }
                len_str = strnlen(dst, len);
                done += len_str;
                src += len_str;
                dst += len_str;
        } while ((len_str == len) && (done < count));
        return done;
}
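
/*
 * Usage sketch (illustrative): the return value is the length of the copied
 * string without the terminator, or -EFAULT on an unresolvable fault:
 *
 *      done = strncpy_from_user_pt(sizeof(buf), usrc, buf);
 *      // user string "abc\0" -> done == 3, buf now holds "abc\0"
 */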

static size_t copy_in_user_pt(size_t n, void __user *to,
                              const void __user *from)
{
        struct mm_struct *mm = current->mm;
        unsigned long offset_max, uaddr, done, size, error_code;
        unsigned long uaddr_from = (unsigned long) from;
        unsigned long uaddr_to = (unsigned long) to;
        unsigned long kaddr_to, kaddr_from;
        int write_user;

        if (segment_eq(get_fs(), KERNEL_DS))
                return copy_in_kernel(n, to, from);
        done = 0;
retry:
        spin_lock(&mm->page_table_lock);
        do {
                write_user = 0;
                uaddr = uaddr_from;
                kaddr_from = follow_table(mm, uaddr_from, 0);
                error_code = kaddr_from;
                if (IS_ERR_VALUE(error_code))
                        goto fault;

                write_user = 1;
                uaddr = uaddr_to;
                kaddr_to = follow_table(mm, uaddr_to, 1);
                error_code = (unsigned long) kaddr_to;
                if (IS_ERR_VALUE(error_code))
                        goto fault;

                offset_max = max(uaddr_from & ~PAGE_MASK,
                                 uaddr_to & ~PAGE_MASK);
                size = min(n - done, PAGE_SIZE - offset_max);

                memcpy((void *) kaddr_to, (void *) kaddr_from, size);
                done += size;
                uaddr_from += size;
                uaddr_to += size;
        } while (done < n);
        spin_unlock(&mm->page_table_lock);
        return n - done;
fault:
        spin_unlock(&mm->page_table_lock);
        if (__handle_fault(uaddr, -error_code, write_user))
                return n - done;
        goto retry;
}
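
/*
 * Worked example for the offset_max logic above (values are made up for
 * illustration): with uaddr_from ending in 0x123 and uaddr_to ending in
 * 0xf00 on 4K pages, offset_max == 0xf00, so at most
 * PAGE_SIZE - 0xf00 == 0x100 bytes are copied this round. Capping on the
 * larger in-page offset guarantees that neither operand crosses a page
 * boundary between two follow_table() translations.
 */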

#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)     \
        asm volatile("0: l   %1,0(%6)\n"                                \
                     "1: " insn                                         \
                     "2: cs  %1,%2,0(%6)\n"                             \
                     "3: jl  1b\n"                                      \
                     "   lhi %0,0\n"                                    \
                     "4:\n"                                             \
                     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)    \
                     : "=d" (ret), "=&d" (oldval), "=&d" (newval),      \
                       "=m" (*uaddr)                                    \
                     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),         \
                       "m" (*uaddr) : "cc");

static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
        int oldval = 0, newval, ret;

        switch (op) {
        case FUTEX_OP_SET:
                __futex_atomic_op("lr %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_ADD:
                __futex_atomic_op("lr %2,%1\nar %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_OR:
                __futex_atomic_op("lr %2,%1\nor %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_ANDN:
                /* FUTEX_OP_ANDN stores old & ~oparg, hence the complement */
                __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
                                  ret, oldval, newval, uaddr, ~oparg);
                break;
        case FUTEX_OP_XOR:
                __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        default:
                ret = -ENOSYS;
        }
        if (ret == 0)
                *old = oldval;
        return ret;
}

int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
        int ret;

        if (segment_eq(get_fs(), KERNEL_DS))
                return __futex_atomic_op_pt(op, uaddr, oparg, old);
        spin_lock(&current->mm->page_table_lock);
        uaddr = (u32 __force __user *)
                __dat_user_addr((__force unsigned long) uaddr, 1);
        if (!uaddr) {
                spin_unlock(&current->mm->page_table_lock);
                return -EFAULT;
        }
        get_page(virt_to_page(uaddr));
        spin_unlock(&current->mm->page_table_lock);
        ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
        put_page(virt_to_page(uaddr));
        return ret;
}
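
/*
 * Note on the locking pattern above (a reading of the code, not an original
 * comment): __dat_user_addr() must run under page_table_lock, but the
 * compare-and-swap may fault and thus cannot. The page is therefore pinned
 * with get_page() before the lock is dropped, keeping the translated kernel
 * address valid across __futex_atomic_op_pt().
 */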

static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
                                     u32 oldval, u32 newval)
{
        int ret;

        asm volatile("0: cs   %1,%4,0(%5)\n"
                     "1: la   %0,0\n"
                     "2:\n"
                     EX_TABLE(0b,2b) EX_TABLE(1b,2b)
                     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
                     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
                     : "cc", "memory");
        *uval = oldval;
        return ret;
}

int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
                            u32 oldval, u32 newval)
{
        int ret;

        if (segment_eq(get_fs(), KERNEL_DS))
                return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
        spin_lock(&current->mm->page_table_lock);
        uaddr = (u32 __force __user *)
                __dat_user_addr((__force unsigned long) uaddr, 1);
        if (!uaddr) {
                spin_unlock(&current->mm->page_table_lock);
                return -EFAULT;
        }
        get_page(virt_to_page(uaddr));
        spin_unlock(&current->mm->page_table_lock);
        ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
        put_page(virt_to_page(uaddr));
        return ret;
}

struct uaccess_ops uaccess_pt = {
        .copy_from_user         = copy_from_user_pt,
        .copy_to_user           = copy_to_user_pt,
        .copy_in_user           = copy_in_user_pt,
        .clear_user             = clear_user_pt,
        .strnlen_user           = strnlen_user_pt,
        .strncpy_from_user      = strncpy_from_user_pt,
        .futex_atomic_op        = futex_atomic_op_pt,
        .futex_atomic_cmpxchg   = futex_atomic_cmpxchg_pt,
};
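
/*
 * Usage sketch (an assumption about the surrounding s390 uaccess layer, not
 * stated in this file): the generic entry points dispatch through the
 * installed ops vector, so with uaccess_pt selected a call like
 *
 *      left = copy_from_user(kbuf, ubuf, n);
 *
 * lands in copy_from_user_pt() above. This page-table-walk variant is the
 * software fallback for machines without hardware support for user-space
 * access from the kernel.
 */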