2006-03-31 02:30:22 -08:00
/*
 * Copyright (C) 2005 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
 * Licensed under the GPL
 */
2012-10-08 03:27:32 +01:00
# include <linux/percpu.h>
# include <linux/sched.h>
2013-01-21 15:25:54 -05:00
# include <linux/syscalls.h>
2016-12-24 11:46:01 -08:00
# include <linux/uaccess.h>
2015-06-25 22:44:11 +02:00
# include <asm/ptrace-abi.h>
2012-10-08 03:27:32 +01:00
# include <os.h>
# include <skas.h>
# include <sysdep/tls.h>
2006-03-31 02:30:22 -08:00
2007-05-06 14:51:20 -07:00
/*
 * If needed we can detect when it's uninitialized.
 *
 * These are initialized in an initcall and unchanged thereafter.
 */
static int host_supports_tls = -1;
int host_gdt_entry_tls_min;
2006-03-31 02:30:25 -08:00
2007-10-16 01:26:56 -07:00
int do_set_thread_area ( struct user_desc * info )
2006-03-31 02:30:22 -08:00
{
int ret ;
u32 cpu ;
cpu = get_cpu ( ) ;
ret = os_set_thread_area ( info , userspace_pid [ cpu ] ) ;
put_cpu ( ) ;
2008-02-04 22:30:51 -08:00
if ( ret )
printk ( KERN_ERR " PTRACE_SET_THREAD_AREA failed, err = %d, "
" index = %d \n " , ret , info - > entry_number ) ;
2006-03-31 02:30:22 -08:00
return ret ;
}
2007-10-16 01:26:56 -07:00
int do_get_thread_area ( struct user_desc * info )
2006-03-31 02:30:22 -08:00
{
int ret ;
u32 cpu ;
cpu = get_cpu ( ) ;
ret = os_get_thread_area ( info , userspace_pid [ cpu ] ) ;
put_cpu ( ) ;
2008-02-04 22:30:51 -08:00
if ( ret )
printk ( KERN_ERR " PTRACE_GET_THREAD_AREA failed, err = %d, "
" index = %d \n " , ret , info - > entry_number ) ;
2006-03-31 02:30:22 -08:00
return ret ;
}
/*
* sys_get_thread_area : get a yet unused TLS descriptor index .
* XXX : Consider leaving one free slot for glibc usage at first place . This must
* be done here ( and by changing GDT_ENTRY_TLS_ * macros ) and nowhere else .
*
2007-10-20 01:23:03 +02:00
* Also , this must be tested when compiling in SKAS mode with dynamic linking
2006-03-31 02:30:22 -08:00
* and running against NPTL .
*/
static int get_free_idx ( struct task_struct * task )
{
struct thread_struct * t = & task - > thread ;
int idx ;
if ( ! t - > arch . tls_array )
return GDT_ENTRY_TLS_MIN ;
for ( idx = 0 ; idx < GDT_ENTRY_TLS_ENTRIES ; idx + + )
if ( ! t - > arch . tls_array [ idx ] . present )
return idx + GDT_ENTRY_TLS_MIN ;
return - ESRCH ;
}
static inline void clear_user_desc ( struct user_desc * info )
{
/* Postcondition: LDT_empty(info) returns true. */
memset ( info , 0 , sizeof ( * info ) ) ;
2007-10-16 01:27:00 -07:00
/*
* Check the LDT_empty or the i386 sys_get_thread_area code - we obtain
2006-03-31 02:30:22 -08:00
* indeed an empty user_desc .
*/
info - > read_exec_only = 1 ;
info - > seg_not_present = 1 ;
}
2006-03-31 02:30:24 -08:00
# define O_FORCE 1
2006-03-31 02:30:22 -08:00
static int load_TLS ( int flags , struct task_struct * to )
{
int ret = 0 ;
int idx ;
for ( idx = GDT_ENTRY_TLS_MIN ; idx < GDT_ENTRY_TLS_MAX ; idx + + ) {
2007-10-16 01:27:00 -07:00
struct uml_tls_struct * curr =
& to - > thread . arch . tls_array [ idx - GDT_ENTRY_TLS_MIN ] ;
2006-03-31 02:30:22 -08:00
2007-10-16 01:27:00 -07:00
/*
* Actually , now if it wasn ' t flushed it gets cleared and
* flushed to the host , which will clear it .
*/
2006-03-31 02:30:22 -08:00
if ( ! curr - > present ) {
if ( ! curr - > flushed ) {
clear_user_desc ( & curr - > tls ) ;
curr - > tls . entry_number = idx ;
} else {
WARN_ON ( ! LDT_empty ( & curr - > tls ) ) ;
continue ;
}
}
if ( ! ( flags & O_FORCE ) & & curr - > flushed )
continue ;
ret = do_set_thread_area ( & curr - > tls ) ;
if ( ret )
goto out ;
curr - > flushed = 1 ;
}
out :
return ret ;
}
2007-10-16 01:27:00 -07:00
/*
* Verify if we need to do a flush for the new process , i . e . if there are any
2006-03-31 02:30:22 -08:00
* present desc ' s , only if they haven ' t been flushed .
*/
static inline int needs_TLS_update ( struct task_struct * task )
{
int i ;
int ret = 0 ;
for ( i = GDT_ENTRY_TLS_MIN ; i < GDT_ENTRY_TLS_MAX ; i + + ) {
2007-10-16 01:27:00 -07:00
struct uml_tls_struct * curr =
& task - > thread . arch . tls_array [ i - GDT_ENTRY_TLS_MIN ] ;
2006-03-31 02:30:22 -08:00
2007-10-16 01:27:00 -07:00
/*
* Can ' t test curr - > present , we may need to clear a descriptor
* which had a value .
*/
2006-03-31 02:30:22 -08:00
if ( curr - > flushed )
continue ;
ret = 1 ;
break ;
}
return ret ;
}
2007-10-16 01:27:00 -07:00
/*
* On a newly forked process , the TLS descriptors haven ' t yet been flushed . So
2006-03-31 02:30:22 -08:00
* we mark them as such and the first switch_to will do the job .
*/
void clear_flushed_tls ( struct task_struct * task )
{
int i ;
for ( i = GDT_ENTRY_TLS_MIN ; i < GDT_ENTRY_TLS_MAX ; i + + ) {
2007-10-16 01:27:00 -07:00
struct uml_tls_struct * curr =
& task - > thread . arch . tls_array [ i - GDT_ENTRY_TLS_MIN ] ;
2006-03-31 02:30:22 -08:00
2007-10-16 01:27:00 -07:00
/*
* Still correct to do this , if it wasn ' t present on the host it
* will remain as flushed as it was .
*/
2006-03-31 02:30:22 -08:00
if ( ! curr - > present )
continue ;
curr - > flushed = 0 ;
}
}
2007-10-16 01:27:00 -07:00
/*
* In SKAS0 mode , currently , multiple guest threads sharing the same - > mm have a
2006-03-31 02:30:25 -08:00
* common host process . So this is needed in SKAS0 too .
*
* However , if each thread had a different host process ( and this was discussed
* for SMP support ) this won ' t be needed .
*
* And this will not need be used when ( and if ) we ' ll add support to the host
2007-10-16 01:27:00 -07:00
* SKAS patch .
*/
2006-03-31 02:30:25 -08:00
2008-02-04 22:30:49 -08:00
int arch_switch_tls ( struct task_struct * to )
2006-03-31 02:30:22 -08:00
{
2006-03-31 02:30:25 -08:00
if ( ! host_supports_tls )
return 0 ;
2007-10-16 01:27:00 -07:00
/*
* We have no need whatsoever to switch TLS for kernel threads ; beyond
2006-03-31 02:30:24 -08:00
* that , that would also result in us calling os_set_thread_area with
2007-10-16 01:27:00 -07:00
* userspace_pid [ cpu ] = = 0 , which gives an error .
*/
2006-03-31 02:30:24 -08:00
if ( likely ( to - > mm ) )
return load_TLS ( O_FORCE , to ) ;
return 0 ;
2006-03-31 02:30:22 -08:00
}
static int set_tls_entry ( struct task_struct * task , struct user_desc * info ,
int idx , int flushed )
{
struct thread_struct * t = & task - > thread ;
if ( idx < GDT_ENTRY_TLS_MIN | | idx > GDT_ENTRY_TLS_MAX )
return - EINVAL ;
t - > arch . tls_array [ idx - GDT_ENTRY_TLS_MIN ] . tls = * info ;
t - > arch . tls_array [ idx - GDT_ENTRY_TLS_MIN ] . present = 1 ;
t - > arch . tls_array [ idx - GDT_ENTRY_TLS_MIN ] . flushed = flushed ;
return 0 ;
}
2020-01-04 13:39:30 +01:00
int arch_set_tls ( struct task_struct * new , unsigned long tls )
2006-03-31 02:30:22 -08:00
{
struct user_desc info ;
int idx , ret = - EFAULT ;
2020-01-04 13:39:30 +01:00
if ( copy_from_user ( & info , ( void __user * ) tls , sizeof ( info ) ) )
2006-03-31 02:30:22 -08:00
goto out ;
ret = - EINVAL ;
if ( LDT_empty ( & info ) )
goto out ;
idx = info . entry_number ;
ret = set_tls_entry ( new , & info , idx , 0 ) ;
out :
return ret ;
}
/* XXX: use do_get_thread_area to read the host value? I'm not at all sure! */
2008-02-04 22:30:37 -08:00
static int get_tls_entry ( struct task_struct * task , struct user_desc * info ,
int idx )
2006-03-31 02:30:22 -08:00
{
struct thread_struct * t = & task - > thread ;
if ( ! t - > arch . tls_array )
goto clear ;
if ( idx < GDT_ENTRY_TLS_MIN | | idx > GDT_ENTRY_TLS_MAX )
return - EINVAL ;
if ( ! t - > arch . tls_array [ idx - GDT_ENTRY_TLS_MIN ] . present )
goto clear ;
* info = t - > arch . tls_array [ idx - GDT_ENTRY_TLS_MIN ] . tls ;
out :
2007-10-16 01:27:00 -07:00
/*
* Temporary debugging check , to make sure that things have been
2006-03-31 02:30:22 -08:00
* flushed . This could be triggered if load_TLS ( ) failed .
*/
2007-10-16 01:27:00 -07:00
if ( unlikely ( task = = current & &
! t - > arch . tls_array [ idx - GDT_ENTRY_TLS_MIN ] . flushed ) ) {
2006-03-31 02:30:22 -08:00
printk ( KERN_ERR " get_tls_entry: task with pid %d got here "
" without flushed TLS. " , current - > pid ) ;
}
return 0 ;
clear :
2007-10-16 01:27:00 -07:00
/*
* When the TLS entry has not been set , the values read to user in the
2006-03-31 02:30:22 -08:00
* tls_array are 0 ( because it ' s cleared at boot , see
* arch / i386 / kernel / head . S : cpu_gdt_table ) . Emulate that .
*/
clear_user_desc ( info ) ;
info - > entry_number = idx ;
goto out ;
}
2013-01-21 15:25:54 -05:00
SYSCALL_DEFINE1(set_thread_area, struct user_desc __user *, user_desc)
{
	struct user_desc info;
	int idx, ret;

	if (!host_supports_tls)
		return -ENOSYS;

	if (copy_from_user(&info, user_desc, sizeof(info)))
		return -EFAULT;

	idx = info.entry_number;

	if (idx == -1) {
		idx = get_free_idx(current);
		if (idx < 0)
			return idx;
		info.entry_number = idx;
		/* Tell the user which slot we chose for him.*/
		if (put_user(idx, &user_desc->entry_number))
			return -EFAULT;
	}

	ret = do_set_thread_area(&info);
	if (ret)
		return ret;

	/* The host accepted it; record it locally, already flushed. */
	return set_tls_entry(current, &info, idx, 1);
}
/*
* Perform set_thread_area on behalf of the traced child .
* Note : error handling is not done on the deferred load , and this differ from
* i386 . However the only possible error are caused by bugs .
*/
int ptrace_set_thread_area ( struct task_struct * child , int idx ,
2008-02-04 22:30:37 -08:00
struct user_desc __user * user_desc )
2006-03-31 02:30:22 -08:00
{
struct user_desc info ;
2006-03-31 02:30:25 -08:00
if ( ! host_supports_tls )
return - EIO ;
2006-03-31 02:30:22 -08:00
if ( copy_from_user ( & info , user_desc , sizeof ( info ) ) )
return - EFAULT ;
return set_tls_entry ( child , & info , idx , 0 ) ;
}
2013-01-21 15:25:54 -05:00
SYSCALL_DEFINE1(get_thread_area, struct user_desc __user *, user_desc)
{
	struct user_desc info;
	int idx, ret;

	if (!host_supports_tls)
		return -ENOSYS;

	if (get_user(idx, &user_desc->entry_number))
		return -EFAULT;

	ret = get_tls_entry(current, &info, idx);
	if (ret < 0)
		return ret;

	if (copy_to_user(user_desc, &info, sizeof(info)))
		return -EFAULT;

	return ret;
}
/*
 * Perform get_thread_area on behalf of the traced child.
 */
int ptrace_get_thread_area(struct task_struct *child, int idx,
			   struct user_desc __user *user_desc)
{
	struct user_desc info;
	int ret;

	if (!host_supports_tls)
		return -EIO;

	ret = get_tls_entry(child, &info, idx);
	if (ret < 0)
		return ret;

	if (copy_to_user(user_desc, &info, sizeof(info)))
		return -EFAULT;

	return ret;
}
2006-03-31 02:30:24 -08:00
2007-10-16 01:27:00 -07:00
/*
2008-02-04 22:30:51 -08:00
* This code is really i386 - only , but it detects and logs x86_64 GDT indexes
* if a 32 - bit UML is running on a 64 - bit host .
2007-10-16 01:27:00 -07:00
*/
2007-05-06 14:51:20 -07:00
static int __init __setup_host_supports_tls ( void )
{
2006-03-31 02:30:25 -08:00
check_host_supports_tls ( & host_supports_tls , & host_gdt_entry_tls_min ) ;
if ( host_supports_tls ) {
printk ( KERN_INFO " Host TLS support detected \n " ) ;
printk ( KERN_INFO " Detected host type: " ) ;
switch ( host_gdt_entry_tls_min ) {
2008-02-04 22:30:51 -08:00
case GDT_ENTRY_TLS_MIN_I386 :
printk ( KERN_CONT " i386 " ) ;
break ;
case GDT_ENTRY_TLS_MIN_X86_64 :
printk ( KERN_CONT " x86_64 " ) ;
break ;
2006-03-31 02:30:25 -08:00
}
2008-02-04 22:30:51 -08:00
printk ( KERN_CONT " (GDT indexes %d to %d) \n " ,
host_gdt_entry_tls_min ,
host_gdt_entry_tls_min + GDT_ENTRY_TLS_ENTRIES ) ;
2006-03-31 02:30:25 -08:00
} else
printk ( KERN_ERR " Host TLS support NOT detected! "
" TLS support inside UML will not work \n " ) ;
2006-04-10 22:53:26 -07:00
return 0 ;
2006-03-31 02:30:25 -08:00
}
__initcall ( __setup_host_supports_tls ) ;