/*
 *  linux/arch/i386/kernel/process.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#include <stdarg.h>

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/personality.h>
#include <linux/tick.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/desc.h>
#include <asm/vm86.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif

#include <linux/err.h>

#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/pda.h>

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

static int hlt_counter;

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
        return ((unsigned long *)tsk->thread.esp)[3];
}

/*
 * Powermanagement idle function, if any..
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);

static DEFINE_PER_CPU(unsigned int, cpu_idle_state);

void disable_hlt(void)
{
        hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
        hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
        if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
                current_thread_info()->status &= ~TS_POLLING;
                /*
                 * TS_POLLING-cleared state must be visible before we
                 * test NEED_RESCHED:
                 */
                smp_mb();

                local_irq_disable();
                if (!need_resched())
                        safe_halt();    /* enables interrupts racelessly */
                else
                        local_irq_enable();
                current_thread_info()->status |= TS_POLLING;
        } else {
                /* loop is done by the caller */
                cpu_relax();
        }
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
        cpu_relax();
}

#ifdef CONFIG_HOTPLUG_CPU
#include <asm/nmi.h>
/* We don't actually take CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
        /* This must be done before dead CPU ack */
        cpu_exit_clear();
        wbinvd();
        mb();
        /* Ack it */
        __get_cpu_var(cpu_state) = CPU_DEAD;

        /*
         * With physical CPU hotplug, we should halt the cpu
         */
        local_irq_disable();
        while (1)
                halt();
}
#else
static inline void play_dead(void)
{
        BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
        int cpu = smp_processor_id();

        current_thread_info()->status |= TS_POLLING;
        /* endless idle loop with no priority at all */
        while (1) {
                tick_nohz_stop_sched_tick();
                while (!need_resched()) {
                        void (*idle)(void);

                        if (__get_cpu_var(cpu_idle_state))
                                __get_cpu_var(cpu_idle_state) = 0;

                        rmb();
                        idle = pm_idle;

                        if (!idle)
                                idle = default_idle;

                        if (cpu_is_offline(cpu))
                                play_dead();

                        __get_cpu_var(irq_stat).idle_timestamp = jiffies;
                        idle();
                }
                tick_nohz_restart_sched_tick();
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}

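/*
 * cpu_idle_wait() waits until every online CPU has passed through its
 * idle loop at least once: the per-cpu cpu_idle_state flag is set here
 * for each CPU and cleared by that CPU in cpu_idle() above.  Code that
 * changes pm_idle can use this to make sure no CPU is still running the
 * old idle routine.
 */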
void cpu_idle_wait(void)
{
        unsigned int cpu, this_cpu = get_cpu();
        cpumask_t map, tmp = current->cpus_allowed;

        set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
        put_cpu();

        cpus_clear(map);
        for_each_online_cpu(cpu) {
                per_cpu(cpu_idle_state, cpu) = 1;
                cpu_set(cpu, map);
        }

        __get_cpu_var(cpu_idle_state) = 0;

        wmb();
        do {
                ssleep(1);
                for_each_online_cpu(cpu) {
                        if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
                                cpu_clear(cpu, map);
                }
                cpus_and(map, map, cpu_online_map);
        } while (!cpus_empty(map));

        set_cpus_allowed(current, tmp);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);

/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
{
        if (!need_resched()) {
                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
                        __mwait(eax, ecx);
        }
}

/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
        local_irq_enable();
        mwait_idle_with_hints(0, 0);
}

void __devinit select_idle_routine(const struct cpuinfo_x86 *c)
{
        if (cpu_has(c, X86_FEATURE_MWAIT)) {
                printk("monitor/mwait feature present.\n");
                /*
                 * Skip, if setup has overridden idle.
                 * One CPU supports mwait => All CPUs supports mwait
                 */
                if (!pm_idle) {
                        printk("using mwait in idle threads.\n");
                        pm_idle = mwait_idle;
                }
        }
}

static int __init idle_setup(char *str)
{
        if (!strcmp(str, "poll")) {
                printk("using polling idle threads.\n");
                pm_idle = poll_idle;
#ifdef CONFIG_X86_SMP
                if (smp_num_siblings > 1)
                        printk("WARNING: polling idle and HT enabled, performance may degrade.\n");
#endif
        } else if (!strcmp(str, "mwait"))
                force_mwait = 1;
        else
                return -1;

        boot_option_idle_override = 1;
        return 0;
}
early_param("idle", idle_setup);

void show_regs(struct pt_regs *regs)
{
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;

        printk("\n");
        printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
        printk("EIP: %04x:[<%08lx>] CPU: %d\n", 0xffff & regs->xcs, regs->eip, smp_processor_id());
        print_symbol("EIP is at %s\n", regs->eip);

        if (user_mode_vm(regs))
                printk(" ESP: %04x:%08lx", 0xffff & regs->xss, regs->esp);
        printk(" EFLAGS: %08lx    %s (%s %.*s)\n",
               regs->eflags, print_tainted(), init_utsname()->release,
               (int)strcspn(init_utsname()->version, " "),
               init_utsname()->version);
        printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
                regs->eax, regs->ebx, regs->ecx, regs->edx);
        printk("ESI: %08lx EDI: %08lx EBP: %08lx",
                regs->esi, regs->edi, regs->ebp);
        printk(" DS: %04x ES: %04x FS: %04x\n",
               0xffff & regs->xds, 0xffff & regs->xes, 0xffff & regs->xfs);

        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = read_cr3();
        cr4 = read_cr4_safe();
        printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
        show_trace(NULL, regs, &regs->esp);
}

/*
 * This gets run with %ebx containing the
 * function to call, and %edx containing
 * the "args".
 */
extern void kernel_thread_helper(void);

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
        struct pt_regs regs;

        memset(&regs, 0, sizeof(regs));

        regs.ebx = (unsigned long) fn;
        regs.edx = (unsigned long) arg;

        regs.xds = __USER_DS;
        regs.xes = __USER_DS;
        regs.xfs = __KERNEL_PDA;
        regs.orig_eax = -1;
        regs.eip = (unsigned long) kernel_thread_helper;
        regs.xcs = __KERNEL_CS | get_kernel_rpl();
        regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;

        /* Ok, create the new process.. */
        return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
        /* The process may have allocated an io port bitmap... nuke it. */
        if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
                struct task_struct *tsk = current;
                struct thread_struct *t = &tsk->thread;
                int cpu = get_cpu();
                struct tss_struct *tss = &per_cpu(init_tss, cpu);

                kfree(t->io_bitmap_ptr);
                t->io_bitmap_ptr = NULL;
                clear_thread_flag(TIF_IO_BITMAP);
                /*
                 * Careful, clear this in the TSS too:
                 */
                memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
                t->io_bitmap_max = 0;
                tss->io_bitmap_owner = NULL;
                tss->io_bitmap_max = 0;
                tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
                put_cpu();
        }
}

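/*
 * flush_thread() runs when the current task execs a new image: it wipes
 * the task's debug registers and TLS slots and forgets any FPU state so
 * the new program starts with a clean thread context.
 */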
void flush_thread(void)
{
        struct task_struct *tsk = current;

        memset(tsk->thread.debugreg, 0, sizeof(unsigned long) * 8);
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
        clear_tsk_thread_flag(tsk, TIF_DEBUG);
        /*
         * Forget coprocessor state..
         */
        clear_fpu(tsk);
        clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
        BUG_ON(dead_task->mm);
        release_vm86_irqs(dead_task);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
        unlazy_fpu(tsk);
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
        unsigned long unused,
        struct task_struct *p, struct pt_regs *regs)
{
        struct pt_regs *childregs;
        struct task_struct *tsk;
        int err;

        childregs = task_pt_regs(p);
        *childregs = *regs;
        childregs->eax = 0;
        childregs->esp = esp;

        p->thread.esp = (unsigned long) childregs;
        p->thread.esp0 = (unsigned long) (childregs + 1);

        p->thread.eip = (unsigned long) ret_from_fork;

        savesegment(gs, p->thread.gs);

        tsk = current;
        if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
                p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
                                                  IO_BITMAP_BYTES, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr) {
                        p->thread.io_bitmap_max = 0;
                        return -ENOMEM;
                }
                set_tsk_thread_flag(p, TIF_IO_BITMAP);
        }

        /*
         * Set a new TLS for the child thread?
         */
        if (clone_flags & CLONE_SETTLS) {
                struct desc_struct *desc;
                struct user_desc info;
                int idx;

                err = -EFAULT;
                if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
                        goto out;
                err = -EINVAL;
                if (LDT_empty(&info))
                        goto out;

                idx = info.entry_number;
                if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
                        goto out;

                desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
                desc->a = LDT_entry_a(&info);
                desc->b = LDT_entry_b(&info);
        }

        err = 0;
out:
        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
        }
        return err;
}

/*
 * fill in the user structure for a core dump..
 */
void dump_thread(struct pt_regs *regs, struct user *dump)
{
        int i;

        /* changed the size calculations - should hopefully work better. lbt */
        dump->magic = CMAGIC;
        dump->start_code = 0;
        dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
        dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
        dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE - 1))) >> PAGE_SHIFT;
        dump->u_dsize -= dump->u_tsize;
        dump->u_ssize = 0;
        for (i = 0; i < 8; i++)
                dump->u_debugreg[i] = current->thread.debugreg[i];

        if (dump->start_stack < TASK_SIZE)
                dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;

        dump->regs.ebx = regs->ebx;
        dump->regs.ecx = regs->ecx;
        dump->regs.edx = regs->edx;
        dump->regs.esi = regs->esi;
        dump->regs.edi = regs->edi;
        dump->regs.ebp = regs->ebp;
        dump->regs.eax = regs->eax;
        dump->regs.ds = regs->xds;
        dump->regs.es = regs->xes;
        dump->regs.fs = regs->xfs;
        savesegment(gs, dump->regs.gs);
        dump->regs.orig_eax = regs->orig_eax;
        dump->regs.eip = regs->eip;
        dump->regs.cs = regs->xcs;
        dump->regs.eflags = regs->eflags;
        dump->regs.esp = regs->esp;
        dump->regs.ss = regs->xss;

        dump->u_fpvalid = dump_fpu(regs, &dump->i387);
}
EXPORT_SYMBOL(dump_thread);

/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
        struct pt_regs ptregs = *task_pt_regs(tsk);
        ptregs.xcs &= 0xffff;
        ptregs.xds &= 0xffff;
        ptregs.xes &= 0xffff;
        ptregs.xss &= 0xffff;

        elf_core_copy_regs(regs, &ptregs);

        return 1;
}

static noinline void __switch_to_xtra(struct task_struct *next_p,
                                      struct tss_struct *tss)
{
        struct thread_struct *next;

        next = &next_p->thread;

        if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
                set_debugreg(next->debugreg[0], 0);
                set_debugreg(next->debugreg[1], 1);
                set_debugreg(next->debugreg[2], 2);
                set_debugreg(next->debugreg[3], 3);
                /* no 4 and 5 */
                set_debugreg(next->debugreg[6], 6);
                set_debugreg(next->debugreg[7], 7);
        }

        if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
                /*
                 * Disable the bitmap via an invalid offset. We still cache
                 * the previous bitmap owner and the IO bitmap contents:
                 */
                tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
                return;
        }

        if (likely(next == tss->io_bitmap_owner)) {
                /*
                 * Previous owner of the bitmap (hence the bitmap content)
                 * matches the next task, we don't have to do anything but
                 * to set a valid offset in the TSS:
                 */
                tss->io_bitmap_base = IO_BITMAP_OFFSET;
                return;
        }
        /*
         * Lazy TSS's I/O bitmap copy. We set an invalid offset here
         * and we let the task get a GPF in case an I/O instruction
         * is performed. The handler of the GPF will verify that the
         * faulting task has a valid I/O bitmap and, if true, does the
         * real copy and restart the instruction. This will save us
         * redundant copies when the currently switched task does not
         * perform any I/O during its timeslice.
         */
        tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
}

/*
 * This function selects if the context switch from prev to next
 * has to tweak the TSC disable bit in the cr4.
 */
static inline void disable_tsc(struct task_struct *prev_p,
                               struct task_struct *next_p)
{
        struct thread_info *prev, *next;

        /*
         * gcc should eliminate the ->thread_info dereference if
         * has_secure_computing returns 0 at compile time (SECCOMP=n).
         */
        prev = task_thread_info(prev_p);
        next = task_thread_info(next_p);

        if (has_secure_computing(prev) || has_secure_computing(next)) {
                /* slow path here */
                if (has_secure_computing(prev) &&
                    !has_secure_computing(next)) {
                        write_cr4(read_cr4() & ~X86_CR4_TSD);
                } else if (!has_secure_computing(prev) &&
                           has_secure_computing(next))
                        write_cr4(read_cr4() | X86_CR4_TSD);
        }
}

/*
 *      switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPU's, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is a fairly red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %eax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
struct task_struct fastcall *__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev = &prev_p->thread,
                             *next = &next_p->thread;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);

        /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

        __unlazy_fpu(prev_p);

        /* we're going to use this soon, after a few expensive things */
        if (next_p->fpu_counter > 5)
                prefetch(&next->i387.fxsave);

        /*
         * Reload esp0.
         */
        load_esp0(tss, next);

        /*
         * Save away %gs. No need to save %fs, as it was saved on the
         * stack on entry. No need to save %es and %ds, as those are
         * always kernel segments while inside the kernel. Doing this
         * before setting the new TLS descriptors avoids the situation
         * where we temporarily have non-reloadable segments in %fs
         * and %gs. This could be an issue if the NMI handler ever
         * used %fs or %gs (it does not today), or if the kernel is
         * running inside of a hypervisor layer.
         */
        savesegment(gs, prev->gs);

        /*
         * Load the per-thread Thread-Local Storage descriptor.
         */
        load_TLS(next, cpu);

        /*
         * Restore IOPL if needed. In normal use, the flags restore
         * in the switch assembly will handle this. But if the kernel
         * is running virtualized at a non-zero CPL, the popf will
         * not restore flags, so it must be done in a separate step.
         */
        if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
                set_iopl_mask(next->iopl);

        /*
         * Now maybe handle debug registers and/or IO bitmaps
         */
        if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW)
            || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)))
                __switch_to_xtra(next_p, tss);

        disable_tsc(prev_p, next_p);

        /*
         * Leave lazy mode, flushing any hypercalls made here.
         * This must be done before restoring TLS segments so
         * the GDT and LDT are properly updated, and must be
         * done before math_state_restore, so the TS bit is up
         * to date.
         */
        arch_leave_lazy_cpu_mode();

        /* If the task has used fpu the last 5 timeslices, just do a full
         * restore of the math state immediately to avoid the trap; the
         * chances of needing FPU soon are obviously high now
         */
        if (next_p->fpu_counter > 5)
                math_state_restore();

        /*
         * Restore %gs if needed (which is common)
         */
        if (prev->gs | next->gs)
                loadsegment(gs, next->gs);

        write_pda(pcurrent, next_p);

        return prev_p;
}

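/*
 * The i386 syscall stubs below take a struct pt_regs by value: the
 * register frame that entry.S built on the kernel stack lines up with
 * the argument, so "regs" is the caller's saved user register state and
 * &regs can be handed straight to do_fork()/do_execve().
 */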
asmlinkage int sys_fork(struct pt_regs regs)
{
        return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
}

asmlinkage int sys_clone(struct pt_regs regs)
{
        unsigned long clone_flags;
        unsigned long newsp;
        int __user *parent_tidptr, *child_tidptr;

        clone_flags = regs.ebx;
        newsp = regs.ecx;
        parent_tidptr = (int __user *)regs.edx;
        child_tidptr = (int __user *)regs.edi;
        if (!newsp)
                newsp = regs.esp;
        return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(struct pt_regs regs)
{
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(struct pt_regs regs)
{
        int error;
        char *filename;

        filename = getname((char __user *) regs.ebx);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                goto out;
        error = do_execve(filename,
                        (char __user * __user *) regs.ecx,
                        (char __user * __user *) regs.edx,
                        &regs);
        if (error == 0) {
                task_lock(current);
                current->ptrace &= ~PT_DTRACE;
                task_unlock(current);
                /* Make sure we don't return using sysenter.. */
                set_thread_flag(TIF_IRET);
        }
        putname(filename);
out:
        return error;
}

#define top_esp                (THREAD_SIZE - sizeof(unsigned long))
#define top_ebp                (THREAD_SIZE - 2*sizeof(unsigned long))

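/*
 * get_wchan() reports where a sleeping task is blocked.  It walks the
 * saved frame-pointer chain on the task's kernel stack (thread.esp was
 * stored by switch_to(), which pushed %ebp last) and returns the first
 * return address that is not inside the scheduler, giving up after 16
 * frames or if the chain leaves the task's stack.
 */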
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long ebp, esp, eip;
        unsigned long stack_page;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;
        stack_page = (unsigned long)task_stack_page(p);
        esp = p->thread.esp;
        if (!stack_page || esp < stack_page || esp > top_esp + stack_page)
                return 0;
        /* include/asm-i386/system.h:switch_to() pushes ebp last. */
        ebp = *(unsigned long *) esp;
        do {
                if (ebp < stack_page || ebp > top_ebp + stack_page)
                        return 0;
                eip = *(unsigned long *) (ebp + 4);
                if (!in_sched_functions(eip))
                        return eip;
                ebp = *(unsigned long *) ebp;
        } while (count++ < 16);
        return 0;
}

/*
 * sys_alloc_thread_area: get a yet unused TLS descriptor index.
 */
static int get_free_idx(void)
{
        struct thread_struct *t = &current->thread;
        int idx;

        for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
                if (desc_empty(t->tls_array + idx))
                        return idx + GDT_ENTRY_TLS_MIN;
        return -ESRCH;
}

/*
 * Set a given TLS descriptor:
 */
asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
{
        struct thread_struct *t = &current->thread;
        struct user_desc info;
        struct desc_struct *desc;
        int cpu, idx;

        if (copy_from_user(&info, u_info, sizeof(info)))
                return -EFAULT;
        idx = info.entry_number;

        /*
         * index -1 means the kernel should try to find and
         * allocate an empty descriptor:
         */
        if (idx == -1) {
                idx = get_free_idx();
                if (idx < 0)
                        return idx;
                if (put_user(idx, &u_info->entry_number))
                        return -EFAULT;
        }

        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
                return -EINVAL;

        desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;

        /*
         * We must not get preempted while modifying the TLS.
         */
        cpu = get_cpu();

        if (LDT_empty(&info)) {
                desc->a = 0;
                desc->b = 0;
        } else {
                desc->a = LDT_entry_a(&info);
                desc->b = LDT_entry_b(&info);
        }
        load_TLS(t, cpu);

        put_cpu();
        return 0;
}

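/*
 * The GET_* macros below unpack a GDT entry for sys_get_thread_area().
 * 'a' is the low dword of the 8-byte descriptor (limit 15..0, base 15..0)
 * and 'b' is the high dword (base 23..16, type, DPL, P, limit 19..16,
 * AVL, D/B, G, base 31..24), so e.g. GET_32BIT reads the D/B bit (bit 22)
 * and GET_LIMIT_PAGES the granularity bit (bit 23).
 */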
/*
 * Get the current Thread-Local Storage area:
 */

#define GET_BASE(desc) ( \
        (((desc)->a >> 16) & 0x0000ffff) | \
        (((desc)->b << 16) & 0x00ff0000) | \
        ((desc)->b & 0xff000000))

#define GET_LIMIT(desc) ( \
        ((desc)->a & 0x0ffff) | \
        ((desc)->b & 0xf0000))

#define GET_32BIT(desc)         (((desc)->b >> 22) & 1)
#define GET_CONTENTS(desc)      (((desc)->b >> 10) & 3)
#define GET_WRITABLE(desc)      (((desc)->b >>  9) & 1)
#define GET_LIMIT_PAGES(desc)   (((desc)->b >> 23) & 1)
#define GET_PRESENT(desc)       (((desc)->b >> 15) & 1)
#define GET_USEABLE(desc)       (((desc)->b >> 20) & 1)

asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
{
        struct user_desc info;
        struct desc_struct *desc;
        int idx;

        if (get_user(idx, &u_info->entry_number))
                return -EFAULT;
        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
                return -EINVAL;

        memset(&info, 0, sizeof(info));

        desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;

        info.entry_number = idx;
        info.base_addr = GET_BASE(desc);
        info.limit = GET_LIMIT(desc);
        info.seg_32bit = GET_32BIT(desc);
        info.contents = GET_CONTENTS(desc);
        info.read_exec_only = !GET_WRITABLE(desc);
        info.limit_in_pages = GET_LIMIT_PAGES(desc);
        info.seg_not_present = !GET_PRESENT(desc);
        info.useable = GET_USEABLE(desc);

        if (copy_to_user(u_info, &info, sizeof(info)))
                return -EFAULT;
        return 0;
}

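/*
 * arch_align_stack() is used when a new process image is set up: unless
 * the task disabled address-space randomization, it subtracts a random
 * amount (up to 8KB) from the initial user stack pointer and then
 * 16-byte aligns it.
 */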
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}