/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/user.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>

#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/elf.h>
#include <asm/isadep.h>
#include <asm/inst.h>
#include <asm/stacktrace.h>

/*
 * The idle thread. There's no useful work to be done, so just try to conserve
 * power and have a low exit latency (ie sit in a loop waiting for somebody to
 * say that they'd like to reschedule)
 */
void __noreturn cpu_idle(void)
{
	int cpu;

	/* CPU is going idle. */
	cpu = smp_processor_id();

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched() && cpu_online(cpu)) {
#ifdef CONFIG_MIPS_MT_SMTC
			extern void smtc_idle_loop_hook(void);

			smtc_idle_loop_hook();
#endif

			if (cpu_wait) {
				/* Don't trace irqs off for idle */
				stop_critical_timings();
				(*cpu_wait)();
				start_critical_timings();
			}
		}
#ifdef CONFIG_HOTPLUG_CPU
		if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map) &&
		    (system_state == SYSTEM_RUNNING ||
		     system_state == SYSTEM_BOOTING))
			play_dead();
#endif
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

asmlinkage void ret_from_fork(void);

void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
	unsigned long status;

	/* New thread loses kernel privileges. */
	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
#ifdef CONFIG_64BIT
	status |= test_thread_flag(TIF_32BIT_REGS) ? 0 : ST0_FR;
#endif
	status |= KU_USER;
	regs->cp0_status = status;
	clear_used_math();
	clear_fpu_owner();
	if (cpu_has_dsp)
		__init_dsp();
	regs->cp0_epc = pc;
	regs->regs[29] = sp;
}

void exit_thread(void)
{
}

void flush_thread(void)
{
}

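/*
 * copy_thread() builds the child's kernel stack: a copy of the parent's
 * pt_regs is placed at the top of the new stack, and thread.reg29/reg31
 * are set so that the first switch_to() resumes the child in ret_from_fork
 * with those registers.
 */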
int copy_thread(unsigned long clone_flags, unsigned long usp,
	unsigned long unused, struct task_struct *p, struct pt_regs *regs)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs;
	unsigned long childksp;
	p->set_child_tid = p->clear_child_tid = NULL;

	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

	preempt_disable();

	if (is_fpu_owner())
		save_fp(p);

	if (cpu_has_dsp)
		save_dsp(p);

	preempt_enable();

	/* set up new TSS. */
	childregs = (struct pt_regs *) childksp - 1;
	/*  Put the stack after the struct pt_regs.  */
	childksp = (unsigned long) childregs;
	*childregs = *regs;
	childregs->regs[7] = 0;	/* Clear error flag */

	childregs->regs[2] = 0;	/* Child gets zero as return value */

	if (childregs->cp0_status & ST0_CU0) {
		childregs->regs[28] = (unsigned long) ti;
		childregs->regs[29] = childksp;
		ti->addr_limit = KERNEL_DS;
	} else {
		childregs->regs[29] = usp;
		ti->addr_limit = USER_DS;
	}
	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC restores TCStatus after Status, and the CU bits
	 * are aliased there.
	 */
	childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1);
#endif
	clear_tsk_thread_flag(p, TIF_USEDFPU);

#ifdef CONFIG_MIPS_MT_FPAFF
	clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

	if (clone_flags & CLONE_SETTLS)
		ti->tp_value = regs->regs[7];

	return 0;
}

/* Fill in the fpu structure for a core dump.. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
{
	memcpy(r, &current->thread.fpu, sizeof(current->thread.fpu));

	return 1;
}

void elf_dump_regs(elf_greg_t *gp, struct pt_regs *regs)
{
	int i;

	for (i = 0; i < EF_R0; i++)
		gp[i] = 0;
	gp[EF_R0] = 0;
	for (i = 1; i <= 31; i++)
		gp[EF_R0 + i] = regs->regs[i];
	gp[EF_R26] = 0;
	gp[EF_R27] = 0;
	gp[EF_LO] = regs->lo;
	gp[EF_HI] = regs->hi;
	gp[EF_CP0_EPC] = regs->cp0_epc;
	gp[EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	gp[EF_CP0_STATUS] = regs->cp0_status;
	gp[EF_CP0_CAUSE] = regs->cp0_cause;
#ifdef EF_UNUSED0
	gp[EF_UNUSED0] = 0;
#endif
}

int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	elf_dump_regs(*regs, task_pt_regs(tsk));
	return 1;
}

int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr)
{
	memcpy(fpr, &t->thread.fpu, sizeof(current->thread.fpu));

	return 1;
}

/*
 * Create a kernel thread
 */
static void __noreturn kernel_thread_helper(void *arg, int (*fn)(void *))
{
	do_exit(fn(arg));
}

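/*
 * kernel_thread() builds a pt_regs frame in which the child starts at
 * kernel_thread_helper with arg in $a0 (regs[4]) and fn in $a1 (regs[5]);
 * the helper then calls fn(arg) and passes its return value to do_exit().
 */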
long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.regs[4] = (unsigned long) arg;
	regs.regs[5] = (unsigned long) fn;
	regs.cp0_epc = (unsigned long) kernel_thread_helper;
	regs.cp0_status = read_c0_status();
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	regs.cp0_status = (regs.cp0_status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
			  ((regs.cp0_status & (ST0_KUC | ST0_IEC)) << 2);
#else
	regs.cp0_status |= ST0_EXL;
#endif

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}

/*
 * Frame information for a kernel function, derived by scanning its
 * prologue; used by the stack unwinding code below.
 */
struct mips_frame_info {
	void		*func;
	unsigned long	func_size;
	int		frame_size;
	int		pc_offset;
};

static inline int is_ra_save_ins(union mips_instruction *ip)
{
	/* sw / sd $ra, offset($sp) */
	return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
		ip->i_format.rs == 29 &&
		ip->i_format.rt == 31;
}

static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
{
	if (ip->j_format.opcode == jal_op)
		return 1;
	if (ip->r_format.opcode != spec_op)
		return 0;
	return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
}

static inline int is_sp_move_ins(union mips_instruction *ip)
{
	/* addiu/daddiu sp,sp,-imm */
	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
		return 0;
	if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
		return 1;
	return 0;
}

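/*
 * Scan a function's prologue (at most 128 instructions) for the
 * "addiu/daddiu sp,sp,-framesize" stack adjustment and the
 * "sw/sd $ra,offset($sp)" save, stopping at the first jal/jalr/jr.
 * Returns 0 for a nested function (both found), 1 for a leaf function
 * (no ra save found), or -1 if the prologue could not be analyzed.
 */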
static int get_frame_info(struct mips_frame_info *info)
{
	union mips_instruction *ip = info->func;
	unsigned max_insns = info->func_size / sizeof(union mips_instruction);
	unsigned i;

	info->pc_offset = -1;
	info->frame_size = 0;

	if (!ip)
		goto err;

	if (max_insns == 0)
		max_insns = 128U;	/* unknown function size */
	max_insns = min(128U, max_insns);

	for (i = 0; i < max_insns; i++, ip++) {

		if (is_jal_jalr_jr_ins(ip))
			break;
		if (!info->frame_size) {
			if (is_sp_move_ins(ip))
				info->frame_size = - ip->i_format.simmediate;
			continue;
		}
		if (info->pc_offset == -1 && is_ra_save_ins(ip)) {
			info->pc_offset =
				ip->i_format.simmediate / sizeof(long);
			break;
		}
	}
	if (info->frame_size && info->pc_offset >= 0) /* nested */
		return 0;
	if (info->pc_offset < 0) /* leaf */
		return 1;
	/* prologue seems bogus... */
err:
	return -1;
}

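/*
 * Frame info for schedule() is computed once at boot: thread_saved_pc()
 * uses its pc_offset to read the return address a blocked task saved on
 * its stack, and get_wchan() uses its frame_size to step past that frame.
 */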
static struct mips_frame_info schedule_mfi __read_mostly;

static int __init frame_info_init(void)
{
	unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long ofs;

	kallsyms_lookup_size_offset((unsigned long)schedule, &size, &ofs);
#endif
	schedule_mfi.func = schedule;
	schedule_mfi.func_size = size;

	get_frame_info(&schedule_mfi);

	/*
	 * Without schedule() frame info, the results given by
	 * thread_saved_pc() and get_wchan() are not reliable.
	 */
	if (schedule_mfi.pc_offset < 0)
		printk("Can't analyze schedule() prologue at %p\n", schedule);

	return 0;
}

arch_initcall(frame_info_init);

/*
 * Return the saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	/* Newborn processes are a special case */
	if (t->reg31 == (unsigned long) ret_from_fork)
		return t->reg31;
	if (schedule_mfi.pc_offset < 0)
		return 0;
	return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}

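/*
 * The unwinder below walks one frame per call: it analyzes the prologue
 * of the function containing pc, reads the saved return address from the
 * stack (or takes *ra for a leaf function), advances *sp past the frame,
 * and handles ret_from_irq/ret_from_exception by reading the pt_regs
 * saved at the bottom of the interrupted context.
 */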
#ifdef CONFIG_KALLSYMS
/* generic stack unwinding function */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
					      unsigned long *sp,
					      unsigned long pc,
					      unsigned long *ra)
{
	struct mips_frame_info info;
	unsigned long size, ofs;
	int leaf;
	extern void ret_from_irq(void);
	extern void ret_from_exception(void);

	if (!stack_page)
		return 0;

	/*
	 * If we reached the bottom of interrupt context,
	 * return the saved pc from pt_regs.
	 */
	if (pc == (unsigned long)ret_from_irq ||
	    pc == (unsigned long)ret_from_exception) {
		struct pt_regs *regs;
		if (*sp >= stack_page &&
		    *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
			regs = (struct pt_regs *)*sp;
			pc = regs->cp0_epc;
			if (__kernel_text_address(pc)) {
				*sp = regs->regs[29];
				*ra = regs->regs[31];
				return pc;
			}
		}
		return 0;
	}
	if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
		return 0;
	/*
	 * Return ra if an exception occurred at the first instruction
	 */
	if (unlikely(ofs == 0)) {
		pc = *ra;
		*ra = 0;
		return pc;
	}

	info.func = (void *)(pc - ofs);
	info.func_size = ofs;	/* analyze from start to ofs */
	leaf = get_frame_info(&info);
	if (leaf < 0)
		return 0;

	if (*sp < stack_page ||
	    *sp + info.frame_size > stack_page + THREAD_SIZE - 32)
		return 0;

	if (leaf)
		/*
		 * In some extreme cases, get_frame_info() can wrongly
		 * classify a nested function as a leaf one.  In that
		 * case avoid returning the same value forever.
		 */
		pc = pc != *ra ? *ra : 0;
	else
		pc = ((unsigned long *)(*sp))[info.pc_offset];

	*sp += info.frame_size;
	*ra = 0;
	return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);

/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
			   unsigned long pc, unsigned long *ra)
{
	unsigned long stack_page = (unsigned long)task_stack_page(task);

	return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif

/*
 * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long sp;
	unsigned long ra = 0;
#endif

	if (!task || task == current || task->state == TASK_RUNNING)
		goto out;
	if (!task_stack_page(task))
		goto out;

	pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
	sp = task->thread.reg29 + schedule_mfi.frame_size;

	while (in_sched_functions(pc))
		pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
	return pc;
}

/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABIs and on a 16-byte boundary for the
 * 64-bit ABIs.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;

	return sp & ALMASK;
}