/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SWITCH_TO_H
#define _ASM_SWITCH_TO_H

#include <asm/cpu-features.h>
#include <asm/watch.h>
#include <asm/dsp.h>
#include <asm/cop2.h>
#include <asm/fpu.h>

struct task_struct;

/**
 * resume - resume execution of a task
 * @prev:	The task previously executed.
 * @next:	The task to begin executing.
 * @next_ti:	task_thread_info(next).
 *
 * This function is used whilst scheduling to save the context of prev & load
 * the context of next. Returns prev.
*/
extern asmlinkage struct task_struct *resume(struct task_struct *prev,
		struct task_struct *next, struct thread_info *next_ti);
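
/*
 * Note: resume() itself is implemented per CPU family in assembler;
 * see e.g. arch/mips/kernel/r4k_switch.S.
 */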
extern unsigned int ll_bit;
extern struct task_struct *ll_task;

#ifdef CONFIG_MIPS_MT_FPAFF
/*
 * Handle the scheduler resume end of FPU affinity management.  We do this
 * inline to try to keep the overhead down. If we have been forced to run on
 * a "CPU" with an FPU because of a previous high level of FP computation,
 * but did not actually use the FPU during the most recent time-slice (CU1
 * isn't set), we undo the restriction on cpus_mask.
 *
 * We're not calling set_cpus_allowed() here, because we have no need to
 * force prompt migration - we're already switching the current CPU to a
 * different thread.
 */
#define __mips_mt_fpaff_switch_to(prev)					\
do {									\
	struct thread_info *__prev_ti = task_thread_info(prev);	\
									\
	if (cpu_has_fpu &&						\
	    test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) &&		\
	    (!(KSTK_STATUS(prev) & ST0_CU1))) {				\
		clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND);		\
		prev->cpus_mask = prev->thread.user_cpus_allowed;	\
	}								\
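	/* "next" resolves in the switch_to() expansion that invokes this */ \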
	next->thread.emulated_fp = 0;					\
} while (0)

#else
#define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0)
#endif

/*
* Clear LLBit during context switches on MIPSr6 such that eretnc can be used
 * unconditionally when returning to userland in entry.S.
*/
#define __clear_r6_hw_ll_bit() do {					\
	if (cpu_has_mips_r6)						\
		write_c0_lladdr(0);					\
} while (0)

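/*
 * Likewise clear the software-maintained LLBit (ll_bit/ll_task, used by
 * the kernel's ll/sc emulation) so that an sc interrupted by this
 * context switch fails instead of completing a stale ll/sc pair.
 */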
#define __clear_software_ll_bit() do {					\
	if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)	\
		ll_bit = 0;						\
} while (0)

/*
 * Check FCSR for any unmasked exceptions pending set with `ptrace',
 * clear them and send a signal.
*/
#ifdef CONFIG_MIPS_FP_SUPPORT
#define __sanitize_fcr31(next)						\
do {									\
	unsigned long fcr31 = mask_fcr31_x(next->thread.fpu.fcr31);	\
	void __user *pc;						\
									\
	if (unlikely(fcr31)) {						\
		pc = (void __user *)task_pt_regs(next)->cp0_epc;	\
		next->thread.fpu.fcr31 &= ~fcr31;			\
		force_fcr31_sig(fcr31, pc, next);			\
	}								\
} while (0)
#else
#define __sanitize_fcr31(next)
#endif

/*
 * For newly created kernel threads switch_to() will return to
 * ret_from_kernel_thread, newly created user threads to ret_from_fork.
 * That is, everything following resume() will be skipped for new threads.
 * So everything that matters to new threads should be placed before resume().
 */
#define switch_to(prev, next, last)					\
do {									\
	__mips_mt_fpaff_switch_to(prev);				\
	lose_fpu_inatomic(1, prev);					\
	if (tsk_used_math(next))					\
		__sanitize_fcr31(next);					\
	if (cpu_has_dsp) {						\
		__save_dsp(prev);					\
		__restore_dsp(next);					\
	}								\
	if (cop2_present) {						\
		set_c0_status(ST0_CU2);					\
		if ((KSTK_STATUS(prev) & ST0_CU2)) {			\
			if (cop2_lazy_restore)				\
				KSTK_STATUS(prev) &= ~ST0_CU2;		\
			cop2_save(prev);				\
		}							\
		if (KSTK_STATUS(next) & ST0_CU2 &&			\
		    !cop2_lazy_restore) {				\
			cop2_restore(next);				\
		}							\
		clear_c0_status(ST0_CU2);				\
	}								\
	__clear_r6_hw_ll_bit();						\
	__clear_software_ll_bit();					\
	if (cpu_has_userlocal)						\
		write_c0_userlocal(task_thread_info(next)->tp_value);	\
	__restore_watch(next);						\
	(last) = resume(prev, next, task_thread_info(next));		\
} while (0)
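
/*
 * A sketch of the call site, simplified from context_switch() in
 * kernel/sched/core.c.  When "prev" eventually runs again, the third
 * argument ("last", here reusing prev) names the task this CPU switched
 * away from in order to resume it:
 *
 *	switch_to(prev, next, prev);
 *	barrier();
 *	return finish_task_switch(prev);
 */
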
#endif /* _ASM_SWITCH_TO_H */