#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <linux/thread_info.h>

#define PREEMPT_ENABLED	(0)

static __always_inline int preempt_count(void)
{
	return READ_ONCE(current_thread_info()->preempt_count);
}

static __always_inline volatile int *preempt_count_ptr(void)
{
	return &current_thread_info()->preempt_count;
}

static __always_inline void preempt_count_set(int pc)
{
	*preempt_count_ptr() = pc;
}

/*
 * must be macros to avoid header recursion hell
 */
#define init_task_preempt_count(p) do { \
	task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
} while (0)

#define init_idle_preempt_count(p, cpu) do { \
	task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
} while (0)

static __always_inline void set_preempt_need_resched(void)
{
}

static __always_inline void clear_preempt_need_resched(void)
{
}

static __always_inline bool test_preempt_need_resched(void)
{
	return false;
}

/*
 * The various preempt_count add/sub methods
 */

static __always_inline void __preempt_count_add(int val)
{
	*preempt_count_ptr() += val;
}

static __always_inline void __preempt_count_sub(int val)
{
	*preempt_count_ptr() -= val;
}
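
/*
 * Illustrative sketch, not part of this header: the wrappers in
 * <linux/preempt.h> are built on the two primitives above. Roughly,
 * preempt_disable() bumps the count and preempt_enable_no_resched()
 * drops it again, with compiler barriers keeping the critical section
 * inside the count change. The example_ names below are hypothetical
 * stand-ins; the real macros also handle CONFIG_DEBUG_PREEMPT and
 * tracing.
 */
#define example_preempt_disable() \
do { \
	__preempt_count_add(1);		/* enter a non-preemptible region */ \
	barrier(); \
} while (0)

#define example_preempt_enable_no_resched() \
do { \
	barrier(); \
	__preempt_count_sub(1);		/* leave without checking for resched */ \
} while (0)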

static __always_inline bool __preempt_count_dec_and_test(void)
{
	/*
	 * Because load-store architectures cannot do per-cpu atomic
	 * operations, we cannot use PREEMPT_NEED_RESCHED here; the flag
	 * might get lost.
	 */
	return !--*preempt_count_ptr() && tif_need_resched();
}
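
/*
 * Illustrative sketch, not part of this header: preempt_enable() in
 * <linux/preempt.h> pairs the decrement with a reschedule check,
 * roughly as below when CONFIG_PREEMPT is enabled. example_preempt_enable
 * is a hypothetical stand-in for the real macro.
 */
#define example_preempt_enable() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule();	/* count reached zero with TIF_NEED_RESCHED set */ \
} while (0)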

/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static __always_inline bool should_resched(int preempt_offset)
{
	return unlikely(preempt_count() == preempt_offset &&
			tif_need_resched());
}
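
/*
 * Illustrative sketch, not part of this header: should_resched() is what
 * lets voluntary-preemption callers such as _cond_resched() decide whether
 * a reschedule is both wanted and currently allowed. The function below is
 * a hypothetical caller, simplified from what the scheduler core does.
 */
static inline int example_cond_resched(void)
{
	/* offset 0: resched only when no preempt_disable() is outstanding */
	if (should_resched(0)) {
		/* ...enter the scheduler, e.g. via preempt_schedule_common()... */
		return 1;
	}
	return 0;
}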

#ifdef CONFIG_PREEMPT
extern asmlinkage void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
extern asmlinkage void preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPT */

#endif /* __ASM_PREEMPT_H */