diff --git a/include/proto/task.h b/include/proto/task.h
index cbc1a9072..c1c4c07ec 100644
--- a/include/proto/task.h
+++ b/include/proto/task.h
@@ -90,6 +90,8 @@ extern unsigned int nb_tasks_cur;
 extern unsigned int niced_tasks;  /* number of niced tasks in the run queue */
 extern struct pool_head *pool_head_task;
 extern struct pool_head *pool_head_notification;
+extern THREAD_LOCAL struct task *curr_task; /* task currently running or NULL */
+extern THREAD_LOCAL struct eb32sc_node *rq_next; /* Next task to be potentially run */
 
 __decl_hathreads(extern HA_SPINLOCK_T rq_lock);  /* spin lock related to run queue */
 __decl_hathreads(extern HA_SPINLOCK_T wq_lock);  /* spin lock related to wait queue */
@@ -177,8 +179,11 @@ static inline struct task *__task_unlink_rq(struct task *t)
 static inline struct task *task_unlink_rq(struct task *t)
 {
 	HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
-	if (likely(task_in_rq(t)))
+	if (likely(task_in_rq(t))) {
+		if (&t->rq == rq_next)
+			rq_next = eb32sc_next(rq_next, tid_bit);
 		__task_unlink_rq(t);
+	}
 	HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
 	return t;
 }
@@ -230,7 +235,7 @@ static inline struct task *task_new(unsigned long thread_mask)
  * Free a task. Its context must have been freed since it will be lost.
  * The task count is decremented.
  */
-static inline void task_free(struct task *t)
+static inline void __task_free(struct task *t)
 {
 	pool_free(pool_head_task, t);
 	if (unlikely(stopping))
@@ -238,6 +243,18 @@ static inline void task_free(struct task *t)
 	HA_ATOMIC_SUB(&nb_tasks, 1);
 }
 
+static inline void task_free(struct task *t)
+{
+	/* There's no need to protect t->state with a lock, as the task
+	 * has to run on the current thread.
+	 */
+	if (t == curr_task || !(t->state & TASK_RUNNING))
+		__task_free(t);
+	else
+		t->process = NULL;
+}
+
+
 /* Place into the wait queue, where it may already be. If the expiration
  * timer is infinite, do nothing and rely on wake_expired_task to clean up.
  */
diff --git a/src/task.c b/src/task.c
index fd9acf66d..3d021bb4c 100644
--- a/src/task.c
+++ b/src/task.c
@@ -39,6 +39,7 @@ unsigned int nb_tasks_cur = 0;     /* copy of the tasks count */
 unsigned int niced_tasks  = 0;     /* number of niced tasks in the run queue */
 
 THREAD_LOCAL struct task *curr_task = NULL; /* task currently running or NULL */
+THREAD_LOCAL struct eb32sc_node *rq_next = NULL; /* Next task to be potentially run */
 
 __decl_hathreads(HA_SPINLOCK_T __attribute__((aligned(64))) rq_lock); /* spin lock related to run queue */
 __decl_hathreads(HA_SPINLOCK_T __attribute__((aligned(64))) wq_lock); /* spin lock related to wait queue */
@@ -186,7 +187,6 @@ void process_runnable_tasks()
 	struct task *t;
 	int i;
 	int max_processed;
-	struct eb32sc_node *rq_next;
 	struct task *local_tasks[16];
 	int local_tasks_count;
 	int final_tasks_count;
@@ -227,8 +227,14 @@ void process_runnable_tasks()
 		 */
 		if (likely(t->process == process_stream))
			t = process_stream(t);
-		else
-			t = t->process(t);
+		else {
+			if (t->process != NULL)
+				t = t->process(t);
+			else {
+				__task_free(t);
+				t = NULL;
+			}
+		}
 		curr_task = NULL;
 
 		if (likely(t != NULL)) {
@@ -309,8 +315,14 @@ void process_runnable_tasks()
 			curr_task = t;
 			if (likely(t->process == process_stream))
 				t = process_stream(t);
-			else
-				t = t->process(t);
+			else {
+				if (t->process != NULL)
+					t = t->process(t);
+				else {
+					__task_free(t);
+					t = NULL;
+				}
+			}
 			curr_task = NULL;
 			if (t)
 				local_tasks[final_tasks_count++] = t;
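
Note on the pattern: this patch closes a use-after-free window. The scheduler picks a batch of runnable tasks, marks them TASK_RUNNING, then calls ->process on each; if a handler earlier in the batch frees a later task, the scheduler dereferences freed memory. The fix splits task_free() in two: __task_free() actually releases the task, while task_free() only does so when it is safe (the task is freeing itself, or is not currently scheduled to run) and otherwise just clears ->process, leaving the scheduler to complete the free when it reaches the task. Below is a minimal, single-threaded sketch of that handshake; this is not HAProxy code: the run queue, pools and locks are stripped out, and run_tasks(), say_hello() and kill_victim() are made-up names for illustration.

/* Minimal, single-threaded model of the deferred free; NOT HAProxy code.
 * Only the handshake between task_free() and the scheduler loop is kept.
 */
#include <stdio.h>
#include <stdlib.h>

#define TASK_RUNNING 0x0001

struct task {
	unsigned short state;
	struct task *(*process)(struct task *t);
};

static struct task *curr_task; /* task currently running, or NULL */

static void __task_free(struct task *t)
{
	free(t); /* the real version returns t to its pool */
}

/* Free immediately only when safe: the task is freeing itself, or it is
 * not sitting between "picked by the scheduler" and "->process called".
 * Otherwise clear ->process and let the scheduler free it.
 */
static void task_free(struct task *t)
{
	if (t == curr_task || !(t->state & TASK_RUNNING))
		__task_free(t);
	else
		t->process = NULL;
}

/* One scheduler pass over a batch already marked TASK_RUNNING. */
static void run_tasks(struct task **batch, int count)
{
	for (int i = 0; i < count; i++) {
		struct task *t = batch[i];

		curr_task = t;
		if (t->process != NULL)
			t = t->process(t);
		else {
			/* task_free() was called on this task while it was
			 * in flight: complete the free here, safely.
			 */
			puts("scheduler: completing deferred free");
			__task_free(t);
			t = NULL;
		}
		curr_task = NULL;
		if (t != NULL)
			t->state &= ~TASK_RUNNING;
	}
}

static struct task *victim; /* made-up handlers for the demo */

static struct task *say_hello(struct task *t)
{
	puts("B: ran normally");
	return t;
}

static struct task *kill_victim(struct task *t)
{
	puts("A: freeing B while it is still queued in this batch");
	task_free(victim); /* with the old code: use-after-free once B is reached */
	return t;
}

int main(void)
{
	struct task *a = calloc(1, sizeof(*a));
	struct task *b = calloc(1, sizeof(*b));

	a->process = kill_victim;
	b->process = say_hello;
	victim = b;

	a->state |= TASK_RUNNING;
	b->state |= TASK_RUNNING;

	struct task *batch[2] = { a, b };
	run_tasks(batch, 2); /* B never runs; its free is deferred to the loop */

	task_free(a); /* A is no longer running: freed immediately */
	return 0;
}

Here kill_victim() frees another task of the same batch while both are in flight; task_free() sees TASK_RUNNING set on a task other than curr_task, so it only clears ->process, and run_tasks() performs the actual free once it reaches that slot. The rq_next half of the patch addresses the sibling hazard on the run-queue side: exposing the scheduler's iterator as a thread-local lets task_unlink_rq() advance it with eb32sc_next() before unlinking the very node the scheduler would have visited next.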