irq_work: Cleanup
Get rid of the __call_single_node union and clean up the API a little to avoid external code relying on the structure layout as much.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
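Illustrative only, not part of the patch: the point of the cleanup is that code outside kernel/irq_work.c no longer reaches into the llnode/flags layout but goes through initializers and accessors. A minimal sketch of the intended usage, with made-up names (my_handler, my_work, my_kick):

	#include <linux/irq_work.h>

	static void my_handler(struct irq_work *work)
	{
		/* invoked once the queued work is raised */
	}

	/* static definition through the new initializer, no layout knowledge needed */
	static DEFINE_IRQ_WORK(my_work, my_handler);

	static void my_kick(void)
	{
		/* same guard the bpf conversions below use, via the new accessor */
		if (irq_work_is_busy(&my_work))
			return;
		irq_work_queue(&my_work);
	}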
commit 7a9f50a058
parent 23e6082a52
@@ -197,7 +197,7 @@ __notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk))
 	llist_for_each_entry_safe(cb, cn,
 				  llist_del_all(&rq->execute_cb),
-				  work.llnode)
+				  work.node.llist)
 		fn(&cb->work);
 }
 
@@ -460,7 +460,7 @@ __await_execution(struct i915_request *rq,
 	 * callback first, then checking the ACTIVE bit, we serialise with
 	 * the completed/retired request.
	 */
-	if (llist_add(&cb->work.llnode, &signal->execute_cb)) {
+	if (llist_add(&cb->work.node.llist, &signal->execute_cb)) {
		if (i915_request_is_active(signal) ||
		    __request_in_flight(signal))
			__notify_execute_cb_imm(signal);
@@ -14,28 +14,37 @@
  */
 
 struct irq_work {
-	union {
-		struct __call_single_node node;
-		struct {
-			struct llist_node llnode;
-			atomic_t flags;
-		};
-	};
+	struct __call_single_node node;
	void (*func)(struct irq_work *);
 };
 
+#define __IRQ_WORK_INIT(_func, _flags) (struct irq_work){	\
+	.node = { .u_flags = (_flags), },			\
+	.func = (_func),					\
+}
+
+#define IRQ_WORK_INIT(_func) __IRQ_WORK_INIT(_func, 0)
+#define IRQ_WORK_INIT_LAZY(_func) __IRQ_WORK_INIT(_func, IRQ_WORK_LAZY)
+#define IRQ_WORK_INIT_HARD(_func) __IRQ_WORK_INIT(_func, IRQ_WORK_HARD_IRQ)
+
+#define DEFINE_IRQ_WORK(name, _f)	\
+	struct irq_work name = IRQ_WORK_INIT(_f)
+
 static inline
 void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
 {
-	atomic_set(&work->flags, 0);
-	work->func = func;
+	*work = IRQ_WORK_INIT(func);
 }
 
-#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = {	\
-	.flags = ATOMIC_INIT(0),				\
-	.func = (_f)						\
+static inline bool irq_work_is_pending(struct irq_work *work)
+{
+	return atomic_read(&work->node.a_flags) & IRQ_WORK_PENDING;
 }
 
+static inline bool irq_work_is_busy(struct irq_work *work)
+{
+	return atomic_read(&work->node.a_flags) & IRQ_WORK_BUSY;
+}
+
 bool irq_work_queue(struct irq_work *work);
 bool irq_work_queue_on(struct irq_work *work, int cpu);
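Illustrative only, not part of the patch: the initializers above map onto the two conversion patterns in the hunks that follow, static (per-CPU) definitions and runtime (re)initialization of an embedded irq_work. A sketch under those assumptions, with hypothetical names (my_func, my_pcpu_work, struct my_ctx):

	#include <linux/irq_work.h>
	#include <linux/percpu.h>

	static void my_func(struct irq_work *work)
	{
		/* must be safe to run from hard interrupt context */
	}

	/* static per-CPU definition, as done for nohz_full_kick_work below */
	static DEFINE_PER_CPU(struct irq_work, my_pcpu_work) =
		IRQ_WORK_INIT_HARD(my_func);

	/* runtime re-init of an embedded work, replacing the old
	 * init_irq_work() + atomic_set(&...->flags, ...) pair */
	struct my_ctx {
		struct irq_work iw;
	};

	static void my_ctx_init(struct my_ctx *ctx)
	{
		ctx->iw = IRQ_WORK_INIT_HARD(my_func);
	}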
@@ -109,12 +109,12 @@ do { \
 # define lockdep_irq_work_enter(__work) \
	do { \
-		if (!(atomic_read(&__work->flags) & IRQ_WORK_HARD_IRQ))\
+		if (!(atomic_read(&__work->node.a_flags) & IRQ_WORK_HARD_IRQ))\
			current->irq_config = 1; \
	} while (0)
 # define lockdep_irq_work_exit(__work) \
	do { \
-		if (!(atomic_read(&__work->flags) & IRQ_WORK_HARD_IRQ))\
+		if (!(atomic_read(&__work->node.a_flags) & IRQ_WORK_HARD_IRQ))\
			current->irq_config = 0; \
	} while (0)
 
@@ -298,7 +298,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
	if (irqs_disabled()) {
		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
			work = this_cpu_ptr(&up_read_work);
-			if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY) {
+			if (irq_work_is_busy(&work->irq_work)) {
				/* cannot queue more up_read, fallback */
				irq_work_busy = true;
			}
@@ -31,7 +31,7 @@ static bool irq_work_claim(struct irq_work *work)
 {
	int oflags;
 
-	oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->flags);
+	oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags);
	/*
	 * If the work is already pending, no need to raise the IPI.
	 * The pairing atomic_fetch_andnot() in irq_work_run() makes sure
@@ -53,12 +53,12 @@ void __weak arch_irq_work_raise(void)
 static void __irq_work_queue_local(struct irq_work *work)
 {
	/* If the work is "lazy", handle it from next tick if any */
-	if (atomic_read(&work->flags) & IRQ_WORK_LAZY) {
-		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
+	if (atomic_read(&work->node.a_flags) & IRQ_WORK_LAZY) {
+		if (llist_add(&work->node.llist, this_cpu_ptr(&lazy_list)) &&
		    tick_nohz_tick_stopped())
			arch_irq_work_raise();
	} else {
-		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
+		if (llist_add(&work->node.llist, this_cpu_ptr(&raised_list)))
			arch_irq_work_raise();
	}
 }
@@ -102,7 +102,7 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
	if (cpu != smp_processor_id()) {
		/* Arch remote IPI send/receive backend aren't NMI safe */
		WARN_ON_ONCE(in_nmi());
-		__smp_call_single_queue(cpu, &work->llnode);
+		__smp_call_single_queue(cpu, &work->node.llist);
	} else {
		__irq_work_queue_local(work);
	}
@@ -142,7 +142,7 @@ void irq_work_single(void *arg)
	 * to claim that work don't rely on us to handle their data
	 * while we are in the middle of the func.
	 */
-	flags = atomic_fetch_andnot(IRQ_WORK_PENDING, &work->flags);
+	flags = atomic_fetch_andnot(IRQ_WORK_PENDING, &work->node.a_flags);
 
	lockdep_irq_work_enter(work);
	work->func(work);
@@ -152,7 +152,7 @@ void irq_work_single(void *arg)
	 * no-one else claimed it meanwhile.
	 */
	flags &= ~IRQ_WORK_PENDING;
-	(void)atomic_cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
+	(void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY);
 }
 
 static void irq_work_run_list(struct llist_head *list)
@@ -166,7 +166,7 @@ static void irq_work_run_list(struct llist_head *list)
		return;
 
	llnode = llist_del_all(list);
-	llist_for_each_entry_safe(work, tmp, llnode, llnode)
+	llist_for_each_entry_safe(work, tmp, llnode, node.llist)
		irq_work_single(work);
 }
 
@@ -198,7 +198,7 @@ void irq_work_sync(struct irq_work *work)
 {
	lockdep_assert_irqs_enabled();
 
-	while (atomic_read(&work->flags) & IRQ_WORK_BUSY)
+	while (irq_work_is_busy(work))
		cpu_relax();
 }
 EXPORT_SYMBOL_GPL(irq_work_sync);
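Also illustrative: irq_work_sync() now spins on the same irq_work_is_busy() accessor that is exported to users, which keeps the usual teardown pattern layout-agnostic as well. A hedged sketch, reusing the hypothetical struct my_ctx from the earlier sketch:

	static void my_ctx_teardown(struct my_ctx *ctx)
	{
		/* wait for a callback that may still be running on another CPU */
		irq_work_sync(&ctx->iw);
		/* only now is it safe to free ctx */
	}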
@@ -3025,10 +3025,8 @@ static void wake_up_klogd_work_func(struct irq_work *irq_work)
		wake_up_interruptible(&log_wait);
 }
 
-static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
-	.func = wake_up_klogd_work_func,
-	.flags = ATOMIC_INIT(IRQ_WORK_LAZY),
-};
+static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) =
+	IRQ_WORK_INIT_LAZY(wake_up_klogd_work_func);
 
 void wake_up_klogd(void)
 {
@@ -1311,8 +1311,6 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
	if (IS_ENABLED(CONFIG_IRQ_WORK) &&
	    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
	    (rnp->ffmask & rdp->grpmask)) {
-		init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
-		atomic_set(&rdp->rcu_iw.flags, IRQ_WORK_HARD_IRQ);
		rdp->rcu_iw_pending = true;
		rdp->rcu_iw_gp_seq = rnp->gp_seq;
		irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
@@ -3964,6 +3962,7 @@ int rcutree_prepare_cpu(unsigned int cpu)
	rdp->cpu_no_qs.b.norm = true;
	rdp->core_needs_qs = false;
	rdp->rcu_iw_pending = false;
+	rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler);
	rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
	trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
@ -243,10 +243,8 @@ static void nohz_full_kick_func(struct irq_work *work)
|
|||||||
/* Empty, the tick restart happens on tick_nohz_irq_exit() */
|
/* Empty, the tick restart happens on tick_nohz_irq_exit() */
|
||||||
}
|
}
|
||||||
|
|
||||||
static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
|
static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) =
|
||||||
.func = nohz_full_kick_func,
|
IRQ_WORK_INIT_HARD(nohz_full_kick_func);
|
||||||
.flags = ATOMIC_INIT(IRQ_WORK_HARD_IRQ),
|
|
||||||
};
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Kick this CPU if it's full dynticks in order to force it to
|
* Kick this CPU if it's full dynticks in order to force it to
|
||||||
|
@@ -1086,7 +1086,7 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type)
		return -EINVAL;
 
	work = this_cpu_ptr(&send_signal_work);
-	if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
+	if (irq_work_is_busy(&work->irq_work))
		return -EBUSY;
 
	/* Add the current task, which is the target of sending signal,