sched/swait: Rename to exclusive
Since swait basically implemented exclusive waits only, make sure the
API reflects that.

  $ git grep -l -e "\<swake_up\>" \
              -e "\<swait_event[^ (]*" \
              -e "\<prepare_to_swait\>" | while read file;
    do
      sed -i -e 's/\<swake_up\>/&_one/g' \
             -e 's/\<swait_event[^ (]*/&_exclusive/g' \
             -e 's/\<prepare_to_swait\>/&_exclusive/g' $file;
    done

With a few manual touch-ups.

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: bigeasy@linutronix.de
Cc: oleg@redhat.com
Cc: paulmck@linux.vnet.ibm.com
Cc: pbonzini@redhat.com
Link: https://lkml.kernel.org/r/20180612083909.261946548@infradead.org
commit b3dae109fa
parent 0abf17bc77
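For orientation, here is a minimal sketch of the renamed API as the diff below uses it. The queue name, condition flag, and the two wrapper functions are illustrative only, not part of this commit; the pattern assumes <linux/swait.h>:

    /* Sketch only: the single-waiter pattern the swait API is meant for.
     * my_wq and my_cond are hypothetical names.
     */
    #include <linux/swait.h>

    static DECLARE_SWAIT_QUEUE_HEAD(my_wq);
    static bool my_cond;

    /* Waiter side: blocks (uninterruptibly) until my_cond becomes true. */
    static void wait_side(void)
    {
            swait_event_exclusive(my_wq, READ_ONCE(my_cond));
    }

    /* Waker side: publish the condition, then wake exactly one waiter. */
    static void wake_side(void)
    {
            WRITE_ONCE(my_cond, true);
            if (swq_has_sleeper(&my_wq))    /* implies a full barrier, see swait.h */
                    swake_up_one(&my_wq);
    }

The new names make the exclusive (wake-one) semantics explicit at every call site, which is what the diff below mechanically applies.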
@@ -515,7 +515,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
 	dvcpu->arch.wait = 0;
 
 	if (swq_has_sleeper(&dvcpu->wq))
-		swake_up(&dvcpu->wq);
+		swake_up_one(&dvcpu->wq);
 
 	return 0;
 }
@@ -1204,7 +1204,7 @@ static void kvm_mips_comparecount_func(unsigned long data)
 
 	vcpu->arch.wait = 0;
 	if (swq_has_sleeper(&vcpu->wq))
-		swake_up(&vcpu->wq);
+		swake_up_one(&vcpu->wq);
 }
 
 /* low level hrtimer wake routine */
@@ -216,7 +216,7 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 
 	wqp = kvm_arch_vcpu_wq(vcpu);
 	if (swq_has_sleeper(wqp)) {
-		swake_up(wqp);
+		swake_up_one(wqp);
 		++vcpu->stat.halt_wakeup;
 	}
 
@@ -3188,7 +3188,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 		}
 	}
 
-	prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
+	prepare_to_swait_exclusive(&vc->wq, &wait, TASK_INTERRUPTIBLE);
 
 	if (kvmppc_vcore_check_block(vc)) {
 		finish_swait(&vc->wq, &wait);
@@ -3311,7 +3311,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 			kvmppc_start_thread(vcpu, vc);
 			trace_kvm_guest_enter(vcpu);
 		} else if (vc->vcore_state == VCORE_SLEEPING) {
-			swake_up(&vc->wq);
+			swake_up_one(&vc->wq);
 		}
 
 	}
@@ -1145,7 +1145,7 @@ void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
 	 * yield-candidate.
 	 */
 	vcpu->preempted = true;
-	swake_up(&vcpu->wq);
+	swake_up_one(&vcpu->wq);
 	vcpu->stat.halt_wakeup++;
 }
 /*
@@ -154,7 +154,7 @@ void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
 
 	for (;;) {
 		if (!n.halted)
-			prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
+			prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
 		if (hlist_unhashed(&n.link))
 			break;
 
@@ -188,7 +188,7 @@ static void apf_task_wake_one(struct kvm_task_sleep_node *n)
 	if (n->halted)
 		smp_send_reschedule(n->cpu);
 	else if (swq_has_sleeper(&n->wq))
-		swake_up(&n->wq);
+		swake_up_one(&n->wq);
 }
 
 static void apf_task_wake_all(void)
@@ -1379,7 +1379,7 @@ static void apic_timer_expired(struct kvm_lapic *apic)
 	 * using swait_active() is safe.
 	 */
 	if (swait_active(q))
-		swake_up(q);
+		swake_up_one(q);
 
 	if (apic_lvtt_tscdeadline(apic))
 		ktimer->expired_tscdeadline = ktimer->tscdeadline;
@@ -16,7 +16,7 @@
  * wait-queues, but the semantics are actually completely different, and
  * every single user we have ever had has been buggy (or pointless).
  *
- * A "swake_up()" only wakes up _one_ waiter, which is not at all what
+ * A "swake_up_one()" only wakes up _one_ waiter, which is not at all what
  * "wake_up()" does, and has led to problems. In other cases, it has
  * been fine, because there's only ever one waiter (kvm), but in that
  * case gthe whole "simple" wait-queue is just pointless to begin with,
@@ -115,7 +115,7 @@ extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name
  *	CPU0 - waker			CPU1 - waiter
  *
  *					for (;;) {
- *	@cond = true;			prepare_to_swait(&wq_head, &wait, state);
+ *	@cond = true;			prepare_to_swait_exclusive(&wq_head, &wait, state);
  *	smp_mb();			// smp_mb() from set_current_state()
  *	if (swait_active(wq_head))	if (@cond)
  *		wake_up(wq_head);		break;
@@ -157,11 +157,11 @@ static inline bool swq_has_sleeper(struct swait_queue_head *wq)
 	return swait_active(wq);
 }
 
-extern void swake_up(struct swait_queue_head *q);
+extern void swake_up_one(struct swait_queue_head *q);
 extern void swake_up_all(struct swait_queue_head *q);
 extern void swake_up_locked(struct swait_queue_head *q);
 
-extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state);
+extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state);
 extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);
 
 extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
@@ -196,7 +196,7 @@ __out:	__ret;						\
 	(void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0,	\
 			    schedule())
 
-#define swait_event(wq, condition)					\
+#define swait_event_exclusive(wq, condition)				\
 do {									\
 	if (condition)							\
 		break;							\
@@ -208,7 +208,7 @@ do {									\
 			      TASK_UNINTERRUPTIBLE, timeout,		\
 			      __ret = schedule_timeout(__ret))
 
-#define swait_event_timeout(wq, condition, timeout)			\
+#define swait_event_timeout_exclusive(wq, condition, timeout)		\
 ({									\
 	long __ret = timeout;						\
 	if (!___wait_cond_timeout(condition))				\
@@ -220,7 +220,7 @@ do {									\
 	___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0,		\
 		      schedule())
 
-#define swait_event_interruptible(wq, condition)			\
+#define swait_event_interruptible_exclusive(wq, condition)		\
 ({									\
 	int __ret = 0;							\
 	if (!(condition))						\
@@ -233,7 +233,7 @@ do {									\
 			      TASK_INTERRUPTIBLE, timeout,		\
 			      __ret = schedule_timeout(__ret))
 
-#define swait_event_interruptible_timeout(wq, condition, timeout)	\
+#define swait_event_interruptible_timeout_exclusive(wq, condition, timeout)\
 ({									\
 	long __ret = timeout;						\
 	if (!___wait_cond_timeout(condition))				\
@@ -246,7 +246,7 @@ do {									\
 	(void)___swait_event(wq, condition, TASK_IDLE, 0, schedule())
 
 /**
- * swait_event_idle - wait without system load contribution
+ * swait_event_idle_exclusive - wait without system load contribution
  * @wq: the waitqueue to wait on
  * @condition: a C expression for the event to wait for
  *
@@ -257,7 +257,7 @@ do {									\
  * condition and doesn't want to contribute to system load. Signals are
  * ignored.
  */
-#define swait_event_idle(wq, condition)					\
+#define swait_event_idle_exclusive(wq, condition)			\
 do {									\
 	if (condition)							\
 		break;							\
@@ -270,7 +270,7 @@ do {									\
 			      __ret = schedule_timeout(__ret))
 
 /**
- * swait_event_idle_timeout - wait up to timeout without load contribution
+ * swait_event_idle_timeout_exclusive - wait up to timeout without load contribution
  * @wq: the waitqueue to wait on
  * @condition: a C expression for the event to wait for
  * @timeout: timeout at which we'll give up in jiffies
@@ -288,7 +288,7 @@ do {									\
  * or the remaining jiffies (at least 1) if the @condition evaluated
  * to %true before the @timeout elapsed.
  */
-#define swait_event_idle_timeout(wq, condition, timeout)		\
+#define swait_event_idle_timeout_exclusive(wq, condition, timeout)	\
 ({									\
 	long __ret = timeout;						\
 	if (!___wait_cond_timeout(condition))				\
@@ -92,7 +92,7 @@ static void s2idle_enter(void)
 	/* Push all the CPUs into the idle loop. */
 	wake_up_all_idle_cpus();
 	/* Make the current CPU wait so it can enter the idle loop too. */
-	swait_event(s2idle_wait_head,
+	swait_event_exclusive(s2idle_wait_head,
 		    s2idle_state == S2IDLE_STATE_WAKE);
 
 	cpuidle_pause();
@@ -160,7 +160,7 @@ void s2idle_wake(void)
 	raw_spin_lock_irqsave(&s2idle_lock, flags);
 	if (s2idle_state > S2IDLE_STATE_NONE) {
 		s2idle_state = S2IDLE_STATE_WAKE;
-		swake_up(&s2idle_wait_head);
+		swake_up_one(&s2idle_wait_head);
 	}
 	raw_spin_unlock_irqrestore(&s2idle_lock, flags);
 }
@@ -110,7 +110,7 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
 
 	WRITE_ONCE(sp->srcu_lock_nesting[idx], newval);
 	if (!newval && READ_ONCE(sp->srcu_gp_waiting))
-		swake_up(&sp->srcu_wq);
+		swake_up_one(&sp->srcu_wq);
 }
 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 
@@ -140,7 +140,7 @@ void srcu_drive_gp(struct work_struct *wp)
 	idx = sp->srcu_idx;
 	WRITE_ONCE(sp->srcu_idx, !sp->srcu_idx);
 	WRITE_ONCE(sp->srcu_gp_waiting, true);  /* srcu_read_unlock() wakes! */
-	swait_event(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
+	swait_event_exclusive(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
 	WRITE_ONCE(sp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
 
 	/* Invoke the callbacks we removed above. */
@@ -1727,7 +1727,7 @@ static void rcu_gp_kthread_wake(struct rcu_state *rsp)
 	    !READ_ONCE(rsp->gp_flags) ||
 	    !rsp->gp_kthread)
 		return;
-	swake_up(&rsp->gp_wq);
+	swake_up_one(&rsp->gp_wq);
 }
 
 /*
@@ -2002,7 +2002,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 }
 
 /*
- * Helper function for swait_event_idle() wakeup at force-quiescent-state
+ * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
  * time.
 */
 static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
@@ -2144,7 +2144,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 					       READ_ONCE(rsp->gpnum),
 					       TPS("reqwait"));
 			rsp->gp_state = RCU_GP_WAIT_GPS;
-			swait_event_idle(rsp->gp_wq, READ_ONCE(rsp->gp_flags) &
+			swait_event_idle_exclusive(rsp->gp_wq, READ_ONCE(rsp->gp_flags) &
 					 RCU_GP_FLAG_INIT);
 			rsp->gp_state = RCU_GP_DONE_GPS;
 			/* Locking provides needed memory barrier. */
@@ -2176,7 +2176,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 					       READ_ONCE(rsp->gpnum),
 					       TPS("fqswait"));
 			rsp->gp_state = RCU_GP_WAIT_FQS;
-			ret = swait_event_idle_timeout(rsp->gp_wq,
+			ret = swait_event_idle_timeout_exclusive(rsp->gp_wq,
 					rcu_gp_fqs_check_wake(rsp, &gf), j);
 			rsp->gp_state = RCU_GP_DOING_FQS;
 			/* Locking provides needed memory barriers. */
@@ -212,7 +212,7 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			if (wake) {
 				smp_mb(); /* EGP done before wake_up(). */
-				swake_up(&rsp->expedited_wq);
+				swake_up_one(&rsp->expedited_wq);
 			}
 			break;
 		}
@@ -518,7 +518,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
 	jiffies_start = jiffies;
 
 	for (;;) {
-		ret = swait_event_timeout(
+		ret = swait_event_timeout_exclusive(
 				rsp->expedited_wq,
 				sync_rcu_preempt_exp_done_unlocked(rnp_root),
 				jiffies_stall);
@@ -1854,8 +1854,8 @@ static void __wake_nocb_leader(struct rcu_data *rdp, bool force,
 		WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
 		del_timer(&rdp->nocb_timer);
 		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
-		smp_mb(); /* ->nocb_leader_sleep before swake_up(). */
-		swake_up(&rdp_leader->nocb_wq);
+		smp_mb(); /* ->nocb_leader_sleep before swake_up_one(). */
+		swake_up_one(&rdp_leader->nocb_wq);
 	} else {
 		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
 	}
@@ -2082,7 +2082,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 	 */
 	trace_rcu_this_gp(rnp, rdp, c, TPS("StartWait"));
 	for (;;) {
-		swait_event_interruptible(
+		swait_event_interruptible_exclusive(
 			rnp->nocb_gp_wq[c & 0x1],
 			(d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c)));
 		if (likely(d))
@@ -2111,7 +2111,7 @@ wait_again:
 	/* Wait for callbacks to appear. */
 	if (!rcu_nocb_poll) {
 		trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, TPS("Sleep"));
-		swait_event_interruptible(my_rdp->nocb_wq,
+		swait_event_interruptible_exclusive(my_rdp->nocb_wq,
 				!READ_ONCE(my_rdp->nocb_leader_sleep));
 		raw_spin_lock_irqsave(&my_rdp->nocb_lock, flags);
 		my_rdp->nocb_leader_sleep = true;
@@ -2176,7 +2176,7 @@ wait_again:
 		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
 		if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
 			/* List was empty, so wake up the follower.  */
-			swake_up(&rdp->nocb_wq);
+			swake_up_one(&rdp->nocb_wq);
 		}
 	}
 
@@ -2193,7 +2193,7 @@ static void nocb_follower_wait(struct rcu_data *rdp)
 {
 	for (;;) {
 		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("FollowerSleep"));
-		swait_event_interruptible(rdp->nocb_wq,
+		swait_event_interruptible_exclusive(rdp->nocb_wq,
 					 READ_ONCE(rdp->nocb_follower_head));
 		if (smp_load_acquire(&rdp->nocb_follower_head)) {
 			/* ^^^ Ensure CB invocation follows _head test. */
@@ -32,7 +32,7 @@ void swake_up_locked(struct swait_queue_head *q)
 }
 EXPORT_SYMBOL(swake_up_locked);
 
-void swake_up(struct swait_queue_head *q)
+void swake_up_one(struct swait_queue_head *q)
 {
 	unsigned long flags;
 
@@ -40,7 +40,7 @@ void swake_up(struct swait_queue_head *q)
 	swake_up_locked(q);
 	raw_spin_unlock_irqrestore(&q->lock, flags);
 }
-EXPORT_SYMBOL(swake_up);
+EXPORT_SYMBOL(swake_up_one);
 
 /*
  * Does not allow usage from IRQ disabled, since we must be able to
@@ -76,7 +76,7 @@ static void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *w
 	list_add_tail(&wait->task_list, &q->task_list);
 }
 
-void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state)
+void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state)
 {
 	unsigned long flags;
 
@@ -85,7 +85,7 @@ void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int
 	set_current_state(state);
 	raw_spin_unlock_irqrestore(&q->lock, flags);
 }
-EXPORT_SYMBOL(prepare_to_swait);
+EXPORT_SYMBOL(prepare_to_swait_exclusive);
 
 long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
 {
@@ -95,7 +95,7 @@ long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait
 	raw_spin_lock_irqsave(&q->lock, flags);
 	if (unlikely(signal_pending_state(state, current))) {
 		/*
-		 * See prepare_to_wait_event(). TL;DR, subsequent swake_up()
+		 * See prepare_to_wait_event(). TL;DR, subsequent swake_up_one()
 		 * must not see us.
 		 */
 		list_del_init(&wait->task_list);
@@ -604,7 +604,7 @@ void kvm_arm_resume_guest(struct kvm *kvm)
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
 		vcpu->arch.pause = false;
-		swake_up(kvm_arch_vcpu_wq(vcpu));
+		swake_up_one(kvm_arch_vcpu_wq(vcpu));
 	}
 }
 
@@ -612,7 +612,7 @@ static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
 {
 	struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
 
-	swait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
+	swait_event_interruptible_exclusive(*wq, ((!vcpu->arch.power_off) &&
 				       (!vcpu->arch.pause)));
 
 	if (vcpu->arch.power_off || vcpu->arch.pause) {
@@ -155,7 +155,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 	smp_mb();		/* Make sure the above is visible */
 
 	wq = kvm_arch_vcpu_wq(vcpu);
-	swake_up(wq);
+	swake_up_one(wq);
 
 	return PSCI_RET_SUCCESS;
 }
@@ -107,7 +107,7 @@ static void async_pf_execute(struct work_struct *work)
 	trace_kvm_async_pf_completed(addr, gva);
 
 	if (swq_has_sleeper(&vcpu->wq))
-		swake_up(&vcpu->wq);
+		swake_up_one(&vcpu->wq);
 
 	mmput(mm);
 	kvm_put_kvm(vcpu->kvm);
@@ -2167,7 +2167,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 	kvm_arch_vcpu_blocking(vcpu);
 
 	for (;;) {
-		prepare_to_swait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_swait_exclusive(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
 
 		if (kvm_vcpu_check_block(vcpu) < 0)
 			break;
@@ -2209,7 +2209,7 @@ bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
 
 	wqp = kvm_arch_vcpu_wq(vcpu);
 	if (swq_has_sleeper(wqp)) {
-		swake_up(wqp);
+		swake_up_one(wqp);
 		++vcpu->stat.halt_wakeup;
 		return true;
 	}