rfkill: sync before userspace visibility/changes

If userspace quickly opens /dev/rfkill after a new
instance was created, it might see the old state of
the instance from before the sync work runs and may
even _change_ the state, only to have the sync work
change it again.

Fix this by doing the sync inline where needed, not
just for /dev/rfkill but also for sysfs.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Author: Johannes Berg <johannes.berg@intel.com>
Date:   2023-09-14 15:45:17 +02:00
Parent: 6e48ebffc2
Commit: 2c3dfba4cf
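
To make the race concrete, here is a minimal userspace sketch (illustrative
only, not part of this commit) of the reader it affects. On open,
/dev/rfkill queues one RFKILL_OP_ADD event per registered instance; before
this fix, the soft/hard fields of those events could still reflect the
pre-sync state if the open raced with rfkill_register().

	/*
	 * Illustrative /dev/rfkill reader, not part of this commit.
	 * Each read() returns one struct rfkill_event; O_NONBLOCK makes
	 * read() fail with EAGAIN once the initial ADD events are drained.
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <linux/rfkill.h>

	int main(void)
	{
		struct rfkill_event ev;
		int fd = open("/dev/rfkill", O_RDONLY | O_NONBLOCK);

		if (fd < 0) {
			perror("open /dev/rfkill");
			return 1;
		}

		while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
			if (ev.op == RFKILL_OP_ADD)
				printf("idx=%u type=%u soft=%u hard=%u\n",
				       ev.idx, ev.type, ev.soft, ev.hard);
		}

		close(fd);
		return 0;
	}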

--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c

@@ -48,6 +48,7 @@ struct rfkill {
 	bool			persistent;
 	bool			polling_paused;
 	bool			suspended;
+	bool			need_sync;
 
 	const struct rfkill_ops	*ops;
 	void			*data;
@@ -368,6 +369,17 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
 		rfkill_event(rfkill);
 }
 
+static void rfkill_sync(struct rfkill *rfkill)
+{
+	lockdep_assert_held(&rfkill_global_mutex);
+
+	if (!rfkill->need_sync)
+		return;
+
+	rfkill_set_block(rfkill, rfkill_global_states[rfkill->type].cur);
+	rfkill->need_sync = false;
+}
+
 static void rfkill_update_global_state(enum rfkill_type type, bool blocked)
 {
 	int i;
@@ -730,6 +742,10 @@ static ssize_t soft_show(struct device *dev, struct device_attribute *attr,
 {
 	struct rfkill *rfkill = to_rfkill(dev);
 
+	mutex_lock(&rfkill_global_mutex);
+	rfkill_sync(rfkill);
+	mutex_unlock(&rfkill_global_mutex);
+
 	return sysfs_emit(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0);
 }
@@ -751,6 +767,7 @@ static ssize_t soft_store(struct device *dev, struct device_attribute *attr,
 		return -EINVAL;
 
 	mutex_lock(&rfkill_global_mutex);
+	rfkill_sync(rfkill);
 	rfkill_set_block(rfkill, state);
 	mutex_unlock(&rfkill_global_mutex);
@@ -783,6 +800,10 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
 {
 	struct rfkill *rfkill = to_rfkill(dev);
 
+	mutex_lock(&rfkill_global_mutex);
+	rfkill_sync(rfkill);
+	mutex_unlock(&rfkill_global_mutex);
+
 	return sysfs_emit(buf, "%d\n", user_state_from_blocked(rfkill->state));
 }
@@ -805,6 +826,7 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
 		return -EINVAL;
 
 	mutex_lock(&rfkill_global_mutex);
+	rfkill_sync(rfkill);
 	rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
 	mutex_unlock(&rfkill_global_mutex);
@@ -1032,14 +1054,10 @@ static void rfkill_uevent_work(struct work_struct *work)
 
 static void rfkill_sync_work(struct work_struct *work)
 {
-	struct rfkill *rfkill;
-	bool cur;
-
-	rfkill = container_of(work, struct rfkill, sync_work);
+	struct rfkill *rfkill = container_of(work, struct rfkill, sync_work);
 
 	mutex_lock(&rfkill_global_mutex);
-	cur = rfkill_global_states[rfkill->type].cur;
-	rfkill_set_block(rfkill, cur);
+	rfkill_sync(rfkill);
 	mutex_unlock(&rfkill_global_mutex);
 }
@ -1087,6 +1105,7 @@ int __must_check rfkill_register(struct rfkill *rfkill)
round_jiffies_relative(POLL_INTERVAL)); round_jiffies_relative(POLL_INTERVAL));
if (!rfkill->persistent || rfkill_epo_lock_active) { if (!rfkill->persistent || rfkill_epo_lock_active) {
rfkill->need_sync = true;
schedule_work(&rfkill->sync_work); schedule_work(&rfkill->sync_work);
} else { } else {
#ifdef CONFIG_RFKILL_INPUT #ifdef CONFIG_RFKILL_INPUT
@@ -1171,6 +1190,7 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
 		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
 		if (!ev)
 			goto free;
+		rfkill_sync(rfkill);
 		rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
 		list_add_tail(&ev->list, &data->events);
 	}
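
Design note: the need_sync flag makes rfkill_sync() idempotent. Every path
that exposes or changes state to userspace (the sysfs show/store handlers,
/dev/rfkill open, and the sync worker itself) takes rfkill_global_mutex and
syncs first, so whichever path runs first performs the one-time sync and the
rest see need_sync == false; the deferred work can no longer overwrite a
state userspace has already read or changed.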