regmap: debugfs: Don't sleep while atomic for fast_io regmaps

If a regmap has "fast_io" set then its lock function uses a spinlock.
That doesn't work so well with these functions:
* regmap_cache_only_write_file()
* regmap_cache_bypass_write_file()

Both of the above functions have the pattern:
1. Lock the regmap.
2. Call:
   debugfs_write_file_bool()
     copy_from_user()
       __might_fault()
         __might_sleep()

copy_from_user() can fault and sleep, and sleeping while holding a
spinlock isn't allowed. Let's reorder things a bit so that we do all
of our sleepable work before we grab the lock.
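
As an editorial illustration (not part of the patch), here is a minimal sketch of the reordered pattern in a generic debugfs write handler. my_dev, my_flag_write and the spinlock are made-up stand-ins for the regmap internals; kstrtobool_from_user(), debugfs_file_get() and debugfs_file_put() are the same helpers the change below uses. It assumes .open = simple_open so file->private_data carries the device pointer.

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

/* Made-up device: the spinlock stands in for a fast_io regmap's lock. */
struct my_dev {
	spinlock_t lock;
	bool flag;
};

static ssize_t my_flag_write(struct file *file, const char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct my_dev *dev = file->private_data;	/* set by simple_open() */
	bool new_val;
	int err;

	/*
	 * copy_from_user() inside kstrtobool_from_user() may fault and
	 * sleep, so parse the input before any spinlock is taken.
	 */
	err = kstrtobool_from_user(user_buf, count, &new_val);
	if (err)
		return count;	/* ignore malformed input, like debugfs_write_file_bool() */

	/* Take the debugfs file reference outside the lock too, as the patch does. */
	err = debugfs_file_get(file->f_path.dentry);
	if (err)
		return err;

	spin_lock(&dev->lock);		/* atomic section: no sleeping below */
	dev->flag = new_val;
	spin_unlock(&dev->lock);

	debugfs_file_put(file->f_path.dentry);
	return count;
}

The point is simply that every call which may fault or otherwise sleep happens before spin_lock(), mirroring what the real change does around map->lock()/map->unlock() for fast_io regmaps.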

Fixes: d3dc5430d6 ("regmap: debugfs: Allow writes to cache state settings")
Signed-off-by: Douglas Anderson <dianders@chromium.org>
Link: https://lore.kernel.org/r/20200715164611.1.I35b3533e8a80efde0cec1cc70f71e1e74b2fa0da@changeid
Signed-off-by: Mark Brown <broonie@kernel.org>
Douglas Anderson, 2020-07-15 16:46:15 -07:00; committed by Mark Brown
parent 443a34ba68
commit 299632e54b

drivers/base/regmap/regmap-debugfs.c

@@ -457,29 +457,31 @@ static ssize_t regmap_cache_only_write_file(struct file *file,
 {
 	struct regmap *map = container_of(file->private_data,
 					  struct regmap, cache_only);
-	ssize_t result;
-	bool was_enabled, require_sync = false;
+	bool new_val, require_sync = false;
 	int err;
 
+	err = kstrtobool_from_user(user_buf, count, &new_val);
+	/* Ignore malforned data like debugfs_write_file_bool() */
+	if (err)
+		return count;
+
+	err = debugfs_file_get(file->f_path.dentry);
+	if (err)
+		return err;
+
 	map->lock(map->lock_arg);
 
-	was_enabled = map->cache_only;
-
-	result = debugfs_write_file_bool(file, user_buf, count, ppos);
-	if (result < 0) {
-		map->unlock(map->lock_arg);
-		return result;
-	}
-
-	if (map->cache_only && !was_enabled) {
+	if (new_val && !map->cache_only) {
 		dev_warn(map->dev, "debugfs cache_only=Y forced\n");
 		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
-	} else if (!map->cache_only && was_enabled) {
+	} else if (!new_val && map->cache_only) {
 		dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
 		require_sync = true;
 	}
+	map->cache_only = new_val;
 
 	map->unlock(map->lock_arg);
+	debugfs_file_put(file->f_path.dentry);
 
 	if (require_sync) {
 		err = regcache_sync(map);
@@ -487,7 +489,7 @@ static ssize_t regmap_cache_only_write_file(struct file *file,
 		dev_err(map->dev, "Failed to sync cache %d\n", err);
 	}
 
-	return result;
+	return count;
 }
 
 static const struct file_operations regmap_cache_only_fops = {
@@ -502,28 +504,32 @@ static ssize_t regmap_cache_bypass_write_file(struct file *file,
 {
 	struct regmap *map = container_of(file->private_data,
 					  struct regmap, cache_bypass);
-	ssize_t result;
-	bool was_enabled;
+	bool new_val;
+	int err;
 
+	err = kstrtobool_from_user(user_buf, count, &new_val);
+	/* Ignore malforned data like debugfs_write_file_bool() */
+	if (err)
+		return count;
+
+	err = debugfs_file_get(file->f_path.dentry);
+	if (err)
+		return err;
+
 	map->lock(map->lock_arg);
 
-	was_enabled = map->cache_bypass;
-
-	result = debugfs_write_file_bool(file, user_buf, count, ppos);
-	if (result < 0)
-		goto out;
-
-	if (map->cache_bypass && !was_enabled) {
+	if (new_val && !map->cache_bypass) {
 		dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
 		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
-	} else if (!map->cache_bypass && was_enabled) {
+	} else if (!new_val && map->cache_bypass) {
 		dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
 	}
+	map->cache_bypass = new_val;
 
-out:
 	map->unlock(map->lock_arg);
+	debugfs_file_put(file->f_path.dentry);
 
-	return result;
+	return count;
 }
 
 static const struct file_operations regmap_cache_bypass_fops = {