Merge tag 'for-6.1/dm-changes-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper updates from Mike Snitzer:

 - Fix dm-bufio to use test_bit_acquire to properly test_bit on arches
   with weaker memory ordering.

 - DM core: replace DMWARN with DMERR or DMCRIT for fatal errors.

 - Enable WQ_HIGHPRI on DM verity target's verify_wq.

 - Add documentation for DM verity's try_verify_in_tasklet option.

 - Various typo and redundant word fixes in code and/or comments.

* tag 'for-6.1/dm-changes-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm clone: Fix typo in block_device format specifier
  dm: remove unnecessary assignment statement in alloc_dev()
  dm verity: Add documentation for try_verify_in_tasklet option
  dm cache: delete the redundant word 'each' in comment
  dm raid: fix typo in analyse_superblocks code comment
  dm verity: enable WQ_HIGHPRI on verify_wq
  dm raid: delete the redundant word 'that' in comment
  dm: change from DMWARN to DMERR or DMCRIT for fatal errors
  dm bufio: use the acquire memory barrier when testing for B_READING
--- a/Documentation/admin-guide/device-mapper/verity.rst
+++ b/Documentation/admin-guide/device-mapper/verity.rst
@@ -141,6 +141,10 @@ root_hash_sig_key_desc <key_description>
     also gain new certificates at run time if they are signed by a certificate
     already in the secondary trusted keyring.
 
+try_verify_in_tasklet
+    If verity hashes are in cache, verify data blocks in kernel tasklet instead
+    of workqueue. This option can reduce IO latency.
+
 Theory of operation
 ===================
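
The documentation added above describes a "try in atomic context, fall back to process context" scheme. A hedged sketch of that general shape follows (this is not the dm-verity source; all my_* names are hypothetical):

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_verify_wq;	/* hypothetical */

struct my_io {
	struct tasklet_struct tasklet;
	struct work_struct work;
};

/* Hypothetical: succeeds only if every needed hash block is already cached. */
static int my_verify_cached_only(struct my_io *io)
{
	return -EAGAIN;
}

/* Process context: may sleep, e.g. to read hash blocks from disk. */
static void my_verify_work(struct work_struct *w)
{
	struct my_io *io = container_of(w, struct my_io, work);

	(void)io;	/* full, sleepable verification would go here */
}

/* Softirq context: must not sleep, so only cached hashes can be used. */
static void my_verify_tasklet(struct tasklet_struct *t)
{
	struct my_io *io = from_tasklet(io, t, tasklet);

	if (my_verify_cached_only(io))
		queue_work(my_verify_wq, &io->work);	/* fall back */
}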
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -795,7 +795,8 @@ static void __make_buffer_clean(struct dm_buffer *b)
 {
 	BUG_ON(b->hold_count);
 
-	if (!b->state)	/* fast case */
+	/* smp_load_acquire() pairs with read_endio()'s smp_mb__before_atomic() */
+	if (!smp_load_acquire(&b->state))	/* fast case */
 		return;
 
 	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
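
The comment added above relies on the standard publish/consume pairing for bits in a flags word; test_bit_acquire() (added for v6.0) gives the read side acquire semantics. A minimal sketch with hypothetical names (buf, buffer_done, buffer_usable are not the dm-bufio identifiers):

#include <linux/bitops.h>
#include <linux/wait_bit.h>

#define B_READING 0	/* bit index, as in dm-bufio */

struct buf {
	unsigned long state;
	int read_error;
};

/* I/O completion side: publish the result, then clear the flag. */
static void buffer_done(struct buf *b, int error)
{
	b->read_error = error;
	smp_mb__before_atomic();	/* order the store above before clear_bit() */
	clear_bit(B_READING, &b->state);
	wake_up_bit(&b->state, B_READING);
}

/* Consumer side: the acquire pairs with the release-style barrier above. */
static bool buffer_usable(struct buf *b)
{
	/*
	 * A plain test_bit() would let a weakly ordered CPU read
	 * b->read_error before it observes the cleared flag;
	 * test_bit_acquire() forbids that reordering.
	 */
	return !test_bit_acquire(B_READING, &b->state);
}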
@@ -816,7 +817,7 @@ static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
 		BUG_ON(test_bit(B_DIRTY, &b->state));
 
 		if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
-		    unlikely(test_bit(B_READING, &b->state)))
+		    unlikely(test_bit_acquire(B_READING, &b->state)))
 			continue;
 
 		if (!b->hold_count) {
@@ -1058,7 +1059,7 @@ found_buffer:
 	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
 	 * the same buffer, it would deadlock if we waited.
 	 */
-	if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
+	if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state)))
 		return NULL;
 
 	b->hold_count++;
@@ -1218,7 +1219,7 @@ void dm_bufio_release(struct dm_buffer *b)
 		 * invalid buffer.
 		 */
 		if ((b->read_error || b->write_error) &&
-		    !test_bit(B_READING, &b->state) &&
+		    !test_bit_acquire(B_READING, &b->state) &&
 		    !test_bit(B_WRITING, &b->state) &&
 		    !test_bit(B_DIRTY, &b->state)) {
 			__unlink_buffer(b);
@@ -1479,7 +1480,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_release_move);
 
 static void forget_buffer_locked(struct dm_buffer *b)
 {
-	if (likely(!b->hold_count) && likely(!b->state)) {
+	if (likely(!b->hold_count) && likely(!smp_load_acquire(&b->state))) {
 		__unlink_buffer(b);
 		__free_buffer_wake(b);
 	}
@@ -1639,7 +1640,7 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
 {
 	if (!(gfp & __GFP_FS) ||
 	    (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
-		if (test_bit(B_READING, &b->state) ||
+		if (test_bit_acquire(B_READING, &b->state) ||
 		    test_bit(B_WRITING, &b->state) ||
 		    test_bit(B_DIRTY, &b->state))
 			return false;
--- a/drivers/md/dm-cache-policy.h
+++ b/drivers/md/dm-cache-policy.h
@@ -166,7 +166,7 @@ struct dm_cache_policy_type {
 	struct dm_cache_policy_type *real;
 
 	/*
-	 * Policies may store a hint for each each cache block.
+	 * Policies may store a hint for each cache block.
 	 * Currently the size of this hint must be 0 or 4 bytes but we
 	 * expect to relax this in future.
 	 */
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -2035,7 +2035,7 @@ static void disable_passdown_if_not_supported(struct clone *clone)
 		reason = "max discard sectors smaller than a region";
 
 	if (reason) {
-		DMWARN("Destination device (%pd) %s: Disabling discard passdown.",
+		DMWARN("Destination device (%pg) %s: Disabling discard passdown.",
 		       dest_dev, reason);
 		clear_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);
 	}
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -434,10 +434,10 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
 	hc = __get_name_cell(new);
 
 	if (hc) {
-		DMWARN("Unable to change %s on mapped device %s to one that "
-		       "already exists: %s",
-		       change_uuid ? "uuid" : "name",
-		       param->name, new);
+		DMERR("Unable to change %s on mapped device %s to one that "
+		      "already exists: %s",
+		      change_uuid ? "uuid" : "name",
+		      param->name, new);
 		dm_put(hc->md);
 		up_write(&_hash_lock);
 		kfree(new_data);
@@ -449,8 +449,8 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
 	 */
 	hc = __get_name_cell(param->name);
 	if (!hc) {
-		DMWARN("Unable to rename non-existent device, %s to %s%s",
-		       param->name, change_uuid ? "uuid " : "", new);
+		DMERR("Unable to rename non-existent device, %s to %s%s",
+		      param->name, change_uuid ? "uuid " : "", new);
 		up_write(&_hash_lock);
 		kfree(new_data);
 		return ERR_PTR(-ENXIO);
@@ -460,9 +460,9 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
 	 * Does this device already have a uuid?
 	 */
 	if (change_uuid && hc->uuid) {
-		DMWARN("Unable to change uuid of mapped device %s to %s "
-		       "because uuid is already set to %s",
-		       param->name, new, hc->uuid);
+		DMERR("Unable to change uuid of mapped device %s to %s "
+		      "because uuid is already set to %s",
+		      param->name, new, hc->uuid);
 		dm_put(hc->md);
 		up_write(&_hash_lock);
 		kfree(new_data);
@@ -750,7 +750,7 @@ static int get_target_version(struct file *filp, struct dm_ioctl *param, size_t
 static int check_name(const char *name)
 {
 	if (strchr(name, '/')) {
-		DMWARN("invalid device name");
+		DMERR("invalid device name");
 		return -EINVAL;
 	}
 
@@ -773,7 +773,7 @@ static struct dm_table *dm_get_inactive_table(struct mapped_device *md, int *src
 	down_read(&_hash_lock);
 	hc = dm_get_mdptr(md);
 	if (!hc || hc->md != md) {
-		DMWARN("device has been removed from the dev hash table.");
+		DMERR("device has been removed from the dev hash table.");
 		goto out;
 	}
 
@@ -1026,7 +1026,7 @@ static int dev_rename(struct file *filp, struct dm_ioctl *param, size_t param_si
 	if (new_data < param->data ||
 	    invalid_str(new_data, (void *) param + param_size) || !*new_data ||
 	    strlen(new_data) > (change_uuid ? DM_UUID_LEN - 1 : DM_NAME_LEN - 1)) {
-		DMWARN("Invalid new mapped device name or uuid string supplied.");
+		DMERR("Invalid new mapped device name or uuid string supplied.");
 		return -EINVAL;
 	}
 
@@ -1061,7 +1061,7 @@ static int dev_set_geometry(struct file *filp, struct dm_ioctl *param, size_t pa
 
 	if (geostr < param->data ||
 	    invalid_str(geostr, (void *) param + param_size)) {
-		DMWARN("Invalid geometry supplied.");
+		DMERR("Invalid geometry supplied.");
 		goto out;
 	}
 
@@ -1069,13 +1069,13 @@ static int dev_set_geometry(struct file *filp, struct dm_ioctl *param, size_t pa
 		   indata + 1, indata + 2, indata + 3, &dummy);
 
 	if (x != 4) {
-		DMWARN("Unable to interpret geometry settings.");
+		DMERR("Unable to interpret geometry settings.");
 		goto out;
 	}
 
 	if (indata[0] > 65535 || indata[1] > 255 ||
 	    indata[2] > 255 || indata[3] > ULONG_MAX) {
-		DMWARN("Geometry exceeds range limits.");
+		DMERR("Geometry exceeds range limits.");
 		goto out;
 	}
 
@@ -1387,7 +1387,7 @@ static int populate_table(struct dm_table *table,
 	char *target_params;
 
 	if (!param->target_count) {
-		DMWARN("populate_table: no targets specified");
+		DMERR("populate_table: no targets specified");
 		return -EINVAL;
 	}
 
@@ -1395,7 +1395,7 @@ static int populate_table(struct dm_table *table,
 
 		r = next_target(spec, next, end, &spec, &target_params);
 		if (r) {
-			DMWARN("unable to find target");
+			DMERR("unable to find target");
 			return r;
 		}
 
@@ -1404,7 +1404,7 @@ static int populate_table(struct dm_table *table,
 					(sector_t) spec->length,
 					target_params);
 		if (r) {
-			DMWARN("error adding target to table");
+			DMERR("error adding target to table");
 			return r;
 		}
 
@@ -1451,8 +1451,8 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si
 		if (immutable_target_type &&
 		    (immutable_target_type != dm_table_get_immutable_target_type(t)) &&
 		    !dm_table_get_wildcard_target(t)) {
-			DMWARN("can't replace immutable target type %s",
-			       immutable_target_type->name);
+			DMERR("can't replace immutable target type %s",
+			      immutable_target_type->name);
 			r = -EINVAL;
 			goto err_unlock_md_type;
 		}
@@ -1461,12 +1461,12 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si
 		/* setup md->queue to reflect md's type (may block) */
 		r = dm_setup_md_queue(md, t);
 		if (r) {
-			DMWARN("unable to set up device queue for new table.");
+			DMERR("unable to set up device queue for new table.");
 			goto err_unlock_md_type;
 		}
 	} else if (!is_valid_type(dm_get_md_type(md), dm_table_get_type(t))) {
-		DMWARN("can't change device type (old=%u vs new=%u) after initial table load.",
-		       dm_get_md_type(md), dm_table_get_type(t));
+		DMERR("can't change device type (old=%u vs new=%u) after initial table load.",
+		      dm_get_md_type(md), dm_table_get_type(t));
 		r = -EINVAL;
 		goto err_unlock_md_type;
 	}
@@ -1477,7 +1477,7 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si
 	down_write(&_hash_lock);
 	hc = dm_get_mdptr(md);
 	if (!hc || hc->md != md) {
-		DMWARN("device has been removed from the dev hash table.");
+		DMERR("device has been removed from the dev hash table.");
 		up_write(&_hash_lock);
 		r = -ENXIO;
 		goto err_destroy_table;
@@ -1686,19 +1686,19 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para
 
 	if (tmsg < (struct dm_target_msg *) param->data ||
 	    invalid_str(tmsg->message, (void *) param + param_size)) {
-		DMWARN("Invalid target message parameters.");
+		DMERR("Invalid target message parameters.");
 		r = -EINVAL;
 		goto out;
 	}
 
 	r = dm_split_args(&argc, &argv, tmsg->message);
 	if (r) {
-		DMWARN("Failed to split target message parameters");
+		DMERR("Failed to split target message parameters");
 		goto out;
 	}
 
 	if (!argc) {
-		DMWARN("Empty message received.");
+		DMERR("Empty message received.");
 		r = -EINVAL;
 		goto out_argv;
 	}
@@ -1718,12 +1718,12 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para
 
 	ti = dm_table_find_target(table, tmsg->sector);
 	if (!ti) {
-		DMWARN("Target message sector outside device.");
+		DMERR("Target message sector outside device.");
 		r = -EINVAL;
 	} else if (ti->type->message)
 		r = ti->type->message(ti, argc, argv, result, maxlen);
 	else {
-		DMWARN("Target type does not support messages");
+		DMERR("Target type does not support messages");
 		r = -EINVAL;
 	}
 
@@ -1814,11 +1814,11 @@ static int check_version(unsigned int cmd, struct dm_ioctl __user *user)
 
 	if ((DM_VERSION_MAJOR != version[0]) ||
 	    (DM_VERSION_MINOR < version[1])) {
-		DMWARN("ioctl interface mismatch: "
-		       "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)",
-		       DM_VERSION_MAJOR, DM_VERSION_MINOR,
-		       DM_VERSION_PATCHLEVEL,
-		       version[0], version[1], version[2], cmd);
+		DMERR("ioctl interface mismatch: "
+		      "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)",
+		      DM_VERSION_MAJOR, DM_VERSION_MINOR,
+		      DM_VERSION_PATCHLEVEL,
+		      version[0], version[1], version[2], cmd);
 		r = -EINVAL;
 	}
 
@@ -1927,11 +1927,11 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
 
 	if (cmd == DM_DEV_CREATE_CMD) {
 		if (!*param->name) {
-			DMWARN("name not supplied when creating device");
+			DMERR("name not supplied when creating device");
 			return -EINVAL;
 		}
 	} else if (*param->uuid && *param->name) {
-		DMWARN("only supply one of name or uuid, cmd(%u)", cmd);
+		DMERR("only supply one of name or uuid, cmd(%u)", cmd);
 		return -EINVAL;
 	}
 
@@ -1978,7 +1978,7 @@ static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *us
 
 	fn = lookup_ioctl(cmd, &ioctl_flags);
 	if (!fn) {
-		DMWARN("dm_ctl_ioctl: unknown command 0x%x", command);
+		DMERR("dm_ctl_ioctl: unknown command 0x%x", command);
 		return -ENOTTY;
 	}
 
@@ -2203,7 +2203,7 @@ int __init dm_early_create(struct dm_ioctl *dmi,
 					(sector_t) spec_array[i]->length,
 					target_params_array[i]);
 		if (r) {
-			DMWARN("error adding target to table");
+			DMERR("error adding target to table");
 			goto err_destroy_table;
 		}
 	}
@@ -2216,7 +2216,7 @@ int __init dm_early_create(struct dm_ioctl *dmi,
 	/* setup md->queue to reflect md's type (may block) */
 	r = dm_setup_md_queue(md, t);
 	if (r) {
-		DMWARN("unable to set up device queue for new table.");
+		DMERR("unable to set up device queue for new table.");
 		goto err_destroy_table;
 	}
 
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -2529,7 +2529,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
 	 * of the "sync" directive.
 	 *
 	 * With reshaping capability added, we must ensure that
-	 * that the "sync" directive is disallowed during the reshape.
+	 * the "sync" directive is disallowed during the reshape.
 	 */
 	if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
 		continue;
@@ -2590,7 +2590,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
 
 	/*
 	 * Adjust data_offset and new_data_offset on all disk members of @rs
-	 * for out of place reshaping if requested by contructor
+	 * for out of place reshaping if requested by constructor
 	 *
 	 * We need free space at the beginning of each raid disk for forward
 	 * and at the end for backward reshapes which userspace has to provide
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -238,7 +238,7 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
 		dm_requeue_original_request(tio, true);
 		break;
 	default:
-		DMWARN("unimplemented target endio return value: %d", r);
+		DMCRIT("unimplemented target endio return value: %d", r);
 		BUG();
 	}
 }
@@ -409,7 +409,7 @@ static int map_request(struct dm_rq_target_io *tio)
 		dm_kill_unmapped_request(rq, BLK_STS_IOERR);
 		break;
 	default:
-		DMWARN("unimplemented target map return value: %d", r);
+		DMCRIT("unimplemented target map return value: %d", r);
 		BUG();
 	}
 
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -1220,7 +1220,7 @@ int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
 		return 2; /* this wasn't a stats message */
 
 	if (r == -EINVAL)
-		DMWARN("Invalid parameters for message %s", argv[0]);
+		DMCRIT("Invalid parameters for message %s", argv[0]);
 
 	return r;
 }
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -234,12 +234,12 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
 		return 0;
 
 	if ((start >= dev_size) || (start + len > dev_size)) {
-		DMWARN("%s: %pg too small for target: "
-		       "start=%llu, len=%llu, dev_size=%llu",
-		       dm_device_name(ti->table->md), bdev,
-		       (unsigned long long)start,
-		       (unsigned long long)len,
-		       (unsigned long long)dev_size);
+		DMERR("%s: %pg too small for target: "
+		      "start=%llu, len=%llu, dev_size=%llu",
+		      dm_device_name(ti->table->md), bdev,
+		      (unsigned long long)start,
+		      (unsigned long long)len,
+		      (unsigned long long)dev_size);
 		return 1;
 	}
 
@@ -251,10 +251,10 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
 		unsigned int zone_sectors = bdev_zone_sectors(bdev);
 
 		if (start & (zone_sectors - 1)) {
-			DMWARN("%s: start=%llu not aligned to h/w zone size %u of %pg",
-			       dm_device_name(ti->table->md),
-			       (unsigned long long)start,
-			       zone_sectors, bdev);
+			DMERR("%s: start=%llu not aligned to h/w zone size %u of %pg",
+			      dm_device_name(ti->table->md),
+			      (unsigned long long)start,
+			      zone_sectors, bdev);
 			return 1;
 		}
 
@@ -268,10 +268,10 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
 		 * the sector range.
 		 */
 		if (len & (zone_sectors - 1)) {
-			DMWARN("%s: len=%llu not aligned to h/w zone size %u of %pg",
-			       dm_device_name(ti->table->md),
-			       (unsigned long long)len,
-			       zone_sectors, bdev);
+			DMERR("%s: len=%llu not aligned to h/w zone size %u of %pg",
+			      dm_device_name(ti->table->md),
+			      (unsigned long long)len,
+			      zone_sectors, bdev);
 			return 1;
 		}
 	}
@@ -280,20 +280,20 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
 		return 0;
 
 	if (start & (logical_block_size_sectors - 1)) {
-		DMWARN("%s: start=%llu not aligned to h/w "
-		       "logical block size %u of %pg",
-		       dm_device_name(ti->table->md),
-		       (unsigned long long)start,
-		       limits->logical_block_size, bdev);
+		DMERR("%s: start=%llu not aligned to h/w "
+		      "logical block size %u of %pg",
+		      dm_device_name(ti->table->md),
+		      (unsigned long long)start,
+		      limits->logical_block_size, bdev);
 		return 1;
 	}
 
 	if (len & (logical_block_size_sectors - 1)) {
-		DMWARN("%s: len=%llu not aligned to h/w "
-		       "logical block size %u of %pg",
-		       dm_device_name(ti->table->md),
-		       (unsigned long long)len,
-		       limits->logical_block_size, bdev);
+		DMERR("%s: len=%llu not aligned to h/w "
+		      "logical block size %u of %pg",
+		      dm_device_name(ti->table->md),
+		      (unsigned long long)len,
+		      limits->logical_block_size, bdev);
 		return 1;
 	}
 
@@ -434,8 +434,8 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d)
 		}
 	}
 	if (!found) {
-		DMWARN("%s: device %s not in table devices list",
-		       dm_device_name(ti->table->md), d->name);
+		DMERR("%s: device %s not in table devices list",
+		      dm_device_name(ti->table->md), d->name);
 		return;
 	}
 	if (refcount_dec_and_test(&dd->count)) {
@@ -618,12 +618,12 @@ static int validate_hardware_logical_block_alignment(struct dm_table *t,
 	}
 
 	if (remaining) {
-		DMWARN("%s: table line %u (start sect %llu len %llu) "
-		       "not aligned to h/w logical block size %u",
-		       dm_device_name(t->md), i,
-		       (unsigned long long) ti->begin,
-		       (unsigned long long) ti->len,
-		       limits->logical_block_size);
+		DMERR("%s: table line %u (start sect %llu len %llu) "
+		      "not aligned to h/w logical block size %u",
+		      dm_device_name(t->md), i,
+		      (unsigned long long) ti->begin,
+		      (unsigned long long) ti->len,
+		      limits->logical_block_size);
 		return -EINVAL;
 	}
 
@@ -1008,7 +1008,7 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
 	struct dm_md_mempools *pools;
 
 	if (unlikely(type == DM_TYPE_NONE)) {
-		DMWARN("no table type is set, can't allocate mempools");
+		DMERR("no table type is set, can't allocate mempools");
 		return -EINVAL;
 	}
 
@@ -1112,7 +1112,7 @@ static bool integrity_profile_exists(struct gendisk *disk)
  * Get a disk whose integrity profile reflects the table's profile.
  * Returns NULL if integrity support was inconsistent or unavailable.
  */
-static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t)
+static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t)
 {
 	struct list_head *devices = dm_table_get_devices(t);
 	struct dm_dev_internal *dd = NULL;
@@ -1185,10 +1185,10 @@ static int dm_table_register_integrity(struct dm_table *t)
 	 * profile the new profile should not conflict.
 	 */
 	if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
-		DMWARN("%s: conflict with existing integrity profile: "
-		       "%s profile mismatch",
-		       dm_device_name(t->md),
-		       template_disk->disk_name);
+		DMERR("%s: conflict with existing integrity profile: "
+		      "%s profile mismatch",
+		      dm_device_name(t->md),
+		      template_disk->disk_name);
 		return 1;
 	}
 
@@ -1327,7 +1327,7 @@ static int dm_table_construct_crypto_profile(struct dm_table *t)
 	if (t->md->queue &&
 	    !blk_crypto_has_capabilities(profile,
 					 t->md->queue->crypto_profile)) {
-		DMWARN("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!");
+		DMERR("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!");
 		dm_destroy_crypto_profile(profile);
 		return -EINVAL;
 	}
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -1401,14 +1401,16 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
 	/* WQ_UNBOUND greatly improves performance when running on ramdisk */
 	wq_flags = WQ_MEM_RECLAIM | WQ_UNBOUND;
-	if (v->use_tasklet) {
-		/*
-		 * Allow verify_wq to preempt softirq since verification in
-		 * tasklet will fall-back to using it for error handling
-		 * (or if the bufio cache doesn't have required hashes).
-		 */
-		wq_flags |= WQ_HIGHPRI;
-	}
+	/*
+	 * Using WQ_HIGHPRI improves throughput and completion latency by
+	 * reducing wait times when reading from a dm-verity device.
+	 *
+	 * Also as required for the "try_verify_in_tasklet" feature: WQ_HIGHPRI
+	 * allows verify_wq to preempt softirq since verification in tasklet
+	 * will fall-back to using it for error handling (or if the bufio cache
+	 * doesn't have required hashes).
+	 */
+	wq_flags |= WQ_HIGHPRI;
 	v->verify_wq = alloc_workqueue("kverityd", wq_flags, num_online_cpus());
 	if (!v->verify_wq) {
 		ti->error = "Cannot allocate workqueue";
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -864,7 +864,7 @@ int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
 	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
 
 	if (geo->start > sz) {
-		DMWARN("Start sector is beyond the geometry limits.");
+		DMERR("Start sector is beyond the geometry limits.");
 		return -EINVAL;
 	}
 
@@ -1149,7 +1149,7 @@ static void clone_endio(struct bio *bio)
 		/* The target will handle the io */
 		return;
 	default:
-		DMWARN("unimplemented target endio return value: %d", r);
+		DMCRIT("unimplemented target endio return value: %d", r);
 		BUG();
 	}
 }
@@ -1455,7 +1455,7 @@ static void __map_bio(struct bio *clone)
 		dm_io_dec_pending(io, BLK_STS_DM_REQUEUE);
 		break;
 	default:
-		DMWARN("unimplemented target map return value: %d", r);
+		DMCRIT("unimplemented target map return value: %d", r);
 		BUG();
 	}
 }
@@ -2005,7 +2005,7 @@ static struct mapped_device *alloc_dev(int minor)
 
 	md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
 	if (!md) {
-		DMWARN("unable to allocate device, out of memory.");
+		DMERR("unable to allocate device, out of memory.");
 		return NULL;
 	}
 
@@ -2065,7 +2065,6 @@ static struct mapped_device *alloc_dev(int minor)
 	md->disk->minors = 1;
 	md->disk->flags |= GENHD_FL_NO_PART;
 	md->disk->fops = &dm_blk_dops;
-	md->disk->queue = md->queue;
 	md->disk->private_data = md;
 	sprintf(md->disk->disk_name, "dm-%d", minor);
 