dm crypt: use unbound workqueue for request processing

Use unbound workqueue by default so that work is automatically balanced
between available CPUs.  The original behavior of encrypting using the
same cpu that IO was submitted on can still be enabled by setting the
optional 'same_cpu_crypt' table argument.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
This commit is contained in:
Mikulas Patocka
2015-02-13 08:23:09 -05:00
committed by Mike Snitzer
parent 37527b8692
commit f3396c58fd
2 changed files with 40 additions and 16 deletions

View File

@@ -51,7 +51,7 @@ Parameters: <cipher> <key> <iv_offset> <device path> \
 Otherwise #opt_params is the number of following arguments.

 Example of optional parameters section:
-    1 allow_discards
+    2 allow_discards same_cpu_crypt

 allow_discards
     Block discard requests (a.k.a. TRIM) are passed through the crypt device.
@@ -63,6 +63,11 @@ allow_discards
     used space etc.) if the discarded blocks can be located easily on the
     device later.

+same_cpu_crypt
+    Perform encryption using the same cpu that IO was submitted on.
+    The default is to use an unbound workqueue so that encryption work
+    is automatically balanced between available CPUs.
+
 Example scripts
 ===============
 LUKS (Linux Unified Key Setup) is now the preferred way to set up disk

View File

@@ -108,7 +108,7 @@ struct iv_tcw_private {
  * Crypt: maps a linear range of a block device
  * and encrypts / decrypts at the same time.
  */
-enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
+enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, DM_CRYPT_SAME_CPU };

 /*
  * The fields in here must be read only after initialization.
@@ -1688,7 +1688,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	char dummy;

 	static struct dm_arg _args[] = {
-		{0, 1, "Invalid number of feature args"},
+		{0, 2, "Invalid number of feature args"},
 	};

 	if (argc < 5) {
@@ -1788,17 +1788,25 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	if (ret)
 		goto bad;

-	opt_string = dm_shift_arg(&as);
+	while (opt_params--) {
+		opt_string = dm_shift_arg(&as);
+		if (!opt_string) {
+			ti->error = "Not enough feature arguments";
+			goto bad;
+		}

-	if (opt_params == 1 && opt_string &&
-	    !strcasecmp(opt_string, "allow_discards"))
-		ti->num_discard_bios = 1;
-	else if (opt_params) {
-		ret = -EINVAL;
-		ti->error = "Invalid feature arguments";
-		goto bad;
+		if (!strcasecmp(opt_string, "allow_discards"))
+			ti->num_discard_bios = 1;
+
+		else if (!strcasecmp(opt_string, "same_cpu_crypt"))
+			set_bit(DM_CRYPT_SAME_CPU, &cc->flags);
+
+		else {
+			ti->error = "Invalid feature arguments";
+			goto bad;
+		}
 	}

 	ret = -ENOMEM;
 	cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
@@ -1807,8 +1815,11 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad;
 	}

-	cc->crypt_queue = alloc_workqueue("kcryptd",
-					  WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
+	if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
+		cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
+	else
+		cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
+						  num_online_cpus());
 	if (!cc->crypt_queue) {
 		ti->error = "Couldn't create kcryptd queue";
 		goto bad;
@@ -1860,6 +1871,7 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
 {
 	struct crypt_config *cc = ti->private;
 	unsigned i, sz = 0;
+	int num_feature_args = 0;

 	switch (type) {
 	case STATUSTYPE_INFO:
@@ -1878,8 +1890,15 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
 		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
 				cc->dev->name, (unsigned long long)cc->start);

-		if (ti->num_discard_bios)
-			DMEMIT(" 1 allow_discards");
+		num_feature_args += !!ti->num_discard_bios;
+		num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
+		if (num_feature_args) {
+			DMEMIT(" %d", num_feature_args);
+			if (ti->num_discard_bios)
+				DMEMIT(" allow_discards");
+			if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
+				DMEMIT(" same_cpu_crypt");
+		}

 		break;
 	}
@@ -1976,7 +1995,7 @@ static int crypt_iterate_devices(struct dm_target *ti,
 static struct target_type crypt_target = {
 	.name   = "crypt",
-	.version = {1, 13, 0},
+	.version = {1, 14, 0},
 	.module = THIS_MODULE,
 	.ctr    = crypt_ctr,
 	.dtr    = crypt_dtr,