commit 49008f0cc1
Merge tag 'for-5.17/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper updates from Mike Snitzer:

 - Fixes and improvements to the dm btree and dm space map code in the
   persistent-data library used by the thin-provisioning and cache
   targets

 - Update DM integrity to use struct_group() to zero struct
   journal_sector

 - Update DM sysfs to use default_groups in kobj_type

* tag 'for-5.17/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm sysfs: use default_groups in kobj_type
  dm integrity: Use struct_group() to zero struct journal_sector
  dm space map common: add bounds check to sm_ll_lookup_bitmap()
  dm btree: add a defensive bounds check to insert_at()
  dm btree remove: change a bunch of BUG_ON() calls to proper errors
  dm btree spine: eliminate duplicate le32_to_cpu() in node_check()
  dm btree spine: remove extra node_check function declaration
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -121,8 +121,10 @@ struct journal_entry {
 #define JOURNAL_MAC_SIZE (JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)
 
 struct journal_sector {
-	__u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
-	__u8 mac[JOURNAL_MAC_PER_SECTOR];
+	struct_group(sectors,
+		__u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
+		__u8 mac[JOURNAL_MAC_PER_SECTOR];
+	);
 	commit_id_t commit_id;
 };
 
@@ -2870,7 +2872,8 @@ static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
 		wraparound_section(ic, &i);
 		for (j = 0; j < ic->journal_section_sectors; j++) {
 			struct journal_sector *js = access_journal(ic, i, j);
-			memset(&js->entries, 0, JOURNAL_SECTOR_DATA);
+			BUILD_BUG_ON(sizeof(js->sectors) != JOURNAL_SECTOR_DATA);
+			memset(&js->sectors, 0, sizeof(js->sectors));
 			js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
 		}
 		for (j = 0; j < ic->journal_section_entries; j++) {
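
Aside: struct_group() (include/linux/stddef.h) exists so that FORTIFY_SOURCE-aware memset()/memcpy() can write across several adjacent members as one named object instead of a field-spanning write. A minimal userspace sketch of the idea follows; the macro is simplified from the kernel's definition and the array sizes are invented for the demo.

/*
 * Minimal userspace sketch of struct_group(): the members are wrapped in
 * an anonymous union so they stay addressable under their old names,
 * while the named mirror gives memset()/sizeof() a single object that
 * covers exactly the grouped span. Simplified from the kernel macro in
 * include/linux/stddef.h; the sizes here are made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define struct_group(NAME, MEMBERS...)          \
        union {                                 \
                struct { MEMBERS };             \
                struct { MEMBERS } NAME;        \
        }

struct journal_sector_demo {
        struct_group(sectors,
                uint8_t entries[24];
                uint8_t mac[8];
        );
        uint64_t commit_id;
};

int main(void)
{
        struct journal_sector_demo js;

        /* One bounded write covers both members, as in init_journal(). */
        memset(&js.sectors, 0, sizeof(js.sectors));
        js.commit_id = 42;
        printf("group spans %zu bytes\n", sizeof(js.sectors));
        return 0;
}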
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -112,6 +112,7 @@ static struct attribute *dm_attrs[] = {
 	&dm_attr_rq_based_seq_io_merge_deadline.attr,
 	NULL,
 };
+ATTRIBUTE_GROUPS(dm);
 
 static const struct sysfs_ops dm_sysfs_ops = {
 	.show = dm_attr_show,
@@ -120,7 +121,7 @@ static const struct sysfs_ops dm_sysfs_ops = {
 
 static struct kobj_type dm_ktype = {
 	.sysfs_ops = &dm_sysfs_ops,
-	.default_attrs = dm_attrs,
+	.default_groups = dm_groups,
 	.release = dm_kobject_release,
 };
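
For context, ATTRIBUTE_GROUPS(dm) expands to roughly the following (see include/linux/sysfs.h): it wraps the existing dm_attrs[] array in an attribute_group and emits the NULL-terminated dm_groups[] array that .default_groups points at, which is what lets the removed .default_attrs field go away.

/*
 * Approximate expansion of ATTRIBUTE_GROUPS(dm): one group wrapping
 * dm_attrs[], plus a NULL-terminated array of group pointers for
 * kobj_type.default_groups to reference.
 */
static const struct attribute_group dm_group = {
        .attrs = dm_attrs,
};

static const struct attribute_group *dm_groups[] = {
        &dm_group,
        NULL,
};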
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -9,6 +9,9 @@
 #include "dm-transaction-manager.h"
 
 #include <linux/export.h>
+#include <linux/device-mapper.h>
+
+#define DM_MSG_PREFIX "btree"
 
 /*
  * Removing an entry from a btree
@@ -79,15 +82,23 @@ static void node_shift(struct btree_node *n, int shift)
 	}
 }
 
-static void node_copy(struct btree_node *left, struct btree_node *right, int shift)
+static int node_copy(struct btree_node *left, struct btree_node *right, int shift)
 {
 	uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
 	uint32_t value_size = le32_to_cpu(left->header.value_size);
-	BUG_ON(value_size != le32_to_cpu(right->header.value_size));
+	if (value_size != le32_to_cpu(right->header.value_size)) {
+		DMERR("mismatched value size");
+		return -EILSEQ;
+	}
 
 	if (shift < 0) {
 		shift = -shift;
-		BUG_ON(nr_left + shift > le32_to_cpu(left->header.max_entries));
+
+		if (nr_left + shift > le32_to_cpu(left->header.max_entries)) {
+			DMERR("bad shift");
+			return -EINVAL;
+		}
+
 		memcpy(key_ptr(left, nr_left),
 		       key_ptr(right, 0),
 		       shift * sizeof(__le64));
@@ -95,7 +106,11 @@ static void node_copy(struct btree_node *left, struct btree_node *right, int shi
 		       value_ptr(right, 0),
 		       shift * value_size);
 	} else {
-		BUG_ON(shift > le32_to_cpu(right->header.max_entries));
+		if (shift > le32_to_cpu(right->header.max_entries)) {
+			DMERR("bad shift");
+			return -EINVAL;
+		}
+
 		memcpy(key_ptr(right, 0),
 		       key_ptr(left, nr_left - shift),
 		       shift * sizeof(__le64));
@@ -103,6 +118,7 @@ static void node_copy(struct btree_node *left, struct btree_node *right, int shi
 		       value_ptr(left, nr_left - shift),
 		       shift * value_size);
 	}
+	return 0;
 }
 
 /*
@@ -170,35 +186,54 @@ static void exit_child(struct dm_btree_info *info, struct child *c)
 	dm_tm_unlock(info->tm, c->block);
 }
 
-static void shift(struct btree_node *left, struct btree_node *right, int count)
+static int shift(struct btree_node *left, struct btree_node *right, int count)
 {
+	int r;
 	uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
 	uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
 	uint32_t max_entries = le32_to_cpu(left->header.max_entries);
 	uint32_t r_max_entries = le32_to_cpu(right->header.max_entries);
 
-	BUG_ON(max_entries != r_max_entries);
-	BUG_ON(nr_left - count > max_entries);
-	BUG_ON(nr_right + count > max_entries);
+	if (max_entries != r_max_entries) {
+		DMERR("node max_entries mismatch");
+		return -EILSEQ;
+	}
+
+	if (nr_left - count > max_entries) {
+		DMERR("node shift out of bounds");
+		return -EINVAL;
+	}
+
+	if (nr_right + count > max_entries) {
+		DMERR("node shift out of bounds");
+		return -EINVAL;
+	}
 
 	if (!count)
-		return;
+		return 0;
 
 	if (count > 0) {
 		node_shift(right, count);
-		node_copy(left, right, count);
+		r = node_copy(left, right, count);
+		if (r)
+			return r;
 	} else {
-		node_copy(left, right, count);
+		r = node_copy(left, right, count);
+		if (r)
+			return r;
 		node_shift(right, count);
 	}
 
 	left->header.nr_entries = cpu_to_le32(nr_left - count);
 	right->header.nr_entries = cpu_to_le32(nr_right + count);
+
+	return 0;
 }
 
-static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
-			 struct child *l, struct child *r)
+static int __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
+			struct child *l, struct child *r)
 {
+	int ret;
 	struct btree_node *left = l->n;
 	struct btree_node *right = r->n;
 	uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
@@ -229,9 +264,12 @@ static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
 		 * Rebalance.
 		 */
 		unsigned target_left = (nr_left + nr_right) / 2;
-		shift(left, right, nr_left - target_left);
+		ret = shift(left, right, nr_left - target_left);
+		if (ret)
+			return ret;
 		*key_ptr(parent, r->index) = right->keys[0];
 	}
+	return 0;
 }
 
 static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
@@ -253,12 +291,12 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
 		return r;
 	}
 
-	__rebalance2(info, parent, &left, &right);
+	r = __rebalance2(info, parent, &left, &right);
 
 	exit_child(info, &left);
 	exit_child(info, &right);
 
-	return 0;
+	return r;
 }
 
 /*
@@ -266,21 +304,30 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
  * in right, then rebalance2.  This wastes some cpu, but I want something
  * simple atm.
  */
-static void delete_center_node(struct dm_btree_info *info, struct btree_node *parent,
-			       struct child *l, struct child *c, struct child *r,
-			       struct btree_node *left, struct btree_node *center, struct btree_node *right,
-			       uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
+static int delete_center_node(struct dm_btree_info *info, struct btree_node *parent,
+			      struct child *l, struct child *c, struct child *r,
+			      struct btree_node *left, struct btree_node *center, struct btree_node *right,
+			      uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
 {
 	uint32_t max_entries = le32_to_cpu(left->header.max_entries);
 	unsigned shift = min(max_entries - nr_left, nr_center);
 
-	BUG_ON(nr_left + shift > max_entries);
+	if (nr_left + shift > max_entries) {
+		DMERR("node shift out of bounds");
+		return -EINVAL;
+	}
+
 	node_copy(left, center, -shift);
 	left->header.nr_entries = cpu_to_le32(nr_left + shift);
 
 	if (shift != nr_center) {
 		shift = nr_center - shift;
-		BUG_ON((nr_right + shift) > max_entries);
+
+		if ((nr_right + shift) > max_entries) {
+			DMERR("node shift out of bounds");
+			return -EINVAL;
+		}
+
 		node_shift(right, shift);
 		node_copy(center, right, shift);
 		right->header.nr_entries = cpu_to_le32(nr_right + shift);
@@ -291,18 +338,18 @@ static void delete_center_node(struct dm_btree_info *info, struct btree_node *pa
 	r->index--;
 
 	dm_tm_dec(info->tm, dm_block_location(c->block));
-	__rebalance2(info, parent, l, r);
+	return __rebalance2(info, parent, l, r);
 }
 
 /*
  * Redistributes entries among 3 sibling nodes.
  */
-static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
-			  struct child *l, struct child *c, struct child *r,
-			  struct btree_node *left, struct btree_node *center, struct btree_node *right,
-			  uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
+static int redistribute3(struct dm_btree_info *info, struct btree_node *parent,
+			 struct child *l, struct child *c, struct child *r,
+			 struct btree_node *left, struct btree_node *center, struct btree_node *right,
+			 uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
 {
-	int s;
+	int s, ret;
 	uint32_t max_entries = le32_to_cpu(left->header.max_entries);
 	unsigned total = nr_left + nr_center + nr_right;
 	unsigned target_right = total / 3;
@@ -317,35 +364,55 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
 
 		if (s < 0 && nr_center < -s) {
 			/* not enough in central node */
-			shift(left, center, -nr_center);
+			ret = shift(left, center, -nr_center);
+			if (ret)
+				return ret;
+
 			s += nr_center;
-			shift(left, right, s);
+			ret = shift(left, right, s);
+			if (ret)
+				return ret;
+
 			nr_right += s;
-		} else
-			shift(left, center, s);
-
-		shift(center, right, target_right - nr_right);
+		} else {
+			ret = shift(left, center, s);
+			if (ret)
+				return ret;
+		}
+
+		ret = shift(center, right, target_right - nr_right);
+		if (ret)
+			return ret;
 	} else {
 		s = target_right - nr_right;
 		if (s > 0 && nr_center < s) {
 			/* not enough in central node */
-			shift(center, right, nr_center);
+			ret = shift(center, right, nr_center);
+			if (ret)
+				return ret;
 			s -= nr_center;
-			shift(left, right, s);
+			ret = shift(left, right, s);
+			if (ret)
+				return ret;
 			nr_left -= s;
-		} else
-			shift(center, right, s);
+		} else {
+			ret = shift(center, right, s);
+			if (ret)
+				return ret;
+		}
 
-		shift(left, center, nr_left - target_left);
+		ret = shift(left, center, nr_left - target_left);
+		if (ret)
+			return ret;
 	}
 
 	*key_ptr(parent, c->index) = center->keys[0];
 	*key_ptr(parent, r->index) = right->keys[0];
+	return 0;
 }
 
-static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
-			 struct child *l, struct child *c, struct child *r)
+static int __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
+			struct child *l, struct child *c, struct child *r)
 {
 	struct btree_node *left = l->n;
 	struct btree_node *center = c->n;
@@ -357,15 +424,19 @@ static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
 
 	unsigned threshold = merge_threshold(left) * 4 + 1;
 
-	BUG_ON(left->header.max_entries != center->header.max_entries);
-	BUG_ON(center->header.max_entries != right->header.max_entries);
-
-	if ((nr_left + nr_center + nr_right) < threshold)
-		delete_center_node(info, parent, l, c, r, left, center, right,
-				   nr_left, nr_center, nr_right);
-	else
-		redistribute3(info, parent, l, c, r, left, center, right,
-			      nr_left, nr_center, nr_right);
+	if ((left->header.max_entries != center->header.max_entries) ||
+	    (center->header.max_entries != right->header.max_entries)) {
+		DMERR("bad btree metadata, max_entries differ");
+		return -EILSEQ;
+	}
+
+	if ((nr_left + nr_center + nr_right) < threshold) {
+		return delete_center_node(info, parent, l, c, r, left, center, right,
+					  nr_left, nr_center, nr_right);
+	}
+
+	return redistribute3(info, parent, l, c, r, left, center, right,
+			     nr_left, nr_center, nr_right);
 }
 
 static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
@@ -395,13 +466,13 @@ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
 		return r;
 	}
 
-	__rebalance3(info, parent, &left, &center, &right);
+	r = __rebalance3(info, parent, &left, &center, &right);
 
 	exit_child(info, &left);
 	exit_child(info, &center);
 	exit_child(info, &right);
 
-	return 0;
+	return r;
 }
 
 static int rebalance_children(struct shadow_spine *s,
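
The btree-remove conversion above follows one pattern throughout: each BUG_ON() becomes a logged early return (-EILSEQ when the on-disk metadata itself is inconsistent, -EINVAL when the requested operation is out of range), and every caller that previously ignored a void helper now checks and propagates the result. A self-contained miniature of that pattern, as hypothetical userspace code rather than the kernel helpers:

/*
 * Hypothetical userspace miniature of the BUG_ON()-to-error conversion:
 * invariant violations are logged and returned instead of crashing the
 * machine, and callers propagate the code instead of assuming success.
 */
#include <errno.h>
#include <stdio.h>

#define DEMO_ERR(msg) fprintf(stderr, "btree: %s\n", msg)

struct demo_node {
        unsigned nr_entries;
        unsigned max_entries;
};

static int demo_shift(struct demo_node *left, struct demo_node *right, int count)
{
        if (left->max_entries != right->max_entries) {
                DEMO_ERR("node max_entries mismatch");
                return -EILSEQ;         /* metadata is inconsistent */
        }
        if (right->nr_entries + count > right->max_entries) {
                DEMO_ERR("node shift out of bounds");
                return -EINVAL;         /* the requested move cannot fit */
        }
        left->nr_entries -= count;
        right->nr_entries += count;
        return 0;
}

static int demo_rebalance(struct demo_node *l, struct demo_node *r)
{
        int ret = demo_shift(l, r, (int)(l->nr_entries - r->nr_entries) / 2);

        return ret;                     /* propagate, do not BUG_ON() */
}

int main(void)
{
        struct demo_node l = { .nr_entries = 6, .max_entries = 8 };
        struct demo_node r = { .nr_entries = 2, .max_entries = 8 };

        return demo_rebalance(&l, &r) ? 1 : 0;
}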
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
--- a/drivers/md/persistent-data/dm-btree-spine.c
+++ b/drivers/md/persistent-data/dm-btree-spine.c
@@ -15,10 +15,6 @@
 
 #define BTREE_CSUM_XOR 121107
 
-static int node_check(struct dm_block_validator *v,
-		      struct dm_block *b,
-		      size_t block_size);
-
 static void node_prepare_for_write(struct dm_block_validator *v,
 				   struct dm_block *b,
 				   size_t block_size)
@@ -40,7 +36,7 @@ static int node_check(struct dm_block_validator *v,
 	struct node_header *h = &n->header;
 	size_t value_size;
 	__le32 csum_disk;
-	uint32_t flags;
+	uint32_t flags, nr_entries, max_entries;
 
 	if (dm_block_location(b) != le64_to_cpu(h->blocknr)) {
 		DMERR_LIMIT("node_check failed: blocknr %llu != wanted %llu",
@@ -57,15 +53,17 @@ static int node_check(struct dm_block_validator *v,
 		return -EILSEQ;
 	}
 
+	nr_entries = le32_to_cpu(h->nr_entries);
+	max_entries = le32_to_cpu(h->max_entries);
 	value_size = le32_to_cpu(h->value_size);
 
 	if (sizeof(struct node_header) +
-	    (sizeof(__le64) + value_size) * le32_to_cpu(h->max_entries) > block_size) {
+	    (sizeof(__le64) + value_size) * max_entries > block_size) {
 		DMERR_LIMIT("node_check failed: max_entries too large");
 		return -EILSEQ;
 	}
 
-	if (le32_to_cpu(h->nr_entries) > le32_to_cpu(h->max_entries)) {
+	if (nr_entries > max_entries) {
 		DMERR_LIMIT("node_check failed: too many entries");
 		return -EILSEQ;
 	}
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -81,14 +81,16 @@ void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
 }
 
 static int insert_at(size_t value_size, struct btree_node *node, unsigned index,
-		      uint64_t key, void *value)
-		      __dm_written_to_disk(value)
+		     uint64_t key, void *value)
+		     __dm_written_to_disk(value)
 {
 	uint32_t nr_entries = le32_to_cpu(node->header.nr_entries);
+	uint32_t max_entries = le32_to_cpu(node->header.max_entries);
 	__le64 key_le = cpu_to_le64(key);
 
 	if (index > nr_entries ||
-	    index >= le32_to_cpu(node->header.max_entries)) {
+	    index >= max_entries ||
+	    nr_entries >= max_entries) {
 		DMERR("too many entries in btree node for insert");
 		__dm_unbless_for_disk(value);
 		return -ENOMEM;
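
The extra nr_entries >= max_entries condition above is the defensive part: on an already-full node the old checks could both pass (index <= nr_entries and index < max_entries), and the array shift that opens a slot would then write one entry past the end of the node. A hypothetical userspace miniature of the guarded insert, with the capacity made up for the demo:

/*
 * Hypothetical userspace miniature of the guarded insert_at(): without
 * the nr_entries >= max check, inserting into a full node passes the
 * index checks and the memmove() writes past the end of the array.
 */
#include <errno.h>
#include <stdint.h>
#include <string.h>

#define DEMO_MAX 4u

struct demo_node {
        uint32_t nr_entries;
        uint64_t keys[DEMO_MAX];
};

static int demo_insert_at(struct demo_node *n, unsigned index, uint64_t key)
{
        if (index > n->nr_entries ||
            index >= DEMO_MAX ||
            n->nr_entries >= DEMO_MAX)  /* the new defensive check */
                return -ENOMEM;

        /* Open a slot at @index, then place the key. */
        memmove(&n->keys[index + 1], &n->keys[index],
                (n->nr_entries - index) * sizeof(n->keys[0]));
        n->keys[index] = key;
        n->nr_entries++;
        return 0;
}

int main(void)
{
        struct demo_node n = { .nr_entries = 0 };

        return demo_insert_at(&n, 0, 7) ? 1 : 0;
}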
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -283,6 +283,11 @@ int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result)
 	struct disk_index_entry ie_disk;
 	struct dm_block *blk;
 
+	if (b >= ll->nr_blocks) {
+		DMERR_LIMIT("metadata block out of bounds");
+		return -EINVAL;
+	}
+
 	b = do_div(index, ll->entries_per_block);
 	r = ll->load_ie(ll, index, &ie_disk);
 	if (r < 0)
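
One subtlety in the hunk above: the kernel's do_div(n, base) divides the 64-bit dividend in place and returns the remainder. Here "index" (initialized from b earlier in the function, outside this hunk) ends up holding the index-entry number while "b" is reused as the offset within that bitmap block, which is why the new bounds check must run first, while b still holds the caller's absolute block number. A userspace approximation of the helper, for reference:

/*
 * Userspace approximation of the kernel's do_div(): divide the 64-bit
 * value in place and return the remainder.
 */
#include <stdint.h>

static inline uint32_t demo_do_div(uint64_t *n, uint32_t base)
{
        uint32_t rem = (uint32_t)(*n % base);

        *n /= base;
        return rem;
}

int main(void)
{
        uint64_t index = 1027;  /* absolute block number */
        uint32_t off = demo_do_div(&index, 256);

        /* index == 4 (bitmap block), off == 3 (offset inside it) */
        return !(index == 4 && off == 3);
}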