mirror of
git://sourceware.org/git/lvm2.git
synced 2025-01-02 01:18:26 +03:00
o implemented dm_start_table/dm_add_entry/dm_complete_table as used by
the /proc interface.
This commit is contained in:
parent
bf235aa693
commit
da44380f86
@ -65,38 +65,6 @@ static int _setup_btree_index(int l, struct mapped_device *md)
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
 * Build the btree index over the target boundaries of @md.
 *
 * NOTE(review): this is the pre-rename version of dm_complete_table()
 * (the removed side of this diff).  As written it cannot compile: it
 * references an identifier `t` (t->map[i].high) that is not declared in
 * this scope -- presumably a struct device_table * that used to be a
 * parameter -- and it returns the integer 1 from a function declared to
 * return struct mapped_device *.  Confirm against the replacement
 * dm_complete_table() below.
 */
struct mapped_device *dm_build_btree(struct mapped_device *md)
{
	int n, i;
	offset_t *k;

	/* how many indexes will the btree have ? */
	for (n = _div_up(md->num_targets, KEYS_PER_NODE), i = 1; n != 1; i++)
		n = _div_up(n, KEYS_PER_NODE + 1);

	/* depth of the tree == number of index levels counted above */
	md->depth = i;
	md->counts[md->depth - 1] = _div_up(md->num_targets, KEYS_PER_NODE);

	/* each higher level indexes the nodes of the level below it */
	while (--i)
		md->counts[i - 1] = _div_up(md->counts[i], KEYS_PER_NODE + 1);

	for (i = 0; i < md->depth; i++) {
		size_t s = NODE_SIZE * md->counts[i];
		/* -1 fill == "no key" sentinel in every slot.
		 * NOTE(review): the allocation result is not checked
		 * before the memset -- a failed __aligned() would be
		 * dereferenced here. */
		md->index[i] = __aligned(s, NODE_SIZE);
		memset(md->index[i], -1, s);
	}

	/* bottom layer is easy */
	for (k = md->index[md->depth - 1], i = 0; i < md->num_targets; i++)
		k[i] = t->map[i].high;

	/* fill in higher levels */
	for (i = md->depth - 1; i; i--)
		_setup_btree_index(i - 1, md);

	return 1;
}
|
||||
|
||||
void dm_free_btree(struct mapped_device *md)
|
||||
{
|
||||
int i;
|
||||
@ -105,6 +73,9 @@ void dm_free_btree(struct mapped_device *md)
|
||||
|
||||
__free_aligned(md->targets);
|
||||
__free_aligned(md->contexts);
|
||||
|
||||
md->num_targets = 0;
|
||||
md->num_allocated = 0;
|
||||
}
|
||||
|
||||
static int _setup_targets(struct mapped_device *md, struct device_table *t)
|
||||
@ -113,7 +84,7 @@ static int _setup_targets(struct mapped_device *md, struct device_table *t)
|
||||
offset_t low = 0;
|
||||
|
||||
md->num_targets = t->count;
|
||||
md->targets = __aligned(sizeof(*md->targets) * md->num_targets,
|
||||
md->targets = vmalloc(sizeof(*md->targets) * md->num_targets,
|
||||
NODE_SIZE);
|
||||
|
||||
for (i = 0; i < md->num_targets; i++) {
|
||||
@ -135,17 +106,91 @@ static int _setup_targets(struct mapped_device *md, struct device_table *t)
|
||||
|
||||
int dm_start_table(struct mapped_device *md)
|
||||
{
|
||||
/* if this is active, suspend it */
|
||||
bit_set(md->state, DM_LOADING);
|
||||
|
||||
dm_free_btree(md);
|
||||
if (!_alloc_targets(2)) /* FIXME: increase once debugged 256 ? */
|
||||
return 0;
|
||||
}
|
||||
|
||||
int dm_add_entry(struct mapped_device *md, offset_t high,
|
||||
dm_map_fn target, void *context)
|
||||
{
|
||||
if (md->num_targets >= md->num_entries &&
|
||||
!_alloc_targets(md->num_allocated * 2))
|
||||
retun -ENOMEM;
|
||||
|
||||
md->highs[md->num_targets] = high;
|
||||
md->targets[md->num_targets] = target;
|
||||
md->contexts[md->num_targets] = context;
|
||||
|
||||
md->num_targets++;
|
||||
}
|
||||
|
||||
int dm_activate_table(struct mapped_device *md)
|
||||
int dm_complete_table(struct mapped_device *md)
|
||||
{
|
||||
int n, i;
|
||||
offset_t *k;
|
||||
|
||||
/* how many indexes will the btree have ? */
|
||||
for (n = _div_up(md->num_targets, KEYS_PER_NODE), i = 1; n != 1; i++)
|
||||
n = _div_up(n, KEYS_PER_NODE + 1);
|
||||
|
||||
md->depth = i;
|
||||
md->counts[md->depth - 1] = _div_up(md->num_targets, KEYS_PER_NODE);
|
||||
|
||||
while (--i)
|
||||
md->counts[i - 1] = _div_up(md->counts[i], KEYS_PER_NODE + 1);
|
||||
|
||||
for (i = 0; i < md->depth; i++) {
|
||||
size_t s = NODE_SIZE * md->counts[i];
|
||||
md->index[i] = vmalloc(s);
|
||||
memset(md->index[i], -1, s);
|
||||
}
|
||||
|
||||
/* bottom layer is easy */
|
||||
md->index[md->depth - 1] = md->highs;
|
||||
|
||||
/* fill in higher levels */
|
||||
for (i = md->depth - 1; i; i--)
|
||||
_setup_btree_index(i - 1, md);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
 * (Re)allocate the three parallel target arrays to hold @num entries,
 * copying over the md->num_targets entries accumulated so far and
 * freeing the old arrays.
 *
 * NOTE(review): this function reads and writes `md`, but it takes no
 * mapped_device argument and no file-scope `md` is visible here; it
 * presumably needs a `struct mapped_device *md` parameter (both
 * callers, dm_start_table() and dm_add_entry(), have one in hand) --
 * confirm and fix the callers together with the signature.
 *
 * Returns 1 on success, 0 on allocation failure, in which case the
 * existing arrays are left untouched.
 */
static int _alloc_targets(int num)
{
	offset_t *n_highs;
	dm_map_fn *n_targets;
	void **n_contexts;

	if (!(n_highs = vmalloc(sizeof(*n_highs) * num)))
		return 0;

	/* each later allocation releases the earlier ones on failure */
	if (!(n_targets = vmalloc(sizeof(*n_targets) * num))) {
		vfree(n_highs);
		return 0;
	}

	if (!(n_contexts = vmalloc(sizeof(*n_contexts) * num))) {
		vfree(n_highs);
		vfree(n_targets);
		return 0;
	}

	/* preserve the entries accumulated so far */
	memcpy(n_highs, md->highs, sizeof(*n_highs) * md->num_targets);
	memcpy(n_targets, md->targets, sizeof(*n_targets) * md->num_targets);
	memcpy(n_contexts, md->contexts,
	       sizeof(*n_contexts) * md->num_targets);

	vfree(md->highs);
	vfree(md->targets);
	vfree(md->contexts);

	md->num_allocated = num;
	md->highs = n_highs;
	md->targets = n_targets;
	md->contexts = n_contexts;

	return 1;
}
|
||||
|
@ -53,6 +53,7 @@ struct mapped_device {
|
||||
|
||||
int num_targets;
|
||||
int num_allocated;
|
||||
offset_t *highs;
|
||||
dm_map_fn *targets;
|
||||
void **contexts;
|
||||
|
||||
@ -92,16 +93,4 @@ int dm_complete_table(struct mapped_device *md);
|
||||
int dm_init_fs(void);
|
||||
int dm_fin_fs(void);
|
||||
|
||||
|
||||
/* misc. inlines */
|
||||
/*
 * Allocate @s bytes with (at least) @align byte alignment.
 *
 * NOTE(review): the alignment request is satisfied only implicitly --
 * vmalloc() returns page-aligned memory, which assumes @align
 * (NODE_SIZE at the call sites) never exceeds the page size; confirm.
 */
static inline void *__aligned(size_t s, unsigned int align)
{
	(void) align;	/* currently unused; silences -Wunused-parameter */
	return vmalloc(s);
}
|
||||
|
||||
/* Release memory obtained from __aligned(). */
static inline void __free_aligned(void *ptr)
{
	vfree(ptr);
}
|
||||
|
||||
#endif
|
||||
|
Loading…
Reference in New Issue
Block a user