dm vdo int-map: rename functions to use a common vdo_int_map preamble
Reviewed-by: Matthew Sakai <msakai@redhat.com>
Signed-off-by: Bruce Johnston <bjohnsto@redhat.com>
Signed-off-by: Matthew Sakai <msakai@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
This commit is contained in:
parent
db6b0a7ffe
commit
ffb8d96541
@@ -232,7 +232,7 @@ static int __must_check allocate_cache_components(struct vdo_page_cache *cache)
|
||||
if (result != UDS_SUCCESS)
|
||||
return result;
|
||||
|
||||
result = vdo_make_int_map(cache->page_count, 0, &cache->page_map);
|
||||
result = vdo_int_map_create(cache->page_count, 0, &cache->page_map);
|
||||
if (result != UDS_SUCCESS)
|
||||
return result;
|
||||
|
||||
@@ -1346,8 +1346,8 @@ int vdo_invalidate_page_cache(struct vdo_page_cache *cache)
|
||||
}
|
||||
|
||||
/* Reset the page map by re-allocating it. */
|
||||
vdo_free_int_map(uds_forget(cache->page_map));
|
||||
return vdo_make_int_map(cache->page_count, 0, &cache->page_map);
|
||||
vdo_int_map_free(uds_forget(cache->page_map));
|
||||
return vdo_int_map_create(cache->page_count, 0, &cache->page_map);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -2751,7 +2751,7 @@ static int __must_check initialize_block_map_zone(struct block_map *map,
|
||||
INIT_LIST_HEAD(&zone->dirty_lists->eras[i][VDO_CACHE_PAGE]);
|
||||
}
|
||||
|
||||
result = vdo_make_int_map(VDO_LOCK_MAP_CAPACITY, 0, &zone->loading_pages);
|
||||
result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, 0, &zone->loading_pages);
|
||||
if (result != VDO_SUCCESS)
|
||||
return result;
|
||||
|
||||
@@ -2831,7 +2831,7 @@ static void uninitialize_block_map_zone(struct block_map_zone *zone)
|
||||
|
||||
uds_free(uds_forget(zone->dirty_lists));
|
||||
free_vio_pool(uds_forget(zone->vio_pool));
|
||||
vdo_free_int_map(uds_forget(zone->loading_pages));
|
||||
vdo_int_map_free(uds_forget(zone->loading_pages));
|
||||
if (cache->infos != NULL) {
|
||||
struct page_info *info;
|
||||
|
||||
@@ -2839,7 +2839,7 @@ static void uninitialize_block_map_zone(struct block_map_zone *zone)
|
||||
free_vio(uds_forget(info->vio));
|
||||
}
|
||||
|
||||
vdo_free_int_map(uds_forget(cache->page_map));
|
||||
vdo_int_map_free(uds_forget(cache->page_map));
|
||||
uds_free(uds_forget(cache->infos));
|
||||
uds_free(uds_forget(cache->pages));
|
||||
}
|
||||
|
@@ -2404,7 +2404,7 @@ static int __must_check initialize_zone(struct vdo *vdo, struct hash_zones *zone
|
||||
data_vio_count_t i;
|
||||
struct hash_zone *zone = &zones->zones[zone_number];
|
||||
|
||||
result = vdo_make_int_map(VDO_LOCK_MAP_CAPACITY, 0, &zone->hash_lock_map);
|
||||
result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, 0, &zone->hash_lock_map);
|
||||
if (result != VDO_SUCCESS)
|
||||
return result;
|
||||
|
||||
@@ -2528,7 +2528,7 @@ void vdo_free_hash_zones(struct hash_zones *zones)
|
||||
struct hash_zone *zone = &zones->zones[i];
|
||||
|
||||
uds_free_funnel_queue(uds_forget(zone->timed_out_complete));
|
||||
vdo_free_int_map(uds_forget(zone->hash_lock_map));
|
||||
vdo_int_map_free(uds_forget(zone->hash_lock_map));
|
||||
uds_free(uds_forget(zone->lock_array));
|
||||
}
|
||||
|
||||
|
@@ -171,7 +171,7 @@ static int allocate_buckets(struct int_map *map, size_t capacity)
|
||||
}
|
||||
|
||||
/**
|
||||
* vdo_make_int_map() - Allocate and initialize an int_map.
|
||||
* vdo_int_map_create() - Allocate and initialize an int_map.
|
||||
* @initial_capacity: The number of entries the map should initially be capable of holding (zero
|
||||
* tells the map to use its own small default).
|
||||
* @initial_load: The load factor of the map, expressed as an integer percentage (typically in the
|
||||
@@ -180,7 +180,8 @@ static int allocate_buckets(struct int_map *map, size_t capacity)
|
||||
*
|
||||
* Return: UDS_SUCCESS or an error code.
|
||||
*/
|
||||
int vdo_make_int_map(size_t initial_capacity, unsigned int initial_load, struct int_map **map_ptr)
|
||||
int vdo_int_map_create(size_t initial_capacity, unsigned int initial_load,
|
||||
struct int_map **map_ptr)
|
||||
{
|
||||
struct int_map *map;
|
||||
int result;
|
||||
@@ -207,7 +208,7 @@ int vdo_make_int_map(size_t initial_capacity, unsigned int initial_load, struct
|
||||
|
||||
result = allocate_buckets(map, capacity);
|
||||
if (result != UDS_SUCCESS) {
|
||||
vdo_free_int_map(uds_forget(map));
|
||||
vdo_int_map_free(uds_forget(map));
|
||||
return result;
|
||||
}
|
||||
|
||||
@@ -216,13 +217,13 @@ int vdo_make_int_map(size_t initial_capacity, unsigned int initial_load, struct
|
||||
}
|
||||
|
||||
/**
|
||||
* vdo_free_int_map() - Free an int_map.
|
||||
* vdo_int_map_free() - Free an int_map.
|
||||
* @map: The int_map to free.
|
||||
*
|
||||
* NOTE: The map does not own the pointer values stored in the map and they are not freed by this
|
||||
* call.
|
||||
*/
|
||||
void vdo_free_int_map(struct int_map *map)
|
||||
void vdo_int_map_free(struct int_map *map)
|
||||
{
|
||||
if (map == NULL)
|
||||
return;
|
||||
@@ -464,7 +465,8 @@ find_empty_bucket(struct int_map *map, struct bucket *bucket, unsigned int max_p
|
||||
* Return: The bucket that was vacated by moving its entry to the provided hole, or NULL if no
|
||||
* entry could be moved.
|
||||
*/
|
||||
static struct bucket *move_empty_bucket(struct int_map *map __always_unused, struct bucket *hole)
|
||||
static struct bucket *move_empty_bucket(struct int_map *map __always_unused,
|
||||
struct bucket *hole)
|
||||
{
|
||||
/*
|
||||
* Examine every neighborhood that the empty bucket is part of, starting with the one in
|
||||
@@ -572,7 +574,8 @@ static bool update_mapping(struct int_map *map,
|
||||
* Return: a pointer to an empty bucket in the desired neighborhood, or NULL if a vacancy could not
|
||||
* be found or arranged.
|
||||
*/
|
||||
static struct bucket *find_or_make_vacancy(struct int_map *map, struct bucket *neighborhood)
|
||||
static struct bucket *find_or_make_vacancy(struct int_map *map,
|
||||
struct bucket *neighborhood)
|
||||
{
|
||||
/* Probe within and beyond the neighborhood for the first empty bucket. */
|
||||
struct bucket *hole = find_empty_bucket(map, neighborhood, MAX_PROBES);
|
||||
@@ -619,7 +622,8 @@ static struct bucket *find_or_make_vacancy(struct int_map *map, struct bucket *n
|
||||
*
|
||||
* Return: UDS_SUCCESS or an error code.
|
||||
*/
|
||||
int vdo_int_map_put(struct int_map *map, u64 key, void *new_value, bool update, void **old_value_ptr)
|
||||
int vdo_int_map_put(struct int_map *map, u64 key, void *new_value, bool update,
|
||||
void **old_value_ptr)
|
||||
{
|
||||
struct bucket *neighborhood, *bucket;
|
||||
|
||||
|
@@ -23,17 +23,17 @@
|
||||
|
||||
struct int_map;
|
||||
|
||||
int __must_check
|
||||
vdo_make_int_map(size_t initial_capacity, unsigned int initial_load, struct int_map **map_ptr);
|
||||
int __must_check vdo_int_map_create(size_t initial_capacity, unsigned int initial_load,
|
||||
struct int_map **map_ptr);
|
||||
|
||||
void vdo_free_int_map(struct int_map *map);
|
||||
void vdo_int_map_free(struct int_map *map);
|
||||
|
||||
size_t vdo_int_map_size(const struct int_map *map);
|
||||
|
||||
void *vdo_int_map_get(struct int_map *map, u64 key);
|
||||
|
||||
int __must_check
|
||||
vdo_int_map_put(struct int_map *map, u64 key, void *new_value, bool update, void **old_value_ptr);
|
||||
int __must_check vdo_int_map_put(struct int_map *map, u64 key, void *new_value,
|
||||
bool update, void **old_value_ptr);
|
||||
|
||||
void *vdo_int_map_remove(struct int_map *map, u64 key);
|
||||
|
||||
|
@@ -401,8 +401,8 @@ int vdo_make_io_submitter(unsigned int thread_count, unsigned int rotation_inter
|
||||
* uneven. So for now, we'll assume that all requests *may* wind up on one thread,
|
||||
* and thus all in the same map.
|
||||
*/
|
||||
result = vdo_make_int_map(max_requests_active * 2, 0,
|
||||
&bio_queue_data->map);
|
||||
result = vdo_int_map_create(max_requests_active * 2, 0,
|
||||
&bio_queue_data->map);
|
||||
if (result != 0) {
|
||||
/*
|
||||
* Clean up the partially initialized bio-queue entirely and indicate that
|
||||
@@ -422,7 +422,7 @@ int vdo_make_io_submitter(unsigned int thread_count, unsigned int rotation_inter
|
||||
* Clean up the partially initialized bio-queue entirely and indicate that
|
||||
* initialization failed.
|
||||
*/
|
||||
vdo_free_int_map(uds_forget(bio_queue_data->map));
|
||||
vdo_int_map_free(uds_forget(bio_queue_data->map));
|
||||
uds_log_error("bio queue initialization failed %d", result);
|
||||
vdo_cleanup_io_submitter(io_submitter);
|
||||
vdo_free_io_submitter(io_submitter);
|
||||
@@ -471,7 +471,7 @@ void vdo_free_io_submitter(struct io_submitter *io_submitter)
|
||||
io_submitter->num_bio_queues_used--;
|
||||
/* vdo_destroy() will free the work queue, so just give up our reference to it. */
|
||||
uds_forget(io_submitter->bio_queue_data[i].queue);
|
||||
vdo_free_int_map(uds_forget(io_submitter->bio_queue_data[i].map));
|
||||
vdo_int_map_free(uds_forget(io_submitter->bio_queue_data[i].map));
|
||||
}
|
||||
uds_free(io_submitter);
|
||||
}
|
||||
|
@@ -57,7 +57,7 @@ static int initialize_zone(struct logical_zones *zones, zone_count_t zone_number
|
||||
struct logical_zone *zone = &zones->zones[zone_number];
|
||||
zone_count_t allocation_zone_number;
|
||||
|
||||
result = vdo_make_int_map(VDO_LOCK_MAP_CAPACITY, 0, &zone->lbn_operations);
|
||||
result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, 0, &zone->lbn_operations);
|
||||
if (result != VDO_SUCCESS)
|
||||
return result;
|
||||
|
||||
@@ -137,7 +137,7 @@ void vdo_free_logical_zones(struct logical_zones *zones)
|
||||
uds_free(uds_forget(zones->manager));
|
||||
|
||||
for (index = 0; index < zones->zone_count; index++)
|
||||
vdo_free_int_map(uds_forget(zones->zones[index].lbn_operations));
|
||||
vdo_int_map_free(uds_forget(zones->zones[index].lbn_operations));
|
||||
|
||||
uds_free(zones);
|
||||
}
|
||||
|
@@ -330,13 +330,13 @@ static int initialize_zone(struct vdo *vdo, struct physical_zones *zones)
|
||||
zone_count_t zone_number = zones->zone_count;
|
||||
struct physical_zone *zone = &zones->zones[zone_number];
|
||||
|
||||
result = vdo_make_int_map(VDO_LOCK_MAP_CAPACITY, 0, &zone->pbn_operations);
|
||||
result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, 0, &zone->pbn_operations);
|
||||
if (result != VDO_SUCCESS)
|
||||
return result;
|
||||
|
||||
result = make_pbn_lock_pool(LOCK_POOL_CAPACITY, &zone->lock_pool);
|
||||
if (result != VDO_SUCCESS) {
|
||||
vdo_free_int_map(zone->pbn_operations);
|
||||
vdo_int_map_free(zone->pbn_operations);
|
||||
return result;
|
||||
}
|
||||
|
||||
@@ -347,7 +347,7 @@ static int initialize_zone(struct vdo *vdo, struct physical_zones *zones)
|
||||
result = vdo_make_default_thread(vdo, zone->thread_id);
|
||||
if (result != VDO_SUCCESS) {
|
||||
free_pbn_lock_pool(uds_forget(zone->lock_pool));
|
||||
vdo_free_int_map(zone->pbn_operations);
|
||||
vdo_int_map_free(zone->pbn_operations);
|
||||
return result;
|
||||
}
|
||||
return result;
|
||||
@@ -401,7 +401,7 @@ void vdo_free_physical_zones(struct physical_zones *zones)
|
||||
struct physical_zone *zone = &zones->zones[index];
|
||||
|
||||
free_pbn_lock_pool(uds_forget(zone->lock_pool));
|
||||
vdo_free_int_map(uds_forget(zone->pbn_operations));
|
||||
vdo_int_map_free(uds_forget(zone->pbn_operations));
|
||||
}
|
||||
|
||||
uds_free(zones);
|
||||
|
Loading…
x
Reference in New Issue
Block a user