dm-vdo: change unnamed enums to defines

Signed-off-by: Bruce Johnston <bjohnsto@redhat.com>
Signed-off-by: Matthew Sakai <msakai@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
This commit is contained in:
Bruce Johnston 2024-02-26 17:04:43 -05:00 committed by Mike Snitzer
parent 04530b487b
commit 6008d526b0
25 changed files with 91 additions and 160 deletions

@@ -114,10 +114,8 @@ const struct block_map_entry UNMAPPED_BLOCK_MAP_ENTRY = {
.pbn_low_word = __cpu_to_le32(VDO_ZERO_BLOCK & UINT_MAX), .pbn_low_word = __cpu_to_le32(VDO_ZERO_BLOCK & UINT_MAX),
}; };
enum { #define LOG_INTERVAL 4000
LOG_INTERVAL = 4000, #define DISPLAY_INTERVAL 100000
DISPLAY_INTERVAL = 100000,
};
/* /*
* For adjusting VDO page cache statistic fields which are only mutated on the logical zone thread. * For adjusting VDO page cache statistic fields which are only mutated on the logical zone thread.

@@ -114,9 +114,7 @@ static blk_opf_t PASSTHROUGH_FLAGS = (REQ_PRIO | REQ_META | REQ_SYNC | REQ_RAHEA
* them are awakened. * them are awakened.
*/ */
enum { #define DATA_VIO_RELEASE_BATCH_SIZE 128
DATA_VIO_RELEASE_BATCH_SIZE = 128,
};
static const unsigned int VDO_SECTORS_PER_BLOCK_MASK = VDO_SECTORS_PER_BLOCK - 1; static const unsigned int VDO_SECTORS_PER_BLOCK_MASK = VDO_SECTORS_PER_BLOCK - 1;
static const u32 COMPRESSION_STATUS_MASK = 0xff; static const u32 COMPRESSION_STATUS_MASK = 0xff;
@@ -1044,8 +1042,8 @@ void dump_data_vio_pool(struct data_vio_pool *pool, bool dump_vios)
* In order that syslog can empty its buffer, sleep after 35 elements for 4ms (till the * In order that syslog can empty its buffer, sleep after 35 elements for 4ms (till the
* second clock tick). These numbers were picked based on experiments with lab machines. * second clock tick). These numbers were picked based on experiments with lab machines.
*/ */
enum { ELEMENTS_PER_BATCH = 35 }; static const int ELEMENTS_PER_BATCH = 35;
enum { SLEEP_FOR_SYSLOG = 4000 }; static const int SLEEP_FOR_SYSLOG = 4000;
if (pool == NULL) if (pool == NULL)
return; return;

@@ -154,11 +154,9 @@ struct uds_attribute {
const char *(*show_string)(struct hash_zones *hash_zones); const char *(*show_string)(struct hash_zones *hash_zones);
}; };
enum timer_state { #define DEDUPE_QUERY_TIMER_IDLE 0
DEDUPE_QUERY_TIMER_IDLE, #define DEDUPE_QUERY_TIMER_RUNNING 1
DEDUPE_QUERY_TIMER_RUNNING, #define DEDUPE_QUERY_TIMER_FIRED 2
DEDUPE_QUERY_TIMER_FIRED,
};
enum dedupe_context_state { enum dedupe_context_state {
DEDUPE_CONTEXT_IDLE, DEDUPE_CONTEXT_IDLE,
@@ -185,11 +183,9 @@ static const char *SUSPENDED = "suspended";
static const char *UNKNOWN = "unknown"; static const char *UNKNOWN = "unknown";
/* Version 2 uses the kernel space UDS index and is limited to 16 bytes */ /* Version 2 uses the kernel space UDS index and is limited to 16 bytes */
enum { #define UDS_ADVICE_VERSION 2
UDS_ADVICE_VERSION = 2, /* version byte + state byte + 64-bit little-endian PBN */
/* version byte + state byte + 64-bit little-endian PBN */ #define UDS_ADVICE_SIZE (1 + 1 + sizeof(u64))
UDS_ADVICE_SIZE = 1 + 1 + sizeof(u64),
};
enum hash_lock_state { enum hash_lock_state {
/* State for locks that are not in use or are being initialized. */ /* State for locks that are not in use or are being initialized. */
@@ -279,9 +275,7 @@ struct hash_lock {
struct vdo_wait_queue waiters; struct vdo_wait_queue waiters;
}; };
enum { #define LOCK_POOL_CAPACITY MAXIMUM_VDO_USER_VIOS
LOCK_POOL_CAPACITY = MAXIMUM_VDO_USER_VIOS,
};
struct hash_zones { struct hash_zones {
struct action_manager *manager; struct action_manager *manager;

@@ -42,7 +42,7 @@
#define CURRENT_VERSION "8.3.0.65" #define CURRENT_VERSION "8.3.0.65"
enum { enum admin_phases {
GROW_LOGICAL_PHASE_START, GROW_LOGICAL_PHASE_START,
GROW_LOGICAL_PHASE_GROW_BLOCK_MAP, GROW_LOGICAL_PHASE_GROW_BLOCK_MAP,
GROW_LOGICAL_PHASE_END, GROW_LOGICAL_PHASE_END,
@@ -142,10 +142,8 @@ static const char * const ADMIN_PHASE_NAMES[] = {
"SUSPEND_PHASE_END", "SUSPEND_PHASE_END",
}; };
enum { /* If we bump this, update the arrays below */
/* If we bump this, update the arrays below */ #define TABLE_VERSION 4
TABLE_VERSION = 4,
};
/* arrays for handling different table versions */ /* arrays for handling different table versions */
static const u8 REQUIRED_ARGC[] = { 10, 12, 9, 7, 6 }; static const u8 REQUIRED_ARGC[] = { 10, 12, 9, 7, 6 };
@@ -159,17 +157,15 @@ static const u8 POOL_NAME_ARG_INDEX[] = { 8, 10, 8 };
* need to scan 16 words, so it's not likely to be a big deal compared to other resource usage. * need to scan 16 words, so it's not likely to be a big deal compared to other resource usage.
*/ */
enum { /*
/* * This minimum size for the bit array creates a numbering space of 0-999, which allows
* This minimum size for the bit array creates a numbering space of 0-999, which allows * successive starts of the same volume to have different instance numbers in any
* successive starts of the same volume to have different instance numbers in any * reasonably-sized test. Changing instances on restart allows vdoMonReport to detect that
* reasonably-sized test. Changing instances on restart allows vdoMonReport to detect that * the ephemeral stats have reset to zero.
* the ephemeral stats have reset to zero. */
*/ #define BIT_COUNT_MINIMUM 1000
BIT_COUNT_MINIMUM = 1000, /* Grow the bit array by this many bits when needed */
/** Grow the bit array by this many bits when needed */ #define BIT_COUNT_INCREMENT 100
BIT_COUNT_INCREMENT = 100,
};
struct instance_tracker { struct instance_tracker {
unsigned int bit_count; unsigned int bit_count;

@@ -41,10 +41,10 @@ enum dump_option_flags {
FLAG_SKIP_DEFAULT = (1 << SKIP_DEFAULT) FLAG_SKIP_DEFAULT = (1 << SKIP_DEFAULT)
}; };
enum { #define FLAGS_ALL_POOLS (FLAG_SHOW_VIO_POOL)
FLAGS_ALL_POOLS = (FLAG_SHOW_VIO_POOL), #define DEFAULT_DUMP_FLAGS (FLAG_SHOW_QUEUES | FLAG_SHOW_VDO_STATUS)
DEFAULT_DUMP_FLAGS = (FLAG_SHOW_QUEUES | FLAG_SHOW_VDO_STATUS) /* Another static buffer... log10(256) = 2.408+, round up: */
}; #define DIGITS_PER_U64 (1 + sizeof(u64) * 2409 / 1000)
static inline bool is_arg_string(const char *arg, const char *this_option) static inline bool is_arg_string(const char *arg, const char *this_option)
{ {
@@ -222,9 +222,6 @@ void dump_data_vio(void *data)
* one does run, the log output will be garbled anyway. * one does run, the log output will be garbled anyway.
*/ */
static char vio_completion_dump_buffer[100 + MAX_VDO_WORK_QUEUE_NAME_LEN]; static char vio_completion_dump_buffer[100 + MAX_VDO_WORK_QUEUE_NAME_LEN];
/* Another static buffer... log10(256) = 2.408+, round up: */
enum { DIGITS_PER_U64 = 1 + sizeof(u64) * 2409 / 1000 };
static char vio_block_number_dump_buffer[sizeof("P L D") + 3 * DIGITS_PER_U64]; static char vio_block_number_dump_buffer[sizeof("P L D") + 3 * DIGITS_PER_U64];
static char vio_flush_generation_buffer[sizeof(" FG") + DIGITS_PER_U64]; static char vio_flush_generation_buffer[sizeof(" FG") + DIGITS_PER_U64];
static char flags_dump_buffer[8]; static char flags_dump_buffer[8];

@@ -55,9 +55,7 @@ static const struct header GEOMETRY_BLOCK_HEADER_4_0 = {
const u8 VDO_GEOMETRY_MAGIC_NUMBER[VDO_GEOMETRY_MAGIC_NUMBER_SIZE + 1] = "dmvdo001"; const u8 VDO_GEOMETRY_MAGIC_NUMBER[VDO_GEOMETRY_MAGIC_NUMBER_SIZE + 1] = "dmvdo001";
enum { #define PAGE_HEADER_4_1_SIZE (8 + 8 + 8 + 1 + 1 + 1 + 1)
PAGE_HEADER_4_1_SIZE = 8 + 8 + 8 + 1 + 1 + 1 + 1,
};
static const struct version_number BLOCK_MAP_4_1 = { static const struct version_number BLOCK_MAP_4_1 = {
.major_version = 4, .major_version = 4,

@@ -79,9 +79,7 @@ struct error_block {
const struct error_info *infos; const struct error_info *infos;
}; };
enum { #define MAX_ERROR_BLOCKS 6
MAX_ERROR_BLOCKS = 6,
};
static struct { static struct {
int allocated; int allocated;

@@ -15,12 +15,10 @@ static const u8 INDEX_CONFIG_MAGIC[] = "ALBIC";
static const u8 INDEX_CONFIG_VERSION_6_02[] = "06.02"; static const u8 INDEX_CONFIG_VERSION_6_02[] = "06.02";
static const u8 INDEX_CONFIG_VERSION_8_02[] = "08.02"; static const u8 INDEX_CONFIG_VERSION_8_02[] = "08.02";
enum { #define DEFAULT_VOLUME_READ_THREADS 2
DEFAULT_VOLUME_READ_THREADS = 2, #define MAX_VOLUME_READ_THREADS 16
MAX_VOLUME_READ_THREADS = 16, #define INDEX_CONFIG_MAGIC_LENGTH (sizeof(INDEX_CONFIG_MAGIC) - 1)
INDEX_CONFIG_MAGIC_LENGTH = sizeof(INDEX_CONFIG_MAGIC) - 1, #define INDEX_CONFIG_VERSION_LENGTH ((int)(sizeof(INDEX_CONFIG_VERSION_6_02) - 1))
INDEX_CONFIG_VERSION_LENGTH = sizeof(INDEX_CONFIG_VERSION_6_02) - 1,
};
static bool is_version(const u8 *version, u8 *buffer) static bool is_version(const u8 *version, u8 *buffer)
{ {

@@ -70,17 +70,13 @@
* This is the largest field size supported by get_field() and set_field(). Any field that is * This is the largest field size supported by get_field() and set_field(). Any field that is
* larger is not guaranteed to fit in a single byte-aligned u32. * larger is not guaranteed to fit in a single byte-aligned u32.
*/ */
enum { #define MAX_FIELD_BITS ((sizeof(u32) - 1) * BITS_PER_BYTE + 1)
MAX_FIELD_BITS = (sizeof(u32) - 1) * BITS_PER_BYTE + 1,
};
/* /*
* This is the largest field size supported by get_big_field() and set_big_field(). Any field that * This is the largest field size supported by get_big_field() and set_big_field(). Any field that
* is larger is not guaranteed to fit in a single byte-aligned u64. * is larger is not guaranteed to fit in a single byte-aligned u64.
*/ */
enum { #define MAX_BIG_FIELD_BITS ((sizeof(u64) - 1) * BITS_PER_BYTE + 1)
MAX_BIG_FIELD_BITS = (sizeof(u64) - 1) * BITS_PER_BYTE + 1,
};
/* /*
* This is the number of guard bytes needed at the end of the memory byte array when using the bit * This is the number of guard bytes needed at the end of the memory byte array when using the bit
@@ -88,45 +84,33 @@ enum {
* bytes beyond the end of the desired field. The definition is written to make it clear how this * bytes beyond the end of the desired field. The definition is written to make it clear how this
* value is derived. * value is derived.
*/ */
enum { #define POST_FIELD_GUARD_BYTES (sizeof(u64) - 1)
POST_FIELD_GUARD_BYTES = sizeof(u64) - 1,
};
/* The number of guard bits that are needed in the tail guard list */ /* The number of guard bits that are needed in the tail guard list */
enum { #define GUARD_BITS (POST_FIELD_GUARD_BYTES * BITS_PER_BYTE)
GUARD_BITS = POST_FIELD_GUARD_BYTES * BITS_PER_BYTE
};
/* /*
* The maximum size of a single delta list in bytes. We count guard bytes in this value because a * The maximum size of a single delta list in bytes. We count guard bytes in this value because a
* buffer of this size can be used with move_bits(). * buffer of this size can be used with move_bits().
*/ */
enum { #define DELTA_LIST_MAX_BYTE_COUNT \
DELTA_LIST_MAX_BYTE_COUNT = ((U16_MAX + BITS_PER_BYTE) / BITS_PER_BYTE + POST_FIELD_GUARD_BYTES)
((U16_MAX + BITS_PER_BYTE) / BITS_PER_BYTE + POST_FIELD_GUARD_BYTES)
};
/* The number of extra bytes and bits needed to store a collision entry */ /* The number of extra bytes and bits needed to store a collision entry */
enum { #define COLLISION_BYTES UDS_RECORD_NAME_SIZE
COLLISION_BYTES = UDS_RECORD_NAME_SIZE, #define COLLISION_BITS (COLLISION_BYTES * BITS_PER_BYTE)
COLLISION_BITS = COLLISION_BYTES * BITS_PER_BYTE
};
/* /*
* Immutable delta lists are packed into pages containing a header that encodes the delta list * Immutable delta lists are packed into pages containing a header that encodes the delta list
* information into 19 bits per list (64KB bit offset). * information into 19 bits per list (64KB bit offset).
*/ */
#define IMMUTABLE_HEADER_SIZE 19
enum { IMMUTABLE_HEADER_SIZE = 19 };
/* /*
* Constants and structures for the saved delta index. "DI" is for delta_index, and -##### is a * Constants and structures for the saved delta index. "DI" is for delta_index, and -##### is a
* number to increment when the format of the data changes. * number to increment when the format of the data changes.
*/ */
#define MAGIC_SIZE 8
enum {
MAGIC_SIZE = 8,
};
static const char DELTA_INDEX_MAGIC[] = "DI-00002"; static const char DELTA_INDEX_MAGIC[] = "DI-00002";
@@ -216,9 +200,7 @@ static void rebalance_delta_zone(const struct delta_zone *delta_zone, u32 first,
static inline size_t get_zone_memory_size(unsigned int zone_count, size_t memory_size) static inline size_t get_zone_memory_size(unsigned int zone_count, size_t memory_size)
{ {
/* Round up so that each zone is a multiple of 64K in size. */ /* Round up so that each zone is a multiple of 64K in size. */
enum { size_t ALLOC_BOUNDARY = 64 * 1024;
ALLOC_BOUNDARY = 64 * 1024,
};
return (memory_size / zone_count + ALLOC_BOUNDARY - 1) & -ALLOC_BOUNDARY; return (memory_size / zone_count + ALLOC_BOUNDARY - 1) & -ALLOC_BOUNDARY;
} }

@@ -54,11 +54,9 @@
* Each save also has a unique nonce. * Each save also has a unique nonce.
*/ */
enum { #define MAGIC_SIZE 32
MAGIC_SIZE = 32, #define NONCE_INFO_SIZE 32
NONCE_INFO_SIZE = 32, #define MAX_SAVES 2
MAX_SAVES = 2,
};
enum region_kind { enum region_kind {
RL_KIND_EMPTY = 0, RL_KIND_EMPTY = 0,
@@ -82,9 +80,7 @@ enum region_type {
RH_TYPE_UNSAVED = 4, RH_TYPE_UNSAVED = 4,
}; };
enum { #define RL_SOLE_INSTANCE 65535
RL_SOLE_INSTANCE = 65535,
};
/* /*
* Super block version 2 is the first released version. * Super block version 2 is the first released version.
@@ -98,11 +94,9 @@ enum {
* order to make room to prepend LVM metadata to a volume originally created without lvm. This * order to make room to prepend LVM metadata to a volume originally created without lvm. This
* allows the index to retain most its deduplication records. * allows the index to retain most its deduplication records.
*/ */
enum { #define SUPER_VERSION_MINIMUM 3
SUPER_VERSION_MINIMUM = 3, #define SUPER_VERSION_CURRENT 3
SUPER_VERSION_CURRENT = 3, #define SUPER_VERSION_MAXIMUM 7
SUPER_VERSION_MAXIMUM = 7,
};
static const u8 LAYOUT_MAGIC[MAGIC_SIZE] = "*ALBIREO*SINGLE*FILE*LAYOUT*001*"; static const u8 LAYOUT_MAGIC[MAGIC_SIZE] = "*ALBIREO*SINGLE*FILE*LAYOUT*001*";
static const u64 REGION_MAGIC = 0x416c6252676e3031; /* 'AlbRgn01' */ static const u64 REGION_MAGIC = 0x416c6252676e3031; /* 'AlbRgn01' */

@@ -25,9 +25,7 @@
static const u8 PAGE_MAP_MAGIC[] = "ALBIPM02"; static const u8 PAGE_MAP_MAGIC[] = "ALBIPM02";
enum { #define PAGE_MAP_MAGIC_LENGTH (sizeof(PAGE_MAP_MAGIC) - 1)
PAGE_MAP_MAGIC_LENGTH = sizeof(PAGE_MAP_MAGIC) - 1,
};
static inline u32 get_entry_count(const struct index_geometry *geometry) static inline u32 get_entry_count(const struct index_geometry *geometry)
{ {

@@ -37,7 +37,7 @@ struct buffered_reader {
u8 *end; u8 *end;
}; };
enum { MAX_READ_AHEAD_BLOCKS = 4 }; #define MAX_READ_AHEAD_BLOCKS 4
/* /*
* The buffered writer allows efficient I/O by buffering writes and committing page-sized segments * The buffered writer allows efficient I/O by buffering writes and committing page-sized segments

@@ -46,11 +46,9 @@
static const u8 OPEN_CHAPTER_MAGIC[] = "ALBOC"; static const u8 OPEN_CHAPTER_MAGIC[] = "ALBOC";
static const u8 OPEN_CHAPTER_VERSION[] = "02.00"; static const u8 OPEN_CHAPTER_VERSION[] = "02.00";
enum { #define OPEN_CHAPTER_MAGIC_LENGTH (sizeof(OPEN_CHAPTER_MAGIC) - 1)
OPEN_CHAPTER_MAGIC_LENGTH = sizeof(OPEN_CHAPTER_MAGIC) - 1, #define OPEN_CHAPTER_VERSION_LENGTH (sizeof(OPEN_CHAPTER_VERSION) - 1)
OPEN_CHAPTER_VERSION_LENGTH = sizeof(OPEN_CHAPTER_VERSION) - 1, #define LOAD_RATIO 2
LOAD_RATIO = 2,
};
static inline size_t records_size(const struct open_chapter_zone *open_chapter) static inline size_t records_size(const struct open_chapter_zone *open_chapter)
{ {

@@ -17,10 +17,8 @@
* keys to be sorted. * keys to be sorted.
*/ */
enum { /* Piles smaller than this are handled with a simple insertion sort. */
/* Piles smaller than this are handled with a simple insertion sort. */ #define INSERTION_SORT_THRESHOLD 12
INSERTION_SORT_THRESHOLD = 12,
};
/* Sort keys are pointers to immutable fixed-length arrays of bytes. */ /* Sort keys are pointers to immutable fixed-length arrays of bytes. */
typedef const u8 *sort_key_t; typedef const u8 *sort_key_t;

@@ -77,10 +77,8 @@
* considered to be a member of the cache for uds_sparse_cache_contains(). * considered to be a member of the cache for uds_sparse_cache_contains().
*/ */
enum { #define SKIP_SEARCH_THRESHOLD 20000
SKIP_SEARCH_THRESHOLD = 20000, #define ZONE_ZERO 0
ZONE_ZERO = 0,
};
/* /*
* These counters are essentially fields of the struct cached_chapter_index, but are segregated * These counters are essentially fields of the struct cached_chapter_index, but are segregated

@@ -94,7 +94,8 @@ struct chapter_range {
u32 chapter_count; u32 chapter_count;
}; };
enum { MAGIC_SIZE = 8 }; #define MAGIC_SIZE 8
static const char MAGIC_START_5[] = "MI5-0005"; static const char MAGIC_START_5[] = "MI5-0005";
struct sub_index_data { struct sub_index_data {
@@ -193,10 +194,11 @@ unsigned int uds_get_volume_index_zone(const struct volume_index *volume_index,
return get_volume_sub_index_zone(get_volume_sub_index(volume_index, name), name); return get_volume_sub_index_zone(get_volume_sub_index(volume_index, name), name);
} }
#define DELTA_LIST_SIZE 256
static int compute_volume_sub_index_parameters(const struct uds_configuration *config, static int compute_volume_sub_index_parameters(const struct uds_configuration *config,
struct sub_index_parameters *params) struct sub_index_parameters *params)
{ {
enum { DELTA_LIST_SIZE = 256 };
u64 entries_in_volume_index, address_span; u64 entries_in_volume_index, address_span;
u32 chapters_in_volume_index, invalid_chapters; u32 chapters_in_volume_index, invalid_chapters;
u32 rounded_chapters; u32 rounded_chapters;

@@ -60,13 +60,11 @@
* in-memory volume index. * in-memory volume index.
*/ */
enum { /* The maximum allowable number of contiguous bad chapters */
/* The maximum allowable number of contiguous bad chapters */ #define MAX_BAD_CHAPTERS 100
MAX_BAD_CHAPTERS = 100, #define VOLUME_CACHE_MAX_ENTRIES (U16_MAX >> 1)
VOLUME_CACHE_MAX_ENTRIES = (U16_MAX >> 1), #define VOLUME_CACHE_QUEUED_FLAG (1 << 15)
VOLUME_CACHE_QUEUED_FLAG = (1 << 15), #define VOLUME_CACHE_MAX_QUEUED_READS 4096
VOLUME_CACHE_MAX_QUEUED_READS = 4096,
};
static const u64 BAD_CHAPTER = U64_MAX; static const u64 BAD_CHAPTER = U64_MAX;

@@ -56,13 +56,11 @@
#include "numeric.h" #include "numeric.h"
#include "permassert.h" #include "permassert.h"
enum { #define DEFAULT_CAPACITY 16 /* the number of neighborhoods in a new table */
DEFAULT_CAPACITY = 16, /* the number of neighborhoods in a new table */ #define NEIGHBORHOOD 255 /* the number of buckets in each neighborhood */
NEIGHBORHOOD = 255, /* the number of buckets in each neighborhood */ #define MAX_PROBES 1024 /* limit on the number of probes for a free bucket */
MAX_PROBES = 1024, /* limit on the number of probes for a free bucket */ #define NULL_HOP_OFFSET 0 /* the hop offset value terminating the hop list */
NULL_HOP_OFFSET = 0, /* the hop offset value terminating the hop list */ #define DEFAULT_LOAD 75 /* a compromise between memory use and performance */
DEFAULT_LOAD = 75 /* a compromise between memory use and performance */
};
/** /**
* struct bucket - hash bucket * struct bucket - hash bucket

@@ -21,9 +21,7 @@
#include "physical-zone.h" #include "physical-zone.h"
#include "vdo.h" #include "vdo.h"
enum { #define ALLOCATIONS_PER_ZONE 128
ALLOCATIONS_PER_ZONE = 128,
};
/** /**
* as_logical_zone() - Convert a generic vdo_completion to a logical_zone. * as_logical_zone() - Convert a generic vdo_completion to a logical_zone.

@@ -30,9 +30,7 @@ static const struct version_number COMPRESSED_BLOCK_1_0 = {
.minor_version = 0, .minor_version = 0,
}; };
enum { #define COMPRESSED_BLOCK_1_0_SIZE (4 + 4 + (2 * VDO_MAX_COMPRESSION_SLOTS))
COMPRESSED_BLOCK_1_0_SIZE = 4 + 4 + (2 * VDO_MAX_COMPRESSION_SLOTS),
};
/** /**
* vdo_get_compressed_block_fragment() - Get a reference to a compressed fragment from a compressed * vdo_get_compressed_block_fragment() - Get a reference to a compressed fragment from a compressed

@@ -23,10 +23,8 @@
#include "status-codes.h" #include "status-codes.h"
#include "vdo.h" #include "vdo.h"
enum { /* Each user data_vio needs a PBN read lock and write lock. */
/* Each user data_vio needs a PBN read lock and write lock. */ #define LOCK_POOL_CAPACITY (2 * MAXIMUM_VDO_USER_VIOS)
LOCK_POOL_CAPACITY = 2 * MAXIMUM_VDO_USER_VIOS,
};
struct pbn_lock_implementation { struct pbn_lock_implementation {
enum pbn_lock_type type; enum pbn_lock_type type;

@@ -14,9 +14,7 @@
#include "status-codes.h" #include "status-codes.h"
/* We use a single 64-bit search vector, so the maximum priority is 63 */ /* We use a single 64-bit search vector, so the maximum priority is 63 */
enum { #define MAX_PRIORITY 63
MAX_PRIORITY = 63
};
/* /*
* All the entries with the same priority are queued in a circular list in a bucket for that * All the entries with the same priority are queued in a circular list in a bucket for that

@@ -26,15 +26,13 @@
static const u64 RECOVERY_COUNT_MASK = 0xff; static const u64 RECOVERY_COUNT_MASK = 0xff;
enum { /*
/* * The number of reserved blocks must be large enough to prevent a new recovery journal
* The number of reserved blocks must be large enough to prevent a new recovery journal * block write from overwriting a block which appears to still be a valid head block of the
* block write from overwriting a block which appears to still be a valid head block of the * journal. Currently, that means reserving enough space for all 2048 data_vios.
* journal. Currently, that means reserving enough space for all 2048 data_vios. */
*/ #define RECOVERY_JOURNAL_RESERVED_BLOCKS \
RECOVERY_JOURNAL_RESERVED_BLOCKS = ((MAXIMUM_VDO_USER_VIOS / RECOVERY_JOURNAL_ENTRIES_PER_BLOCK) + 2)
(MAXIMUM_VDO_USER_VIOS / RECOVERY_JOURNAL_ENTRIES_PER_BLOCK) + 2,
};
/** /**
* DOC: Lock Counters. * DOC: Lock Counters.

@@ -27,11 +27,9 @@ struct thread {
struct completion thread_done; struct completion thread_done;
}; };
enum { #define ONCE_NOT_DONE 0
ONCE_NOT_DONE = 0, #define ONCE_IN_PROGRESS 1
ONCE_IN_PROGRESS = 1, #define ONCE_COMPLETE 2
ONCE_COMPLETE = 2,
};
/* Run a function once only, and record that fact in the atomic value. */ /* Run a function once only, and record that fact in the atomic value. */
void vdo_perform_once(atomic_t *once, void (*function)(void)) void vdo_perform_once(atomic_t *once, void (*function)(void))

@@ -60,7 +60,7 @@
#include "status-codes.h" #include "status-codes.h"
#include "vio.h" #include "vio.h"
enum { PARANOID_THREAD_CONSISTENCY_CHECKS = 0 }; #define PARANOID_THREAD_CONSISTENCY_CHECKS 0
struct sync_completion { struct sync_completion {
struct vdo_completion vdo_completion; struct vdo_completion vdo_completion;