
Remove old custom list macros and replace with libdevmapper list macros.
Jonathan Earl Brassow 2009-07-28 21:14:12 +00:00
parent 28ae3fe772
commit 664ef2f993
6 changed files with 78 additions and 607 deletions
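
The change drops clogd's private copy of the kernel-style list.h in favour of the dm_list API that libdevmapper already exports (hence the new -ldevmapper in the Makefile and the <libdevmapper.h> includes below). Note that dm_list_add() takes the head first and the node second, the reverse of list_add()/list_add_tail(), and that the item iterators drop the explicit member argument. A minimal sketch of the substitution pattern, assembled only from replacements visible in this diff; struct item, head, add_item() and main() are hypothetical:

    /*
     * Illustrative sketch only -- the dm_list names are the ones that
     * replace the old list.h macros in this commit; everything else
     * (struct item, head, add_item, main) is made up for the example.
     */
    #include <stdlib.h>
    #include <libdevmapper.h>

    struct item {
            struct dm_list list;            /* was: struct list_head list; */
            int value;
    };

    static DM_LIST_INIT(head);              /* was: LIST_HEAD_INIT(head) */

    static int add_item(int value)
    {
            struct item *i = malloc(sizeof(*i));

            if (!i)
                    return -1;
            i->value = value;
            dm_list_init(&i->list);         /* was: INIT_LIST_HEAD(&i->list) */
            dm_list_add(&head, &i->list);   /* appends at the tail; replaces both
                                               list_add() and list_add_tail() here */
            return 0;
    }

    int main(void)
    {
            struct item *i, *n;

            if (add_item(1) || add_item(2))
                    return 1;

            /* was: list_for_each_entry_safe(i, n, &head, list) */
            dm_list_iterate_items_safe(i, n, &head) {
                    dm_list_del(&i->list);  /* was: list_del_init(&i->list) */
                    free(i);
            }

            return dm_list_empty(&head) ? 0 : 1;  /* was: list_empty(&head) */
    }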

View File

@ -41,11 +41,11 @@ endif
CFLAGS += -g
LDFLAGS += $(shell if [ -e /usr/lib64/openais ]; then \
echo '-L/usr/lib64/openais -L/usr/lib64'; \
echo '-L/usr/lib64/openais -L/usr/lib64 -L/lib64'; \
else \
echo '-L/usr/lib/openais -L/usr/lib'; \
echo '-L/usr/lib/openais -L/usr/lib -L/lib'; \
fi)
LDFLAGS += -lcpg -lSaCkpt -lext2fs
LDFLAGS += -lcpg -lSaCkpt -lext2fs -ldevmapper
all: ${TARGET}

View File

@ -15,7 +15,7 @@
#include <openais/saCkpt.h>
#include "linux/dm-log-userspace.h"
#include "list.h"
#include <libdevmapper.h>
#include "functions.h"
#include "local.h"
#include "common.h"
@ -96,7 +96,7 @@ struct checkpoint_data {
#define MAX_CHECKPOINT_REQUESTERS 10
struct clog_cpg {
struct list_head list;
struct dm_list list;
uint32_t lowest_id;
cpg_handle_t handle;
@ -109,8 +109,8 @@ struct clog_cpg {
int free_me;
int delay;
int resend_requests;
struct list_head startup_list;
struct list_head working_list;
struct dm_list startup_list;
struct dm_list working_list;
int checkpoints_needed;
uint32_t checkpoint_requesters[MAX_CHECKPOINT_REQUESTERS];
@ -119,8 +119,7 @@ struct clog_cpg {
char debugging[DEBUGGING_HISTORY][128];
};
/* FIXME: Need lock for this */
static struct list_head clog_cpg_list;
static struct dm_list clog_cpg_list;
/*
* cluster_send
@ -136,7 +135,7 @@ int cluster_send(struct clog_request *rq)
struct iovec iov;
struct clog_cpg *entry, *tmp;
list_for_each_entry_safe(entry, tmp, &clog_cpg_list, list)
dm_list_iterate_items(entry, &clog_cpg_list)
if (!strncmp(entry->name.value, rq->u_rq.uuid,
CPG_MAX_NAME_LENGTH)) {
found = 1;
@ -196,16 +195,16 @@ int cluster_send(struct clog_request *rq)
}
static struct clog_request *get_matching_rq(struct clog_request *rq,
struct list_head *l)
struct dm_list *l)
{
struct clog_request *match, *n;
list_for_each_entry_safe(match, n, l, list) {
dm_list_iterate_items_safe(match, n, l)
if (match->u_rq.seq == rq->u_rq.seq) {
list_del_init(&match->list);
dm_list_del(&match->list);
return match;
}
}
return NULL;
}
@ -284,15 +283,14 @@ static int handle_cluster_response(struct clog_cpg *entry,
rq->u_rq.seq);
LOG_ERROR("Current local list:");
if (list_empty(&entry->working_list))
if (dm_list_empty(&entry->working_list))
LOG_ERROR(" [none]");
list_for_each_entry_safe(orig_rq, n, &entry->working_list, list) {
dm_list_iterate_items(orig_rq, &entry->working_list)
LOG_ERROR(" [%s] %s:%u",
SHORT_UUID(orig_rq->u_rq.uuid),
_RQ_TYPE(orig_rq->u_rq.request_type),
orig_rq->u_rq.seq);
}
return -EINVAL;
}
@ -321,10 +319,9 @@ static struct clog_cpg *find_clog_cpg(cpg_handle_t handle)
{
struct clog_cpg *match, *tmp;
list_for_each_entry_safe(match, tmp, &clog_cpg_list, list) {
dm_list_iterate_items(match, &clog_cpg_list)
if (match->handle == handle)
return match;
}
return NULL;
}
@ -569,7 +566,7 @@ rr_create_retry:
}
memset(rq, 0, sizeof(*rq));
INIT_LIST_HEAD(&rq->list);
dm_list_init(&rq->list);
rq->u_rq.request_type = DM_ULOG_CHECKPOINT_READY;
rq->originator = cp->requester; /* FIXME: hack to overload meaning of originator */
strncpy(rq->u_rq.uuid, cp->uuid, CPG_MAX_NAME_LENGTH);
@ -794,8 +791,8 @@ static int resend_requests(struct clog_cpg *entry)
entry->resend_requests = 0;
list_for_each_entry_safe(rq, n, &entry->working_list, list) {
list_del_init(&rq->list);
dm_list_iterate_items_safe(rq, n, &entry->working_list) {
dm_list_del(&rq->list);
if (strcmp(entry->name.value, rq->u_rq.uuid)) {
LOG_ERROR("[%s] Stray request from another log (%s)",
@ -857,7 +854,7 @@ static int do_cluster_work(void *data)
int r = SA_AIS_OK;
struct clog_cpg *entry, *tmp;
list_for_each_entry_safe(entry, tmp, &clog_cpg_list, list) {
dm_list_iterate_items(entry, &clog_cpg_list) {
r = cpg_dispatch(entry->handle, CPG_DISPATCH_ALL);
if (r != SA_AIS_OK)
LOG_ERROR("cpg_dispatch failed: %s", str_ais_error(r));
@ -881,8 +878,8 @@ static int flush_startup_list(struct clog_cpg *entry)
struct clog_request *rq, *n;
struct checkpoint_data *new;
list_for_each_entry_safe(rq, n, &entry->startup_list, list) {
list_del_init(&rq->list);
dm_list_iterate_items_safe(rq, n, &entry->startup_list) {
dm_list_del(&rq->list);
if (rq->u_rq.request_type == DM_ULOG_MEMBER_JOIN) {
new = prepare_checkpoint(entry, rq->originator);
@ -959,8 +956,8 @@ static void cpg_message_callback(cpg_handle_t handle, struct cpg_name *gname,
return;
}
memcpy(tmp_rq, rq, sizeof(*rq) + rq->u_rq.data_size);
INIT_LIST_HEAD(&tmp_rq->list);
list_add_tail(&tmp_rq->list, &match->working_list);
dm_list_init(&tmp_rq->list);
dm_list_add( &match->working_list, &tmp_rq->list);
}
if (rq->u_rq.request_type == DM_ULOG_POSTSUSPEND) {
@ -979,16 +976,15 @@ static void cpg_message_callback(cpg_handle_t handle, struct cpg_name *gname,
match->resend_requests = 1;
LOG_COND(log_resend_requests, "[%s] %u is leaving, resend required%s",
SHORT_UUID(rq->u_rq.uuid), nodeid,
(list_empty(&match->working_list)) ? " -- working_list empty": "");
(dm_list_empty(&match->working_list)) ? " -- working_list empty": "");
list_for_each_entry_safe(tmp_rq, n, &match->working_list, list) {
dm_list_iterate_items(tmp_rq, &match->working_list)
LOG_COND(log_resend_requests,
"[%s] %s/%u",
SHORT_UUID(tmp_rq->u_rq.uuid),
_RQ_TYPE(tmp_rq->u_rq.request_type),
tmp_rq->u_rq.seq);
}
}
match->delay++;
LOG_COND(log_resend_requests, "[%s] %u is leaving, delay = %d",
@ -1067,8 +1063,8 @@ static void cpg_message_callback(cpg_handle_t handle, struct cpg_name *gname,
memcpy(tmp_rq, rq, sizeof(*rq) + rq->u_rq.data_size);
tmp_rq->pit_server = match->lowest_id;
INIT_LIST_HEAD(&tmp_rq->list);
list_add_tail(&tmp_rq->list, &match->startup_list);
dm_list_init(&tmp_rq->list);
dm_list_add(&match->startup_list, &tmp_rq->list);
goto out;
}
@ -1182,7 +1178,7 @@ static void cpg_join_callback(struct clog_cpg *match,
* FIXME: remove checkpoint_requesters/checkpoints_needed, and use
* the startup_list interface exclusively
*/
if (list_empty(&match->startup_list) && (match->state == VALID) &&
if (dm_list_empty(&match->startup_list) && (match->state == VALID) &&
(match->checkpoints_needed < MAX_CHECKPOINT_REQUESTERS)) {
match->checkpoint_requesters[match->checkpoints_needed++] = joined->nodeid;
goto out;
@ -1198,8 +1194,8 @@ static void cpg_join_callback(struct clog_cpg *match,
}
rq->u_rq.request_type = DM_ULOG_MEMBER_JOIN;
rq->originator = joined->nodeid;
INIT_LIST_HEAD(&rq->list);
list_add_tail(&rq->list, &match->startup_list);
dm_list_init(&rq->list);
dm_list_add(&match->startup_list, &rq->list);
out:
/* Find the lowest_id, i.e. the server */
@ -1241,15 +1237,15 @@ static void cpg_leave_callback(struct clog_cpg *match,
/* Am I leaving? */
if (my_cluster_id == left->nodeid) {
LOG_DBG("Finalizing leave...");
list_del_init(&match->list);
dm_list_del(&match->list);
cpg_fd_get(match->handle, &fd);
links_unregister(fd);
cluster_postsuspend(match->name.value, match->luid);
list_for_each_entry_safe(rq, n, &match->working_list, list) {
list_del_init(&rq->list);
dm_list_iterate_items_safe(rq, n, &match->working_list) {
dm_list_del(&rq->list);
if (rq->u_rq.request_type == DM_ULOG_POSTSUSPEND)
kernel_send(&rq->u_rq);
@ -1278,13 +1274,13 @@ static void cpg_leave_callback(struct clog_cpg *match,
SHORT_UUID(match->name.value), left->nodeid);
free_checkpoint(c_cp);
}
list_for_each_entry_safe(rq, n, &match->startup_list, list) {
dm_list_iterate_items_safe(rq, n, &match->startup_list) {
if ((rq->u_rq.request_type == DM_ULOG_MEMBER_JOIN) &&
(rq->originator == left->nodeid)) {
LOG_COND(log_checkpoint,
"[%s] Removing pending ckpt from startup list (%u is leaving)",
SHORT_UUID(match->name.value), left->nodeid);
list_del_init(&rq->list);
dm_list_del(&rq->list);
free(rq);
}
}
@ -1300,11 +1296,11 @@ static void cpg_leave_callback(struct clog_cpg *match,
if (left->nodeid < my_cluster_id) {
match->delay = (match->delay > 0) ? match->delay - 1 : 0;
if (!match->delay && list_empty(&match->working_list))
if (!match->delay && dm_list_empty(&match->working_list))
match->resend_requests = 0;
LOG_COND(log_resend_requests, "[%s] %u has left, delay = %d%s",
SHORT_UUID(match->name.value), left->nodeid,
match->delay, (list_empty(&match->working_list)) ?
match->delay, (dm_list_empty(&match->working_list)) ?
" -- working_list empty": "");
}
@ -1344,10 +1340,9 @@ static void cpg_leave_callback(struct clog_cpg *match,
*/
i = 1; /* We do not have a DM_ULOG_MEMBER_JOIN entry of our own */
list_for_each_entry_safe(rq, n, &match->startup_list, list) {
dm_list_iterate_items(rq, &match->startup_list)
if (rq->u_rq.request_type == DM_ULOG_MEMBER_JOIN)
i++;
}
if (i == member_list_entries) {
/*
@ -1371,7 +1366,7 @@ static void cpg_config_callback(cpg_handle_t handle, struct cpg_name *gname,
struct clog_cpg *match, *tmp;
int found = 0;
list_for_each_entry_safe(match, tmp, &clog_cpg_list, list)
dm_list_iterate_items(match, &clog_cpg_list)
if (match->handle == handle) {
found = 1;
break;
@ -1455,7 +1450,7 @@ int create_cluster_cpg(char *uuid, uint64_t luid)
struct clog_cpg *new = NULL;
struct clog_cpg *tmp, *tmp2;
list_for_each_entry_safe(tmp, tmp2, &clog_cpg_list, list)
dm_list_iterate_items(tmp, &clog_cpg_list)
if (!strncmp(tmp->name.value, uuid, CPG_MAX_NAME_LENGTH)) {
LOG_ERROR("Log entry already exists: %s", uuid);
return -EEXIST;
@ -1467,10 +1462,10 @@ int create_cluster_cpg(char *uuid, uint64_t luid)
return -ENOMEM;
}
memset(new, 0, sizeof(*new));
INIT_LIST_HEAD(&new->list);
dm_list_init(&new->list);
new->lowest_id = 0xDEAD;
INIT_LIST_HEAD(&new->startup_list);
INIT_LIST_HEAD(&new->working_list);
dm_list_init(&new->startup_list);
dm_list_init(&new->working_list);
size = ((strlen(uuid) + 1) > CPG_MAX_NAME_LENGTH) ?
CPG_MAX_NAME_LENGTH : (strlen(uuid) + 1);
@ -1501,7 +1496,7 @@ int create_cluster_cpg(char *uuid, uint64_t luid)
}
new->cpg_state = VALID;
list_add(&new->list, &clog_cpg_list);
dm_list_add(&clog_cpg_list, &new->list);
LOG_DBG("New handle: %llu", (unsigned long long)new->handle);
LOG_DBG("New name: %s", new->name.value);
@ -1519,8 +1514,8 @@ static void abort_startup(struct clog_cpg *del)
LOG_DBG("[%s] CPG teardown before checkpoint received",
SHORT_UUID(del->name.value));
list_for_each_entry_safe(rq, n, &del->startup_list, list) {
list_del_init(&rq->list);
dm_list_iterate_items_safe(rq, n, &del->startup_list) {
dm_list_del(&rq->list);
LOG_DBG("[%s] Ignoring request from %u: %s",
SHORT_UUID(del->name.value), rq->originator,
@ -1563,7 +1558,7 @@ static int _destroy_cluster_cpg(struct clog_cpg *del)
* startup list. If so, we certainly don't want to
* clear the startup_list here by calling abort_startup
*/
if (!list_empty(&del->startup_list) && (state != VALID))
if (!dm_list_empty(&del->startup_list) && (state != VALID))
abort_startup(del);
r = cpg_leave(del->handle, &del->name);
@ -1576,7 +1571,7 @@ int destroy_cluster_cpg(char *uuid)
{
struct clog_cpg *del, *tmp;
list_for_each_entry_safe(del, tmp, &clog_cpg_list, list)
dm_list_iterate_items_safe(del, tmp, &clog_cpg_list)
if (!strncmp(del->name.value, uuid, CPG_MAX_NAME_LENGTH))
_destroy_cluster_cpg(del);
@ -1587,7 +1582,7 @@ int init_cluster(void)
{
SaAisErrorT rv;
INIT_LIST_HEAD(&clog_cpg_list);
dm_list_init(&clog_cpg_list);
rv = saCkptInitialize(&ckpt_handle, &callbacks, &version);
if (rv != SA_AIS_OK)
@ -1614,7 +1609,7 @@ void cluster_debug(void)
LOG_ERROR("");
LOG_ERROR("CLUSTER COMPONENT DEBUGGING::");
list_for_each_entry_safe(entry, tmp, &clog_cpg_list, list) {
dm_list_iterate_items(entry, &clog_cpg_list) {
LOG_ERROR("%s::", SHORT_UUID(entry->name.value));
LOG_ERROR(" lowest_id : %u", entry->lowest_id);
LOG_ERROR(" state : %s", (entry->state == INVALID) ?
@ -1633,16 +1628,14 @@ void cluster_debug(void)
break;
LOG_ERROR(" CKPTs waiting : %d", i);
LOG_ERROR(" Working list:");
list_for_each_entry_safe(rq, n, &entry->working_list, list) {
dm_list_iterate_items(rq, &entry->working_list)
LOG_ERROR(" %s/%u", _RQ_TYPE(rq->u_rq.request_type),
rq->u_rq.seq);
}
LOG_ERROR(" Startup list:");
list_for_each_entry_safe(rq, n, &entry->startup_list, list) {
dm_list_iterate_items(rq, &entry->startup_list)
LOG_ERROR(" %s/%u", _RQ_TYPE(rq->u_rq.request_type),
rq->u_rq.seq);
}
LOG_ERROR("Command History:");
for (i = 0; i < DEBUGGING_HISTORY; i++) {

View File

@ -1,7 +1,7 @@
#ifndef __CLUSTER_LOG_CLUSTER_DOT_H__
#define __CLUSTER_LOG_CLUSTER_DOT_H__
#include "list.h"
#include <libdevmapper.h>
#include <linux/dm-log-userspace.h>
/*
@ -12,7 +12,7 @@
* available.
*/
struct clog_request {
struct list_head list;
struct dm_list list;
/*
* 'originator' is the machine from which the requests

View File

@ -13,7 +13,6 @@
#include <fcntl.h>
#include <time.h>
#include "linux/dm-log-userspace.h"
#include "list.h"
#include "functions.h"
#include "common.h"
#include "cluster.h"
@ -46,7 +45,7 @@ struct log_header {
};
struct log_c {
struct list_head list;
struct dm_list list;
char uuid[DM_UUID_LEN];
uint64_t luid;
@ -76,7 +75,7 @@ struct log_c {
uint32_t state; /* current operational state of the log */
struct list_head mark_list;
struct dm_list mark_list;
uint32_t recovery_halted;
struct recovery_request *recovery_request_list;
@ -91,7 +90,7 @@ struct log_c {
};
struct mark_entry {
struct list_head list;
struct dm_list list;
uint32_t nodeid;
uint64_t region;
};
@ -101,9 +100,8 @@ struct recovery_request {
struct recovery_request *next;
};
static struct list_head log_list = LIST_HEAD_INIT(log_list);
static struct list_head log_pending_list = LIST_HEAD_INIT(log_pending_list);
static DM_LIST_INIT(log_list);
static DM_LIST_INIT(log_pending_list);
static int log_test_bit(uint32_t *bs, unsigned bit)
{
@ -151,16 +149,12 @@ static uint64_t count_bits32(uint32_t *addr, uint32_t count)
*/
static struct log_c *get_log(const char *uuid, uint64_t luid)
{
struct list_head *l;
struct log_c *lc;
/* FIXME: Need prefetch to do this right */
__list_for_each(l, &log_list) {
lc = list_entry(l, struct log_c, list);
dm_list_iterate_items(lc, &log_list)
if (!strcmp(lc->uuid, uuid) &&
(!luid || (luid == lc->luid)))
return lc;
}
return NULL;
}
@ -175,16 +169,12 @@ static struct log_c *get_log(const char *uuid, uint64_t luid)
*/
static struct log_c *get_pending_log(const char *uuid, uint64_t luid)
{
struct list_head *l;
struct log_c *lc;
/* FIXME: Need prefetch to do this right */
__list_for_each(l, &log_pending_list) {
lc = list_entry(l, struct log_c, list);
dm_list_iterate_items(lc, &log_pending_list)
if (!strcmp(lc->uuid, uuid) &&
(!luid || (luid == lc->luid)))
return lc;
}
return NULL;
}
@ -459,7 +449,7 @@ static int _clog_ctr(char *uuid, uint64_t luid,
return -EINVAL;
}
INIT_LIST_HEAD(&lc->mark_list);
dm_list_init(&lc->mark_list);
lc->bitset_uint32_count = region_count /
(sizeof(*lc->clean_bits) << BYTE_SHIFT);
@ -513,7 +503,7 @@ static int _clog_ctr(char *uuid, uint64_t luid,
LOG_DBG("Disk log ready");
}
list_add(&lc->list, &log_pending_list);
dm_list_add(&log_pending_list, &lc->list);
return 0;
fail:
@ -634,7 +624,7 @@ static int clog_dtr(struct dm_ulog_request *rq)
LOG_DBG("[%s] Cluster log removed", SHORT_UUID(lc->uuid));
list_del_init(&lc->list);
dm_list_del(&lc->list);
if (lc->disk_fd != -1)
close(lc->disk_fd);
if (lc->disk_buffer)
@ -705,8 +695,8 @@ int cluster_postsuspend(char *uuid, uint64_t luid)
lc->resume_override = 0;
/* move log to pending list */
list_del_init(&lc->list);
list_add(&lc->list, &log_pending_list);
dm_list_del(&lc->list);
dm_list_add(&log_pending_list, &lc->list);
return 0;
}
@ -894,8 +884,8 @@ int local_resume(struct dm_ulog_request *rq)
}
/* move log to official list */
list_del_init(&lc->list);
list_add(&lc->list, &log_list);
dm_list_del(&lc->list);
dm_list_add(&log_list, &lc->list);
}
return 0;
@ -1030,17 +1020,13 @@ static int mark_region(struct log_c *lc, uint64_t region, uint32_t who)
{
int found = 0;
struct mark_entry *m;
struct list_head *p, *n;
list_for_each_safe(p, n, &lc->mark_list) {
/* FIXME: Use proper macros */
m = (struct mark_entry *)p;
dm_list_iterate_items(m, &lc->mark_list)
if (m->region == region) {
found = 1;
if (m->nodeid == who)
return 0;
}
}
if (!found)
log_clear_bit(lc, lc->clean_bits, region);
@ -1058,7 +1044,7 @@ static int mark_region(struct log_c *lc, uint64_t region, uint32_t who)
m->nodeid = who;
m->region = region;
list_add_tail(&m->list, &lc->mark_list);
dm_list_add(&lc->mark_list, &m->list);
return 0;
}
@ -1104,20 +1090,16 @@ static int clog_mark_region(struct dm_ulog_request *rq, uint32_t originator)
static int clear_region(struct log_c *lc, uint64_t region, uint32_t who)
{
int other_matches = 0;
struct mark_entry *m;
struct list_head *p, *n;
struct mark_entry *m, *n;
list_for_each_safe(p, n, &lc->mark_list) {
/* FIXME: Use proper macros */
m = (struct mark_entry *)p;
dm_list_iterate_items_safe(m, n, &lc->mark_list)
if (m->region == region) {
if (m->nodeid == who) {
list_del_init(&m->list);
dm_list_del(&m->list);
free(m);
} else
other_matches = 1;
}
}
/*
* Clear region if:
@ -1805,12 +1787,7 @@ int log_get_state(struct dm_ulog_request *rq)
*/
int log_status(void)
{
struct list_head *l;
__list_for_each(l, &log_list)
return 1;
__list_for_each(l, &log_pending_list)
if (!dm_list_empty(&log_list) || !dm_list_empty(&log_pending_list))
return 1;
return 0;
@ -1818,7 +1795,6 @@ int log_status(void)
void log_debug(void)
{
struct list_head *l;
struct log_c *lc;
uint64_t r;
int i;
@ -1827,8 +1803,7 @@ void log_debug(void)
LOG_ERROR("LOG COMPONENT DEBUGGING::");
LOG_ERROR("Official log list:");
LOG_ERROR("Pending log list:");
__list_for_each(l, &log_pending_list) {
lc = list_entry(l, struct log_c, list);
dm_list_iterate_items(lc, &log_pending_list) {
LOG_ERROR("%s", lc->uuid);
LOG_ERROR("sync_bits:");
print_bits((char *)lc->sync_bits,
@ -1838,8 +1813,7 @@ void log_debug(void)
lc->bitset_uint32_count * sizeof(*lc->clean_bits), 1);
}
__list_for_each(l, &log_list) {
lc = list_entry(l, struct log_c, list);
dm_list_iterate_items(lc, &log_list) {
LOG_ERROR("%s", lc->uuid);
LOG_ERROR(" recoverer : %u", lc->recoverer);
LOG_ERROR(" recovering_region: %llu",
@ -1853,7 +1827,6 @@ void log_debug(void)
print_bits((char *)lc->clean_bits,
lc->bitset_uint32_count * sizeof(*lc->clean_bits), 1);
lc = list_entry(l, struct log_c, list);
LOG_ERROR("Validating %s::", SHORT_UUID(lc->uuid));
r = find_next_zero_bit(lc->sync_bits, lc->region_count, 0);
LOG_ERROR(" lc->region_count = %llu",

View File

@ -1,471 +0,0 @@
#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H
/*
* These are non-NULL pointers that will result in page faults
* under normal circumstances, used to verify that nobody uses
* non-initialized list entries.
*/
#define LIST_POISON1 ((void *) 0x00100100)
#define LIST_POISON2 ((void *) 0x00200200)
/*
* Simple doubly linked list implementation.
*
* Some of the internal functions ("__xxx") are useful when
* manipulating whole lists rather than single entries, as
* sometimes we already know the next/prev entries and we can
* generate better code by using them directly rather than
* using the generic single-entry routines.
*/
struct list_head {
struct list_head *next, *prev;
};
#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define LIST_HEAD(name) \
struct list_head name = LIST_HEAD_INIT(name)
#define INIT_LIST_HEAD(ptr) do { \
(ptr)->next = (ptr); (ptr)->prev = (ptr); \
} while (0)
/*
* Insert a new entry between two known consecutive entries.
*
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
static inline void __list_add(struct list_head *new,
struct list_head *prev,
struct list_head *next)
{
next->prev = new;
new->next = next;
new->prev = prev;
prev->next = new;
}
/**
* list_add - add a new entry
* @new: new entry to be added
* @head: list head to add it after
*
* Insert a new entry after the specified head.
* This is good for implementing stacks.
*/
static inline void list_add(struct list_head *new, struct list_head *head)
{
__list_add(new, head, head->next);
}
/**
* list_add_tail - add a new entry
* @new: new entry to be added
* @head: list head to add it before
*
* Insert a new entry before the specified head.
* This is useful for implementing queues.
*/
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
__list_add(new, head->prev, head);
}
/*
* Delete a list entry by making the prev/next entries
* point to each other.
*
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
static inline void __list_del(struct list_head * prev, struct list_head * next)
{
next->prev = prev;
prev->next = next;
}
/**
* list_del - deletes entry from list.
* @entry: the element to delete from the list.
* Note: list_empty on entry does not return true after this, the entry is
* in an undefined state.
*/
static inline void list_del(struct list_head *entry)
{
__list_del(entry->prev, entry->next);
entry->next = LIST_POISON1;
entry->prev = LIST_POISON2;
}
/**
* list_del_init - deletes entry from list and reinitialize it.
* @entry: the element to delete from the list.
*/
static inline void list_del_init(struct list_head *entry)
{
__list_del(entry->prev, entry->next);
INIT_LIST_HEAD(entry);
}
/**
* list_move - delete from one list and add as another's head
* @list: the entry to move
* @head: the head that will precede our entry
*/
static inline void list_move(struct list_head *list, struct list_head *head)
{
__list_del(list->prev, list->next);
list_add(list, head);
}
/**
* list_move_tail - delete from one list and add as another's tail
* @list: the entry to move
* @head: the head that will follow our entry
*/
static inline void list_move_tail(struct list_head *list,
struct list_head *head)
{
__list_del(list->prev, list->next);
list_add_tail(list, head);
}
/**
* list_empty - tests whether a list is empty
* @head: the list to test.
*/
static inline int list_empty(const struct list_head *head)
{
return head->next == head;
}
/**
* list_empty_careful - tests whether a list is
* empty _and_ checks that no other CPU might be
* in the process of still modifying either member
*
* NOTE: using list_empty_careful() without synchronization
* can only be safe if the only activity that can happen
* to the list entry is list_del_init(). Eg. it cannot be used
* if another CPU could re-list_add() it.
*
* @head: the list to test.
*/
static inline int list_empty_careful(const struct list_head *head)
{
struct list_head *next = head->next;
return (next == head) && (next == head->prev);
}
static inline void __list_splice(struct list_head *list,
struct list_head *head)
{
struct list_head *first = list->next;
struct list_head *last = list->prev;
struct list_head *at = head->next;
first->prev = head;
head->next = first;
last->next = at;
at->prev = last;
}
/**
* list_splice - join two lists
* @list: the new list to add.
* @head: the place to add it in the first list.
*/
static inline void list_splice(struct list_head *list, struct list_head *head)
{
if (!list_empty(list))
__list_splice(list, head);
}
/**
* list_splice_init - join two lists and reinitialise the emptied list.
* @list: the new list to add.
* @head: the place to add it in the first list.
*
* The list at @list is reinitialised
*/
static inline void list_splice_init(struct list_head *list,
struct list_head *head)
{
if (!list_empty(list)) {
__list_splice(list, head);
INIT_LIST_HEAD(list);
}
}
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
/**
* container_of - cast a member of a structure out to the containing structure
*
* @ptr: the pointer to the member.
* @type: the type of the container struct this is embedded in.
* @member: the name of the member within the struct.
*
*/
#define container_of(ptr, type, member) ({ \
const typeof( ((type *)0)->member ) *__mptr = (ptr); \
(type *)( (char *)__mptr - offsetof(type,member) );})
/**
* list_entry - get the struct for this entry
* @ptr: the &struct list_head pointer.
* @type: the type of the struct this is embedded in.
* @member: the name of the list_struct within the struct.
*/
#define list_entry(ptr, type, member) \
container_of(ptr, type, member)
/**
* list_for_each - iterate over a list
* @pos: the &struct list_head to use as a loop counter.
* @head: the head for your list.
*/
#define list_for_each(pos, head) \
for (pos = (head)->next; prefetch(pos->next), pos != (head); \
pos = pos->next)
/**
* __list_for_each - iterate over a list
* @pos: the &struct list_head to use as a loop counter.
* @head: the head for your list.
*
* This variant differs from list_for_each() in that it's the
* simplest possible list iteration code, no prefetching is done.
* Use this for code that knows the list to be very short (empty
* or 1 entry) most of the time.
*/
#define __list_for_each(pos, head) \
for (pos = (head)->next; pos != (head); pos = pos->next)
/**
* list_for_each_prev - iterate over a list backwards
* @pos: the &struct list_head to use as a loop counter.
* @head: the head for your list.
*/
#define list_for_each_prev(pos, head) \
for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
pos = pos->prev)
/**
* list_for_each_safe - iterate over a list safe against removal of list entry
* @pos: the &struct list_head to use as a loop counter.
* @n: another &struct list_head to use as temporary storage
* @head: the head for your list.
*/
#define list_for_each_safe(pos, n, head) \
for (pos = (head)->next, n = pos->next; pos != (head); \
pos = n, n = pos->next)
/**
* list_for_each_entry - iterate over list of given type
* @pos: the type * to use as a loop counter.
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry(pos, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member); \
prefetch(pos->member.next), &pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member))
/**
* list_for_each_entry_reverse - iterate backwards over list of given type.
* @pos: the type * to use as a loop counter.
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry_reverse(pos, head, member) \
for (pos = list_entry((head)->prev, typeof(*pos), member); \
prefetch(pos->member.prev), &pos->member != (head); \
pos = list_entry(pos->member.prev, typeof(*pos), member))
/**
* list_prepare_entry - prepare a pos entry for use as a start point in
* list_for_each_entry_continue
* @pos: the type * to use as a start point
* @head: the head of the list
* @member: the name of the list_struct within the struct.
*/
#define list_prepare_entry(pos, head, member) \
((pos) ? : list_entry(head, typeof(*pos), member))
/**
* list_for_each_entry_continue - iterate over list of given type
* continuing after existing point
* @pos: the type * to use as a loop counter.
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry_continue(pos, head, member) \
for (pos = list_entry(pos->member.next, typeof(*pos), member); \
prefetch(pos->member.next), &pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member))
/**
* list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
* @pos: the type * to use as a loop counter.
* @n: another type * to use as temporary storage
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry_safe(pos, n, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member), \
n = list_entry(pos->member.next, typeof(*pos), member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.next, typeof(*n), member))
/*
* Double linked lists with a single pointer list head.
* Mostly useful for hash tables where the two pointer list head is
* too wasteful.
* You lose the ability to access the tail in O(1).
*/
struct hlist_head {
struct hlist_node *first;
};
struct hlist_node {
struct hlist_node *next, **pprev;
};
#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
#define INIT_HLIST_NODE(ptr) ((ptr)->next = NULL, (ptr)->pprev = NULL)
static inline int hlist_unhashed(const struct hlist_node *h)
{
return !h->pprev;
}
static inline int hlist_empty(const struct hlist_head *h)
{
return !h->first;
}
static inline void __hlist_del(struct hlist_node *n)
{
struct hlist_node *next = n->next;
struct hlist_node **pprev = n->pprev;
*pprev = next;
if (next)
next->pprev = pprev;
}
static inline void hlist_del(struct hlist_node *n)
{
__hlist_del(n);
n->next = LIST_POISON1;
n->pprev = LIST_POISON2;
}
static inline void hlist_del_init(struct hlist_node *n)
{
if (n->pprev) {
__hlist_del(n);
INIT_HLIST_NODE(n);
}
}
static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
struct hlist_node *first = h->first;
n->next = first;
if (first)
first->pprev = &n->next;
h->first = n;
n->pprev = &h->first;
}
/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
struct hlist_node *next)
{
n->pprev = next->pprev;
n->next = next;
next->pprev = &n->next;
*(n->pprev) = n;
}
static inline void hlist_add_after(struct hlist_node *n,
struct hlist_node *next)
{
next->next = n->next;
n->next = next;
next->pprev = &n->next;
if(next->next)
next->next->pprev = &next->next;
}
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
#define hlist_for_each(pos, head) \
for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
pos = pos->next)
#define hlist_for_each_safe(pos, n, head) \
for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
pos = n)
/**
* hlist_for_each_entry - iterate over list of given type
* @tpos: the type * to use as a loop counter.
* @pos: the &struct hlist_node to use as a loop counter.
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry(tpos, pos, head, member) \
for (pos = (head)->first; \
pos && ({ prefetch(pos->next); 1;}) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)
/**
* hlist_for_each_entry_continue - iterate over a hlist continuing after existing point
* @tpos: the type * to use as a loop counter.
* @pos: the &struct hlist_node to use as a loop counter.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_continue(tpos, pos, member) \
for (pos = (pos)->next; \
pos && ({ prefetch(pos->next); 1;}) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)
/**
* hlist_for_each_entry_from - iterate over a hlist continuing from existing point
* @tpos: the type * to use as a loop counter.
* @pos: the &struct hlist_node to use as a loop counter.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_from(tpos, pos, member) \
for (; pos && ({ prefetch(pos->next); 1;}) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)
/**
* hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
* @tpos: the type * to use as a loop counter.
* @pos: the &struct hlist_node to use as a loop counter.
* @n: another &struct hlist_node to use as temporary storage
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
for (pos = (head)->first; \
pos && ({ n = pos->next; 1; }) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = n)
#endif
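
With the private list.h above gone, the container_of()/list_entry() machinery it provided is handled by libdevmapper instead. A rough sketch of how the removed __list_for_each() + list_entry() idiom (see the converted get_log() in the previous file) maps onto dm_list; dm_list_iterate() and dm_list_item() are libdevmapper macros that do not appear in this diff, so treat their use here as an assumption, and struct log_c below is a cut-down stand-in:

    /*
     * Sketch only: dm_list_iterate() walks the raw nodes and dm_list_item()
     * recovers the containing structure (it assumes the embedded member is
     * named 'list', just like the item iterators).  Neither macro appears
     * in this diff; struct log_c here is a stand-in, not the real one.
     */
    #include <string.h>
    #include <libdevmapper.h>

    struct log_c {
            struct dm_list list;
            char uuid[64];
    };

    static DM_LIST_INIT(log_list);

    struct log_c *find_log(const char *uuid)
    {
            struct dm_list *node;
            struct log_c *lc;

            /* was: __list_for_each(l, &log_list) + list_entry(l, struct log_c, list) */
            dm_list_iterate(node, &log_list) {
                    lc = dm_list_item(node, struct log_c);
                    if (!strcmp(lc->uuid, uuid))
                            return lc;
            }

            return NULL;
    }

The converted get_log() skips the intermediate node pointer entirely by using dm_list_iterate_items(), which folds the cast into the loop.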

View File

@ -4,30 +4,6 @@
#include <stdio.h>
#include <syslog.h>
#if (BITS_PER_LONG == 64)
#define PRIu64 "lu"
#define PRId64 "ld"
#define PRIo64 "lo"
#define PRIx64 "lx"
#define PRIX64 "lX"
#define SCNu64 "lu"
#define SCNd64 "ld"
#define SCNo64 "lo"
#define SCNx64 "lx"
#define SCNX64 "lX"
#else
#define PRIu64 "Lu"
#define PRId64 "Ld"
#define PRIo64 "Lo"
#define PRIx64 "Lx"
#define PRIX64 "LX"
#define SCNu64 "Lu"
#define SCNd64 "Ld"
#define SCNo64 "Lo"
#define SCNx64 "Lx"
#define SCNX64 "LX"
#endif
/* SHORT_UUID - print last 8 chars of a string */
#define SHORT_UUID(x) (strlen(x) > 8) ? ((x) + (strlen(x) - 8)) : (x)
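
common.h no longer carries its own 64-bit printf/scanf width macros. Presumably the standard C99 definitions from <inttypes.h> (which libdevmapper.h also pulls in) cover the remaining users; a minimal sketch under that assumption, with report() and region as hypothetical names:

    #include <inttypes.h>
    #include <stdio.h>

    /* Sketch: PRIu64 now comes from <inttypes.h> rather than the local
     * "lu"/"Lu" definitions removed above; report() is hypothetical. */
    void report(uint64_t region)
    {
            printf("recovering region: %" PRIu64 "\n", region);
    }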