Mirror of git://sourceware.org/git/lvm2.git (synced 2025-09-24 21:44:22 +03:00)

Compare commits: 23 commits
SHA1
----
87ef173e0a
52a3fb6bc7
92e2a257a6
32e175752c
d43f7180dc
0129c2b0fc
4ed1990001
5bd6ab27ae
f3593b89fa
23d84b2310
fdc49402ec
5457c133e1
292e588ee3
243494c25e
e4365f3706
310f3038d3
4e6033273d
73718586d3
011abe61e8
fe3a37f89d
8aea44e77b
5529aec0d6
369549d23f
WHATS_NEW (24 lines changed)

@@ -1,6 +1,26 @@
-Version 2.00.34 -
-==================================
+Version 2.01.02 - 21st January 2005
+===================================
+Update clvmd_init_rhel4: use lvm.static and don't load dlm.
+Fix some size_t printing.
+Fix 64 bit xlate consts.
+Split out pool sptype_names to avoid unused const.
+Always fail if random id generation fails.
+Recognise gnbd devices.
+Fix clvmd startup bug introduced in cman/gulm amalgamation.
+Improve reporting of node-specific locking errors.
+
+Version 2.01.01 - 19th January 2005
+===================================
+Fix clvmd lv_info_by_lvid open_count.
+Store snapshot and origin sizes separately.
+Update vgcreate man page.
+
+Version 2.01.00 - 17th January 2005
+===================================
+Fix vgscan metadata auto-correction.
+Only ask libdevmapper for open_count when we need it.
+Adjust RHEL4 clvmd init script priority.
+Enable building of CMAN & GULM versions of clvmd into a single binary
 
 Version 2.00.33 - 7th January 2005
 ==================================
@@ -1,5 +1,8 @@
-Version 1.01.00 -
-============================
+Version 1.01.01 -
+=============================
+
+Version 1.01.00 - 17 Jan 2005
+=============================
+Add dm_task_no_open_count() to skip getting open_count.
 
 Version 1.00.21 - 7 Jan 2005
@@ -281,12 +281,12 @@ AC_MSG_RESULT($SELINUX)
 dnl -- Build cluster LVM daemon
 AC_MSG_CHECKING(whether to build cluster LVM daemon)
 AC_ARG_WITH(clvmd,
-[ --with-clvmd=TYPE Build cluster LVM Daemon: cman/gulm/none
+[ --with-clvmd=TYPE Build cluster LVM Daemon: cman/gulm/none/all
 [TYPE=none] ],
 [ CLVMD="$withval" ],
 [ CLVMD="none" ])
 if test x$CLVMD = xyes; then
-CLVMD=cman
+CLVMD=all
 fi
 AC_MSG_RESULT($CLVMD)
 
@@ -481,7 +481,7 @@ if test x$READLINE = xyes; then
 AC_CHECK_HEADERS(readline/readline.h readline/history.h,,AC_MSG_ERROR(bailing out))
 fi
 
-if test x$CLVMD = xyes; then
+if test x$CLVMD != xnone; then
 AC_CHECK_HEADERS(mntent.h netdb.h netinet/in.h pthread.h search.h sys/mount.h sys/socket.h sys/uio.h sys/un.h utmpx.h,,AC_MSG_ERROR(bailing out))
 AC_CHECK_FUNCS(dup2 getmntent memmove select socket,,AC_MSG_ERROR(bailing out))
 AC_FUNC_GETMNTENT
@@ -18,19 +18,32 @@ VPATH = @srcdir@
 SOURCES = \
 clvmd-command.c \
 clvmd.c \
-libclvm.c \
 lvm-functions.c \
 system-lv.c
 
 ifeq ("@CLVMD@", "gulm")
+GULM = yes
+endif
+
+ifeq ("@CLVMD@", "cman")
+CMAN = yes
+endif
+
+ifeq ("@CLVMD@", "all")
+GULM = yes
+CMAN = yes
+endif
+
+ifeq ("$(GULM)", "yes")
 SOURCES += clvmd-gulm.c tcp-comms.c
 LMLIBS += -lccs -lgulm
 CFLAGS += -DUSE_GULM
 endif
 
-ifeq ("@CLVMD@", "cman")
+ifeq ("$(CMAN)", "yes")
 SOURCES += clvmd-cman.c
 LMLIBS += -ldlm
 CFLAGS += -DUSE_CMAN
 endif
 
 TARGETS = \
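With --with-clvmd=all the configure and Makefile changes above build both lock-manager backends into the one clvmd binary: each selected backend adds its sources, its libraries and its -DUSE_CMAN or -DUSE_GULM flag. As a rough illustration of what those defines give the C code, here is a stand-alone sketch (not taken from the tree) that can be compiled with any mix of the two flags:

    /* sketch.c - illustrative only.  Build with any combination of
     * -DUSE_CMAN and -DUSE_GULM to mimic the Makefile above. */
    #include <stdio.h>

    int main(void)
    {
        int backends = 0;
    #ifdef USE_CMAN
        printf("cman support compiled in\n");
        backends++;
    #endif
    #ifdef USE_GULM
        printf("gulm support compiled in\n");
        backends++;
    #endif
        printf("%d cluster backend(s) in this binary\n", backends);
        return 0;
    }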
@@ -43,6 +43,7 @@ struct clvm_header {
 /* Flags */
 #define CLVMD_FLAG_LOCAL 1 /* Only do this on the local node */
 #define CLVMD_FLAG_SYSTEMLV 2 /* Data in system LV under my node name */
+#define CLVMD_FLAG_NODEERRS 4 /* Reply has errors in node-specific portion */
 
 /* Name of the local socket to communicate between libclvm and clvmd */
 //static const char CLVMD_SOCKNAME[]="/var/run/clvmd";
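The one-line addition above introduces CLVMD_FLAG_NODEERRS, which matches the 2.01.02 changelog entry about node-specific locking errors: a reply can now flag that the interesting error codes live in its per-node portion rather than in the top-level status. A hedged sketch of how a reader of the reply header might interpret it (the struct here is a reduced stand-in, not the real clvm_header; only fields visible in this diff are modelled):

    #include <stdio.h>

    #define CLVMD_FLAG_LOCAL    1   /* Only do this on the local node */
    #define CLVMD_FLAG_SYSTEMLV 2   /* Data in system LV under my node name */
    #define CLVMD_FLAG_NODEERRS 4   /* Reply has errors in node-specific portion */

    struct reply_view {             /* reduced stand-in for struct clvm_header */
        int status;
        unsigned char flags;
    };

    static void report(const struct reply_view *r)
    {
        if (r->flags & CLVMD_FLAG_NODEERRS)
            printf("failed on some nodes - read the per-node statuses\n");
        else if (r->status)
            printf("failed everywhere: status %d\n", r->status);
        else
            printf("succeeded on all nodes\n");
    }

    int main(void)
    {
        struct reply_view r = { 0, CLVMD_FLAG_NODEERRS };
        report(&r);
        return 0;
    }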
@@ -66,7 +66,7 @@ struct lock_wait {
|
||||
struct dlm_lksb lksb;
|
||||
};
|
||||
|
||||
int init_cluster()
|
||||
static int _init_cluster(void)
|
||||
{
|
||||
struct sockaddr_cl saddr;
|
||||
int port = CLUSTER_PORT_CLVMD;
|
||||
@@ -74,7 +74,7 @@ int init_cluster()
|
||||
/* Open the cluster communication socket */
|
||||
cluster_sock = socket(AF_CLUSTER, SOCK_DGRAM, CLPROTO_CLIENT);
|
||||
if (cluster_sock == -1) {
|
||||
syslog(LOG_ERR, "Can't open cluster manager socket: %m");
|
||||
syslog(LOG_ERR, "Can't open cman cluster manager socket: %m");
|
||||
return -1;
|
||||
}
|
||||
|
||||
@@ -104,18 +104,23 @@ int init_cluster()
|
||||
return 0;
|
||||
}
|
||||
|
||||
int get_main_cluster_fd()
|
||||
static void _cluster_init_completed(void)
|
||||
{
|
||||
clvmd_cluster_init_completed();
|
||||
}
|
||||
|
||||
static int _get_main_cluster_fd()
|
||||
{
|
||||
return cluster_sock;
|
||||
}
|
||||
|
||||
int get_num_nodes()
|
||||
static int _get_num_nodes()
|
||||
{
|
||||
return num_nodes;
|
||||
}
|
||||
|
||||
/* send_message with the fd check removed */
|
||||
int cluster_send_message(void *buf, int msglen, char *csid, const char *errtext)
|
||||
static int _cluster_send_message(void *buf, int msglen, char *csid, const char *errtext)
|
||||
{
|
||||
struct iovec iov[2];
|
||||
struct msghdr msg;
|
||||
@@ -135,7 +140,7 @@ int cluster_send_message(void *buf, int msglen, char *csid, const char *errtext)
|
||||
if (csid) {
|
||||
msg.msg_name = &saddr;
|
||||
msg.msg_namelen = sizeof(saddr);
|
||||
memcpy(&saddr.scl_nodeid, csid, MAX_CSID_LEN);
|
||||
memcpy(&saddr.scl_nodeid, csid, CMAN_MAX_CSID_LEN);
|
||||
} else { /* Cluster broadcast */
|
||||
|
||||
msg.msg_name = NULL;
|
||||
@@ -151,26 +156,26 @@ int cluster_send_message(void *buf, int msglen, char *csid, const char *errtext)
|
||||
return len;
|
||||
}
|
||||
|
||||
void get_our_csid(char *csid)
|
||||
static void _get_our_csid(char *csid)
|
||||
{
|
||||
int i;
|
||||
memset(csid, 0, MAX_CSID_LEN);
|
||||
memset(csid, 0, CMAN_MAX_CSID_LEN);
|
||||
|
||||
for (i = 0; i < num_nodes; i++) {
|
||||
if (nodes[i].us)
|
||||
memcpy(csid, &nodes[i].node_id, MAX_CSID_LEN);
|
||||
memcpy(csid, &nodes[i].node_id, CMAN_MAX_CSID_LEN);
|
||||
}
|
||||
}
|
||||
|
||||
/* Call a callback routine for each node that known (down mean not running a clvmd) */
|
||||
int cluster_do_node_callback(struct local_client *client,
|
||||
static int _cluster_do_node_callback(struct local_client *client,
|
||||
void (*callback) (struct local_client *, char *,
|
||||
int))
|
||||
{
|
||||
int i;
|
||||
int somedown = 0;
|
||||
|
||||
for (i = 0; i < get_num_nodes(); i++) {
|
||||
for (i = 0; i < _get_num_nodes(); i++) {
|
||||
callback(client, (char *)&nodes[i].node_id, node_updown[nodes[i].node_id]);
|
||||
if (!node_updown[nodes[i].node_id])
|
||||
somedown = -1;
|
||||
@@ -202,7 +207,7 @@ static void process_oob_msg(char *buf, int len, int nodeid)
|
||||
}
|
||||
}
|
||||
|
||||
int cluster_fd_callback(struct local_client *client, char *buf, int len, char *csid,
|
||||
static int _cluster_fd_callback(struct local_client *client, char *buf, int len, char *csid,
|
||||
struct local_client **new_client)
|
||||
{
|
||||
struct iovec iov[2];
|
||||
@@ -254,7 +259,7 @@ int cluster_fd_callback(struct local_client *client, char *buf, int len, char *c
|
||||
return len;
|
||||
}
|
||||
|
||||
void add_up_node(char *csid)
|
||||
static void _add_up_node(char *csid)
|
||||
{
|
||||
/* It's up ! */
|
||||
int nodeid = nodeid_from_csid(csid);
|
||||
@@ -278,7 +283,7 @@ void add_up_node(char *csid)
|
||||
DEBUGLOG("Added new node %d to updown list\n", nodeid);
|
||||
}
|
||||
|
||||
void cluster_closedown()
|
||||
static void _cluster_closedown()
|
||||
{
|
||||
unlock_all();
|
||||
dlm_release_lockspace(LOCKSPACE_NAME, lockspace, 1);
|
||||
@@ -307,7 +312,7 @@ static int is_listening(int nodeid)
|
||||
|
||||
/* Populate the list of CLVMDs running.
|
||||
called only at startup time */
|
||||
void count_clvmds_running(void)
|
||||
static void count_clvmds_running(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
@@ -366,13 +371,13 @@ static void get_members()
|
||||
}
|
||||
|
||||
/* Convert a node name to a CSID */
|
||||
int csid_from_name(char *csid, char *name)
|
||||
static int _csid_from_name(char *csid, char *name)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < num_nodes; i++) {
|
||||
if (strcmp(name, nodes[i].name) == 0) {
|
||||
memcpy(csid, &nodes[i].node_id, MAX_CSID_LEN);
|
||||
memcpy(csid, &nodes[i].node_id, CMAN_MAX_CSID_LEN);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
@@ -380,12 +385,12 @@ int csid_from_name(char *csid, char *name)
|
||||
}
|
||||
|
||||
/* Convert a CSID to a node name */
|
||||
int name_from_csid(char *csid, char *name)
|
||||
static int _name_from_csid(char *csid, char *name)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < num_nodes; i++) {
|
||||
if (memcmp(csid, &nodes[i].node_id, MAX_CSID_LEN) == 0) {
|
||||
if (memcmp(csid, &nodes[i].node_id, CMAN_MAX_CSID_LEN) == 0) {
|
||||
strcpy(name, nodes[i].name);
|
||||
return 0;
|
||||
}
|
||||
@@ -396,7 +401,7 @@ int name_from_csid(char *csid, char *name)
|
||||
}
|
||||
|
||||
/* Convert a node ID to a node name */
|
||||
int name_from_nodeid(int nodeid, char *name)
|
||||
static int name_from_nodeid(int nodeid, char *name)
|
||||
{
|
||||
int i;
|
||||
|
||||
@@ -416,12 +421,12 @@ static int nodeid_from_csid(char *csid)
|
||||
{
|
||||
int nodeid;
|
||||
|
||||
memcpy(&nodeid, csid, MAX_CSID_LEN);
|
||||
memcpy(&nodeid, csid, CMAN_MAX_CSID_LEN);
|
||||
|
||||
return nodeid;
|
||||
}
|
||||
|
||||
int is_quorate()
|
||||
static int _is_quorate()
|
||||
{
|
||||
return ioctl(cluster_sock, SIOCCLUSTER_ISQUORATE, 0);
|
||||
}
|
||||
@@ -435,7 +440,7 @@ static void sync_ast_routine(void *arg)
|
||||
pthread_mutex_unlock(&lwait->mutex);
|
||||
}
|
||||
|
||||
int sync_lock(const char *resource, int mode, int flags, int *lockid)
|
||||
static int _sync_lock(const char *resource, int mode, int flags, int *lockid)
|
||||
{
|
||||
int status;
|
||||
struct lock_wait lwait;
|
||||
@@ -478,7 +483,7 @@ int sync_lock(const char *resource, int mode, int flags, int *lockid)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int sync_unlock(const char *resource /* UNUSED */, int lockid)
|
||||
static int _sync_unlock(const char *resource /* UNUSED */, int lockid)
|
||||
{
|
||||
int status;
|
||||
struct lock_wait lwait;
|
||||
@@ -505,3 +510,28 @@ int sync_unlock(const char *resource /* UNUSED */, int lockid)
|
||||
return 0;
|
||||
|
||||
}
|
||||
|
||||
static struct cluster_ops _cluster_cman_ops = {
|
||||
.cluster_init_completed = _cluster_init_completed,
|
||||
.cluster_send_message = _cluster_send_message,
|
||||
.name_from_csid = _name_from_csid,
|
||||
.csid_from_name = _csid_from_name,
|
||||
.get_num_nodes = _get_num_nodes,
|
||||
.cluster_fd_callback = _cluster_fd_callback,
|
||||
.get_main_cluster_fd = _get_main_cluster_fd,
|
||||
.cluster_do_node_callback = _cluster_do_node_callback,
|
||||
.is_quorate = _is_quorate,
|
||||
.get_our_csid = _get_our_csid,
|
||||
.add_up_node = _add_up_node,
|
||||
.cluster_closedown = _cluster_closedown,
|
||||
.sync_lock = _sync_lock,
|
||||
.sync_unlock = _sync_unlock,
|
||||
};
|
||||
|
||||
struct cluster_ops *init_cman_cluster(void)
|
||||
{
|
||||
if (!_init_cluster())
|
||||
return &_cluster_cman_ops;
|
||||
else
|
||||
return NULL;
|
||||
}
|
||||
|
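The whole CMAN backend conversion above follows one pattern, repeated file-wide: every entry point loses its external linkage, the functions are collected into a struct cluster_ops table, and the only exported symbol left is init_cman_cluster(), which returns the table on success or NULL if the cluster socket could not be opened. A cut-down sketch of that idiom with an invented two-function interface (names are illustrative, not from the tree):

    #include <stddef.h>
    #include <stdio.h>

    struct backend_ops {                    /* stands in for struct cluster_ops */
        int  (*get_num_nodes)(void);
        void (*closedown)(void);
    };

    /* Implementations are file-private now... */
    static int _get_num_nodes(void) { return 1; }
    static void _closedown(void)    { printf("backend closed\n"); }

    static int _init_cluster(void)  { return 0; }   /* 0 = success */

    /* ...and only the filled-in table is handed out. */
    static struct backend_ops _ops = {
        .get_num_nodes = _get_num_nodes,
        .closedown     = _closedown,
    };

    struct backend_ops *init_backend(void)
    {
        return _init_cluster() ? NULL : &_ops;
    }

    int main(void)
    {
        struct backend_ops *ops = init_backend();

        if (!ops)
            return 1;
        printf("%d node(s)\n", ops->get_num_nodes());
        ops->closedown();
        return 0;
    }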
@@ -22,33 +22,47 @@
|
||||
|
||||
struct local_client;
|
||||
|
||||
extern int cluster_send_message(void *buf, int msglen, char *csid,
|
||||
struct cluster_ops {
|
||||
void (*cluster_init_completed) (void);
|
||||
|
||||
int (*cluster_send_message) (void *buf, int msglen, char *csid,
|
||||
const char *errtext);
|
||||
extern int name_from_csid(char *csid, char *name);
|
||||
extern int csid_from_name(char *csid, char *name);
|
||||
extern int get_num_nodes(void);
|
||||
extern int cluster_fd_callback(struct local_client *fd, char *buf, int len,
|
||||
int (*name_from_csid) (char *csid, char *name);
|
||||
int (*csid_from_name) (char *csid, char *name);
|
||||
int (*get_num_nodes) (void);
|
||||
int (*cluster_fd_callback) (struct local_client *fd, char *buf, int len,
|
||||
char *csid, struct local_client **new_client);
|
||||
extern int init_cluster(void);
|
||||
extern int get_main_cluster_fd(void); /* gets accept FD or cman cluster socket */
|
||||
extern int cluster_do_node_callback(struct local_client *client,
|
||||
int (*get_main_cluster_fd) (void); /* gets accept FD or cman cluster socket */
|
||||
int (*cluster_do_node_callback) (struct local_client *client,
|
||||
void (*callback) (struct local_client *,
|
||||
char *csid, int node_up));
|
||||
extern int is_quorate(void);
|
||||
int (*is_quorate) (void);
|
||||
|
||||
extern void get_our_csid(char *csid);
|
||||
extern void add_up_node(char *csid);
|
||||
extern void cluster_closedown(void);
|
||||
void (*get_our_csid) (char *csid);
|
||||
void (*add_up_node) (char *csid);
|
||||
void (*cluster_closedown) (void);
|
||||
|
||||
extern int sync_lock(const char *resource, int mode, int flags, int *lockid);
|
||||
extern int sync_unlock(const char *resource, int lockid);
|
||||
int (*sync_lock) (const char *resource, int mode, int flags, int *lockid);
|
||||
int (*sync_unlock) (const char *resource, int lockid);
|
||||
|
||||
};
|
||||
|
||||
#ifdef USE_GULM
|
||||
#include "tcp-comms.h"
|
||||
#else
|
||||
/* cman */
|
||||
#include "cnxman-socket.h"
|
||||
#define MAX_CSID_LEN 4
|
||||
# include "tcp-comms.h"
|
||||
struct cluster_ops *init_gulm_cluster(void);
|
||||
#define MAX_CSID_LEN GULM_MAX_CSID_LEN
|
||||
#define MAX_CLUSTER_MEMBER_NAME_LEN GULM_MAX_CLUSTER_MEMBER_NAME_LEN
|
||||
#endif
|
||||
|
||||
#ifdef USE_CMAN
|
||||
# include "cnxman-socket.h"
|
||||
# define CMAN_MAX_CSID_LEN 4
|
||||
# ifndef MAX_CSID_LEN
|
||||
# define MAX_CSID_LEN CMAN_MAX_CSID_LEN
|
||||
# endif
|
||||
# undef MAX_CLUSTER_MEMBER_NAME_LEN
|
||||
# define MAX_CLUSTER_MEMBER_NAME_LEN CMAN_MAX_CLUSTER_MEMBER_NAME_LEN
|
||||
struct cluster_ops *init_cman_cluster(void);
|
||||
#endif
|
||||
|
||||
|
||||
|
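The tail of this header is what lets both backends be compiled into one binary without their limits colliding: the old unprefixed constants become CMAN_- and GULM_-prefixed, the unprefixed MAX_CSID_LEN survives only as a compile-time fallback (the GULM definition wins when both backends are present, because the CMAN block guards its define with #ifndef), and message-size decisions move to the runtime variables that clvmd.c introduces further down. A toy demonstration of that #ifndef layering; the GULM value below is invented for the sketch, while CMAN_MAX_CSID_LEN is 4 as in the diff:

    #include <stdio.h>

    #define USE_GULM
    #define USE_CMAN

    #ifdef USE_GULM
    # define GULM_MAX_CSID_LEN 16              /* invented for the sketch */
    # define MAX_CSID_LEN GULM_MAX_CSID_LEN
    #endif

    #ifdef USE_CMAN
    # define CMAN_MAX_CSID_LEN 4               /* as defined in the diff */
    # ifndef MAX_CSID_LEN                      /* only if GULM didn't set it */
    #  define MAX_CSID_LEN CMAN_MAX_CSID_LEN
    # endif
    #endif

    int main(void)
    {
        printf("compile-time fallback MAX_CSID_LEN = %d\n", MAX_CSID_LEN);
        return 0;
    }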
@@ -72,7 +72,7 @@ static volatile int lock_start_flag;
|
||||
struct node_info
|
||||
{
|
||||
enum {NODE_UNKNOWN, NODE_DOWN, NODE_UP, NODE_CLVMD} state;
|
||||
char name[MAX_CLUSTER_MEMBER_NAME_LEN];
|
||||
char name[GULM_MAX_CLUSTER_MEMBER_NAME_LEN];
|
||||
};
|
||||
|
||||
struct lock_wait
|
||||
@@ -88,6 +88,8 @@ static int read_from_core_sock(struct local_client *client, char *buf, int len,
|
||||
static int read_from_lock_sock(struct local_client *client, char *buf, int len, char *csid,
|
||||
struct local_client **new_client);
|
||||
static int get_all_cluster_nodes(void);
|
||||
static int _csid_from_name(char *csid, char *name);
|
||||
static void _cluster_closedown(void);
|
||||
|
||||
/* In tcp-comms.c */
|
||||
extern struct hash_table *sock_hash;
|
||||
@@ -123,7 +125,7 @@ static lg_lockspace_callbacks_t lock_callbacks;
|
||||
static void badsig_handler(int sig)
|
||||
{
|
||||
DEBUGLOG("got sig %d\n", sig);
|
||||
cluster_closedown();
|
||||
_cluster_closedown();
|
||||
exit(0);
|
||||
}
|
||||
|
||||
@@ -135,7 +137,7 @@ static void sighup_handler(int sig)
|
||||
get_all_cluster_nodes();
|
||||
}
|
||||
|
||||
int init_cluster()
|
||||
static int _init_cluster(void)
|
||||
{
|
||||
int status;
|
||||
int ccs_h;
|
||||
@@ -241,7 +243,7 @@ int init_cluster()
|
||||
return 0;
|
||||
}
|
||||
|
||||
void cluster_closedown()
|
||||
static void _cluster_closedown(void)
|
||||
{
|
||||
DEBUGLOG("cluster_closedown\n");
|
||||
lg_lock_logout(gulm_if);
|
||||
@@ -339,7 +341,7 @@ static struct node_info *add_or_set_node(char *name, struct in6_addr *ip, uint8_
|
||||
{
|
||||
struct node_info *ninfo;
|
||||
|
||||
ninfo = hash_lookup_binary(node_hash, (char *)ip, MAX_CSID_LEN);
|
||||
ninfo = hash_lookup_binary(node_hash, (char *)ip, GULM_MAX_CSID_LEN);
|
||||
if (!ninfo)
|
||||
{
|
||||
/* If we can't find that node then re-read the config file in case it
|
||||
@@ -348,7 +350,7 @@ static struct node_info *add_or_set_node(char *name, struct in6_addr *ip, uint8_
|
||||
get_all_cluster_nodes();
|
||||
|
||||
/* Now try again */
|
||||
ninfo = hash_lookup_binary(node_hash, (char *)ip, MAX_CSID_LEN);
|
||||
ninfo = hash_lookup_binary(node_hash, (char *)ip, GULM_MAX_CSID_LEN);
|
||||
if (!ninfo)
|
||||
{
|
||||
DEBUGLOG("Ignoring node %s, not part of the SAN cluster\n", name);
|
||||
@@ -361,6 +363,11 @@ static struct node_info *add_or_set_node(char *name, struct in6_addr *ip, uint8_
|
||||
return ninfo;
|
||||
}
|
||||
|
||||
static void _get_our_csid(char *csid)
|
||||
{
|
||||
get_our_gulm_csid(csid);
|
||||
}
|
||||
|
||||
static int core_nodelist(void *misc, lglcb_t type, char *name, struct in6_addr *ip, uint8_t state)
|
||||
{
|
||||
DEBUGLOG("CORE nodelist\n");
|
||||
@@ -381,14 +388,14 @@ static int core_nodelist(void *misc, lglcb_t type, char *name, struct in6_addr *
|
||||
{
|
||||
if (type == lglcb_stop)
|
||||
{
|
||||
char ourcsid[MAX_CSID_LEN];
|
||||
char ourcsid[GULM_MAX_CSID_LEN];
|
||||
|
||||
DEBUGLOG("Got Nodelist, stop\n");
|
||||
clvmd_cluster_init_completed();
|
||||
|
||||
/* Mark ourself as up */
|
||||
get_our_csid(ourcsid);
|
||||
add_up_node(ourcsid);
|
||||
_get_our_csid(ourcsid);
|
||||
gulm_add_up_node(ourcsid);
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -417,7 +424,7 @@ static int core_nodechange(void *misc, char *nodename, struct in6_addr *nodeip,
|
||||
|
||||
/* If we don't get nodeip here, try a lookup by name */
|
||||
if (!nodeip)
|
||||
csid_from_name((char *)nodeip, nodename);
|
||||
_csid_from_name((char *)nodeip, nodename);
|
||||
if (!nodeip)
|
||||
return 0;
|
||||
|
||||
@@ -540,15 +547,15 @@ int get_next_node_csid(void **context, char *csid)
|
||||
return 0;
|
||||
}
|
||||
|
||||
memcpy(csid, hash_get_key(node_hash, *context), MAX_CSID_LEN);
|
||||
memcpy(csid, hash_get_key(node_hash, *context), GULM_MAX_CSID_LEN);
|
||||
return 1;
|
||||
}
|
||||
|
||||
int name_from_csid(char *csid, char *name)
|
||||
int gulm_name_from_csid(char *csid, char *name)
|
||||
{
|
||||
struct node_info *ninfo;
|
||||
|
||||
ninfo = hash_lookup_binary(node_hash, csid, MAX_CSID_LEN);
|
||||
ninfo = hash_lookup_binary(node_hash, csid, GULM_MAX_CSID_LEN);
|
||||
if (!ninfo)
|
||||
{
|
||||
sprintf(name, "UNKNOWN %s", print_csid(csid));
|
||||
@@ -560,7 +567,7 @@ int name_from_csid(char *csid, char *name)
|
||||
}
|
||||
|
||||
|
||||
int csid_from_name(char *csid, char *name)
|
||||
static int _csid_from_name(char *csid, char *name)
|
||||
{
|
||||
struct hash_node *hn;
|
||||
struct node_info *ninfo;
|
||||
@@ -570,25 +577,25 @@ int csid_from_name(char *csid, char *name)
|
||||
ninfo = hash_get_data(node_hash, hn);
|
||||
if (strcmp(ninfo->name, name) == 0)
|
||||
{
|
||||
memcpy(csid, hash_get_key(node_hash, hn), MAX_CSID_LEN);
|
||||
memcpy(csid, hash_get_key(node_hash, hn), GULM_MAX_CSID_LEN);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
int get_num_nodes()
|
||||
static int _get_num_nodes()
|
||||
{
|
||||
DEBUGLOG("num_nodes = %d\n", num_nodes);
|
||||
return num_nodes;
|
||||
}
|
||||
|
||||
/* Node is now known to be running a clvmd */
|
||||
void add_up_node(char *csid)
|
||||
void gulm_add_up_node(char *csid)
|
||||
{
|
||||
struct node_info *ninfo;
|
||||
|
||||
ninfo = hash_lookup_binary(node_hash, csid, MAX_CSID_LEN);
|
||||
ninfo = hash_lookup_binary(node_hash, csid, GULM_MAX_CSID_LEN);
|
||||
if (!ninfo)
|
||||
return;
|
||||
|
||||
@@ -601,7 +608,7 @@ void add_down_node(char *csid)
|
||||
{
|
||||
struct node_info *ninfo;
|
||||
|
||||
ninfo = hash_lookup_binary(node_hash, csid, MAX_CSID_LEN);
|
||||
ninfo = hash_lookup_binary(node_hash, csid, GULM_MAX_CSID_LEN);
|
||||
if (!ninfo)
|
||||
return;
|
||||
|
||||
@@ -614,7 +621,7 @@ void add_down_node(char *csid)
|
||||
}
|
||||
|
||||
/* Call a callback for each node, so the caller knows whether it's up or down */
|
||||
int cluster_do_node_callback(struct local_client *master_client,
|
||||
static int _cluster_do_node_callback(struct local_client *master_client,
|
||||
void (*callback)(struct local_client *, char *csid, int node_up))
|
||||
{
|
||||
struct hash_node *hn;
|
||||
@@ -622,15 +629,15 @@ int cluster_do_node_callback(struct local_client *master_client,
|
||||
|
||||
hash_iterate(hn, node_hash)
|
||||
{
|
||||
char csid[MAX_CSID_LEN];
|
||||
char csid[GULM_MAX_CSID_LEN];
|
||||
struct local_client *client;
|
||||
|
||||
ninfo = hash_get_data(node_hash, hn);
|
||||
memcpy(csid, hash_get_key(node_hash, hn), MAX_CSID_LEN);
|
||||
memcpy(csid, hash_get_key(node_hash, hn), GULM_MAX_CSID_LEN);
|
||||
|
||||
DEBUGLOG("down_callback. node %s, state = %d\n", ninfo->name, ninfo->state);
|
||||
|
||||
client = hash_lookup_binary(sock_hash, csid, MAX_CSID_LEN);
|
||||
client = hash_lookup_binary(sock_hash, csid, GULM_MAX_CSID_LEN);
|
||||
if (client)
|
||||
callback(master_client, csid, ninfo->state == NODE_CLVMD);
|
||||
}
|
||||
@@ -742,7 +749,7 @@ static int _unlock_resource(char *resource, int lockid)
|
||||
To aid unlocking, we store the lock mode in the lockid (as GULM
|
||||
doesn't use this).
|
||||
*/
|
||||
int sync_lock(const char *resource, int mode, int flags, int *lockid)
|
||||
static int _sync_lock(const char *resource, int mode, int flags, int *lockid)
|
||||
{
|
||||
int status;
|
||||
char lock1[strlen(resource)+3];
|
||||
@@ -786,7 +793,7 @@ int sync_lock(const char *resource, int mode, int flags, int *lockid)
|
||||
return status;
|
||||
}
|
||||
|
||||
int sync_unlock(const char *resource, int lockid)
|
||||
static int _sync_unlock(const char *resource, int lockid)
|
||||
{
|
||||
int status = 0;
|
||||
char lock1[strlen(resource)+3];
|
||||
@@ -822,7 +829,7 @@ int sync_unlock(const char *resource, int lockid)
|
||||
return status;
|
||||
}
|
||||
|
||||
int is_quorate()
|
||||
static int _is_quorate()
|
||||
{
|
||||
if (current_corestate == lg_core_Slave ||
|
||||
current_corestate == lg_core_Master ||
|
||||
@@ -854,7 +861,7 @@ static int get_all_cluster_nodes()
|
||||
for (i=1;;i++)
|
||||
{
|
||||
char nodekey[256];
|
||||
char nodeip[MAX_CSID_LEN];
|
||||
char nodeip[GULM_MAX_CSID_LEN];
|
||||
int clvmflag = 1;
|
||||
char *clvmflagstr;
|
||||
char key[256];
|
||||
@@ -877,7 +884,7 @@ static int get_all_cluster_nodes()
|
||||
struct node_info *ninfo;
|
||||
|
||||
/* If it's not in the list, then add it */
|
||||
ninfo = hash_lookup_binary(node_hash, nodeip, MAX_CSID_LEN);
|
||||
ninfo = hash_lookup_binary(node_hash, nodeip, GULM_MAX_CSID_LEN);
|
||||
if (!ninfo)
|
||||
{
|
||||
ninfo = malloc(sizeof(struct node_info));
|
||||
@@ -890,7 +897,7 @@ static int get_all_cluster_nodes()
|
||||
strcpy(ninfo->name, nodename);
|
||||
|
||||
ninfo->state = NODE_DOWN;
|
||||
hash_insert_binary(node_hash, nodeip, MAX_CSID_LEN, ninfo);
|
||||
hash_insert_binary(node_hash, nodeip, GULM_MAX_CSID_LEN, ninfo);
|
||||
}
|
||||
}
|
||||
else
|
||||
@@ -906,3 +913,42 @@ static int get_all_cluster_nodes()
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int _get_main_cluster_fd(void)
|
||||
{
|
||||
return get_main_gulm_cluster_fd();
|
||||
}
|
||||
|
||||
static int _cluster_fd_callback(struct local_client *fd, char *buf, int len, char *csid, struct local_client **new_client)
|
||||
{
|
||||
return cluster_fd_gulm_callback(fd, buf, len, csid, new_client);
|
||||
}
|
||||
|
||||
static int _cluster_send_message(void *buf, int msglen, char *csid, const char *errtext)
|
||||
{
|
||||
return gulm_cluster_send_message(buf, msglen, csid, errtext);
|
||||
}
|
||||
|
||||
static struct cluster_ops _cluster_gulm_ops = {
|
||||
.cluster_init_completed = NULL,
|
||||
.cluster_send_message = _cluster_send_message,
|
||||
.name_from_csid = gulm_name_from_csid,
|
||||
.csid_from_name = _csid_from_name,
|
||||
.get_num_nodes = _get_num_nodes,
|
||||
.cluster_fd_callback = _cluster_fd_callback,
|
||||
.get_main_cluster_fd = _get_main_cluster_fd,
|
||||
.cluster_do_node_callback = _cluster_do_node_callback,
|
||||
.is_quorate = _is_quorate,
|
||||
.get_our_csid = _get_our_csid,
|
||||
.add_up_node = gulm_add_up_node,
|
||||
.cluster_closedown = _cluster_closedown,
|
||||
.sync_lock = _sync_lock,
|
||||
.sync_unlock = _sync_unlock,
|
||||
};
|
||||
|
||||
struct cluster_ops *init_gulm_cluster(void)
|
||||
{
|
||||
if (!_init_cluster())
|
||||
return &_cluster_gulm_ops;
|
||||
else
|
||||
return NULL;
|
||||
}
|
||||
|
@@ -7,3 +7,6 @@ extern int gulm_fd(void);
 extern int get_ip_address(char *node, char *addr);
 extern void tcp_remove_client(char *csid);
 extern int alloc_client(int fd, char *csid, struct local_client **new_client);
+
+void gulm_add_up_node(char *csid);
+int gulm_name_from_csid(char *csid, char *name);
@@ -55,9 +55,9 @@
|
||||
|
||||
/* The maximum size of a message that will fit into a packet. Anything bigger
|
||||
than this is sent via the system LV */
|
||||
#define MAX_INLINE_MESSAGE (MAX_CLUSTER_MESSAGE-sizeof(struct clvm_header))
|
||||
#define MAX_INLINE_MESSAGE (max_cluster_message-sizeof(struct clvm_header))
|
||||
|
||||
#define ISLOCAL_CSID(c) (memcmp(c, our_csid, MAX_CSID_LEN) == 0)
|
||||
#define ISLOCAL_CSID(c) (memcmp(c, our_csid, max_csid_len) == 0)
|
||||
|
||||
/* Head of the fd list. Also contains
|
||||
the cluster_socket details */
|
||||
@@ -65,7 +65,12 @@ static struct local_client local_client_head;
|
||||
|
||||
static unsigned short global_xid = 0; /* Last transaction ID issued */
|
||||
|
||||
static struct cluster_ops *clops = NULL;
|
||||
|
||||
static char our_csid[MAX_CSID_LEN];
|
||||
static unsigned max_csid_len;
|
||||
static unsigned max_cluster_message;
|
||||
static unsigned max_cluster_member_name_len;
|
||||
|
||||
/* Structure of items on the LVM thread list */
|
||||
struct lvm_thread_cmd {
|
||||
@@ -230,7 +235,23 @@ int main(int argc, char *argv[])
|
||||
init_lvhash();
|
||||
|
||||
/* Start the cluster interface */
|
||||
if (init_cluster()) {
|
||||
#ifdef USE_CMAN
|
||||
if ((clops = init_cman_cluster())) {
|
||||
max_csid_len = CMAN_MAX_CSID_LEN;
|
||||
max_cluster_message = CMAN_MAX_CLUSTER_MESSAGE;
|
||||
max_cluster_member_name_len = CMAN_MAX_CLUSTER_MEMBER_NAME_LEN;
|
||||
}
|
||||
#endif
|
||||
#ifdef USE_GULM
|
||||
if (!clops)
|
||||
if ((clops = init_gulm_cluster())) {
|
||||
max_csid_len = GULM_MAX_CSID_LEN;
|
||||
max_cluster_message = GULM_MAX_CLUSTER_MESSAGE;
|
||||
max_cluster_member_name_len = GULM_MAX_CLUSTER_MEMBER_NAME_LEN;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (!clops) {
|
||||
DEBUGLOG("Can't initialise cluster interface\n");
|
||||
log_error("Can't initialise cluster interface\n");
|
||||
child_init_signal(DFAIL_CLUSTER_IF);
|
||||
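This hunk is the runtime half of the single-binary change: main() probes whichever backends were compiled in, CMAN first and GULM as the fallback, and copies that backend's limits into the new max_csid_len / max_cluster_message / max_cluster_member_name_len variables, so later code (for example char buf[max_cluster_message] in main_loop below) can size buffers per run instead of with a compile-time MAX_* constant. A reduced, stand-alone sketch of the selection; the stub init functions and the GULM message size are invented for the example, while 1500 is CMAN_MAX_CLUSTER_MESSAGE from the diff:

    #include <stdio.h>
    #include <stdlib.h>

    struct cluster_ops { const char *name; };   /* reduced for the sketch */

    static struct cluster_ops gulm_ops = { "gulm" };

    static struct cluster_ops *init_cman_cluster(void) { return NULL; }      /* pretend cman failed */
    static struct cluster_ops *init_gulm_cluster(void) { return &gulm_ops; }

    int main(void)
    {
        struct cluster_ops *clops = NULL;
        unsigned max_cluster_message = 0;

        if ((clops = init_cman_cluster()))
            max_cluster_message = 1500;         /* CMAN_MAX_CLUSTER_MESSAGE */
        if (!clops && (clops = init_gulm_cluster()))
            max_cluster_message = 1600;         /* invented GULM limit */

        if (!clops) {
            fprintf(stderr, "Can't initialise cluster interface\n");
            return 1;
        }

        char *buf = malloc(max_cluster_message); /* sized at runtime, not compile time */
        if (!buf)
            return 1;
        printf("using %s, %u-byte messages\n", clops->name, max_cluster_message);
        free(buf);
        return 0;
    }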
@@ -239,12 +260,12 @@ int main(int argc, char *argv[])
|
||||
|
||||
/* Save our CSID */
|
||||
uname(&nodeinfo);
|
||||
get_our_csid(our_csid);
|
||||
clops->get_our_csid(our_csid);
|
||||
|
||||
/* Initialise the FD list head */
|
||||
local_client_head.fd = get_main_cluster_fd();
|
||||
local_client_head.fd = clops->get_main_cluster_fd();
|
||||
local_client_head.type = CLUSTER_MAIN_SOCK;
|
||||
local_client_head.callback = cluster_fd_callback;
|
||||
local_client_head.callback = clops->cluster_fd_callback;
|
||||
|
||||
/* Add the local socket to the list */
|
||||
newfd = malloc(sizeof(struct local_client));
|
||||
@@ -262,13 +283,12 @@ int main(int argc, char *argv[])
|
||||
DEBUGLOG("starting LVM thread\n");
|
||||
pthread_create(&lvm_thread, NULL, lvm_thread_fn, nodeinfo.nodename);
|
||||
|
||||
#ifndef USE_GULM
|
||||
/* Tell the rest of the cluster our version number */
|
||||
/* CMAN can do this immediately, gulm needs to wait until
|
||||
the core initialisation has finished and the node list
|
||||
has been gathered */
|
||||
send_version_message();
|
||||
#endif
|
||||
if (clops->cluster_init_completed)
|
||||
clops->cluster_init_completed();
|
||||
|
||||
DEBUGLOG("clvmd ready for work\n");
|
||||
child_init_signal(SUCCESS);
|
||||
@@ -417,9 +437,9 @@ static void timedout_callback(struct local_client *client, char *csid,
|
||||
{
|
||||
if (node_up) {
|
||||
struct node_reply *reply;
|
||||
char nodename[MAX_CLUSTER_MEMBER_NAME_LEN];
|
||||
char nodename[max_cluster_member_name_len];
|
||||
|
||||
name_from_csid(csid, nodename);
|
||||
clops->name_from_csid(csid, nodename);
|
||||
DEBUGLOG("PJC: checking for a reply from %s\n", nodename);
|
||||
pthread_mutex_lock(&client->bits.localsock.reply_mutex);
|
||||
|
||||
@@ -448,7 +468,7 @@ static void timedout_callback(struct local_client *client, char *csid,
|
||||
static void request_timed_out(struct local_client *client)
|
||||
{
|
||||
DEBUGLOG("Request timed-out. padding\n");
|
||||
cluster_do_node_callback(client, timedout_callback);
|
||||
clops->cluster_do_node_callback(client, timedout_callback);
|
||||
|
||||
if (client->bits.localsock.num_replies !=
|
||||
client->bits.localsock.expected_replies) {
|
||||
@@ -473,7 +493,7 @@ static void main_loop(int local_sock, int cmd_timeout)
|
||||
int select_status;
|
||||
struct local_client *thisfd;
|
||||
struct timeval tv = { cmd_timeout, 0 };
|
||||
int quorate = is_quorate();
|
||||
int quorate = clops->is_quorate();
|
||||
|
||||
/* Wait on the cluster FD and all local sockets/pipes */
|
||||
FD_ZERO(&in);
|
||||
@@ -488,7 +508,7 @@ static void main_loop(int local_sock, int cmd_timeout)
|
||||
if ((select_status = select(FD_SETSIZE, &in, NULL, NULL, &tv)) > 0) {
|
||||
struct local_client *lastfd = NULL;
|
||||
char csid[MAX_CSID_LEN];
|
||||
char buf[MAX_CLUSTER_MESSAGE];
|
||||
char buf[max_cluster_message];
|
||||
|
||||
for (thisfd = &local_client_head; thisfd != NULL;
|
||||
thisfd = thisfd->next) {
|
||||
@@ -573,7 +593,7 @@ static void main_loop(int local_sock, int cmd_timeout)
|
||||
}
|
||||
|
||||
closedown:
|
||||
cluster_closedown();
|
||||
clops->cluster_closedown();
|
||||
close(local_sock);
|
||||
}
|
||||
|
||||
@@ -765,7 +785,7 @@ static int read_from_local_sock(struct local_client *thisfd)
|
||||
if (thisfd->bits.localsock.in_progress) {
|
||||
struct clvm_header reply;
|
||||
reply.cmd = CLVMD_CMD_REPLY;
|
||||
reply.status = -EBUSY;
|
||||
reply.status = EBUSY;
|
||||
reply.arglen = 0;
|
||||
reply.flags = 0;
|
||||
send_message(&reply, sizeof(reply), our_csid,
|
||||
@@ -788,7 +808,7 @@ static int read_from_local_sock(struct local_client *thisfd)
|
||||
if (!thisfd->bits.localsock.cmd) {
|
||||
struct clvm_header reply;
|
||||
reply.cmd = CLVMD_CMD_REPLY;
|
||||
reply.status = -ENOMEM;
|
||||
reply.status = ENOMEM;
|
||||
reply.arglen = 0;
|
||||
reply.flags = 0;
|
||||
send_message(&reply, sizeof(reply), our_csid,
|
||||
@@ -829,13 +849,13 @@ static int read_from_local_sock(struct local_client *thisfd)
|
||||
}
|
||||
|
||||
/* Check the node name for validity */
|
||||
if (inheader->node[0] && csid_from_name(csid, inheader->node)) {
|
||||
if (inheader->node[0] && clops->csid_from_name(csid, inheader->node)) {
|
||||
/* Error, node is not in the cluster */
|
||||
struct clvm_header reply;
|
||||
DEBUGLOG("Unknown node: '%s'\n", inheader->node);
|
||||
|
||||
reply.cmd = CLVMD_CMD_REPLY;
|
||||
reply.status = -ENOENT;
|
||||
reply.status = ENOENT;
|
||||
reply.flags = 0;
|
||||
reply.arglen = 0;
|
||||
send_message(&reply, sizeof(reply), our_csid,
|
||||
@@ -866,7 +886,7 @@ static int read_from_local_sock(struct local_client *thisfd)
|
||||
close(comms_pipe[1]);
|
||||
|
||||
reply.cmd = CLVMD_CMD_REPLY;
|
||||
reply.status = -ENOMEM;
|
||||
reply.status = ENOMEM;
|
||||
reply.arglen = 0;
|
||||
reply.flags = 0;
|
||||
send_message(&reply, sizeof(reply), our_csid,
|
||||
@@ -961,7 +981,7 @@ static int distribute_command(struct local_client *thisfd)
|
||||
/* if node is empty then do it on the whole cluster */
|
||||
if (inheader->node[0] == '\0') {
|
||||
thisfd->bits.localsock.expected_replies =
|
||||
get_num_nodes();
|
||||
clops->get_num_nodes();
|
||||
thisfd->bits.localsock.num_replies = 0;
|
||||
thisfd->bits.localsock.sent_time = time(NULL);
|
||||
thisfd->bits.localsock.in_progress = TRUE;
|
||||
@@ -982,7 +1002,7 @@ static int distribute_command(struct local_client *thisfd)
|
||||
/* Do it on a single node */
|
||||
char csid[MAX_CSID_LEN];
|
||||
|
||||
if (csid_from_name(csid, inheader->node)) {
|
||||
if (clops->csid_from_name(csid, inheader->node)) {
|
||||
/* This has already been checked so should not happen */
|
||||
return 0;
|
||||
} else {
|
||||
@@ -992,7 +1012,7 @@ static int distribute_command(struct local_client *thisfd)
|
||||
thisfd->bits.localsock.in_progress = TRUE;
|
||||
|
||||
/* Are we the requested node ?? */
|
||||
if (memcmp(csid, our_csid, MAX_CSID_LEN) == 0) {
|
||||
if (memcmp(csid, our_csid, max_csid_len) == 0) {
|
||||
DEBUGLOG("Doing command on local node only\n");
|
||||
add_to_lvmqueue(thisfd, inheader, len, NULL);
|
||||
} else {
|
||||
@@ -1024,14 +1044,14 @@ void process_remote_command(struct clvm_header *msg, int msglen, int fd,
|
||||
char *csid)
|
||||
{
|
||||
char *replyargs;
|
||||
char nodename[MAX_CLUSTER_MEMBER_NAME_LEN];
|
||||
char nodename[max_cluster_member_name_len];
|
||||
int replylen = 0;
|
||||
int buflen = MAX_CLUSTER_MESSAGE - sizeof(struct clvm_header) - 1;
|
||||
int buflen = max_cluster_message - sizeof(struct clvm_header) - 1;
|
||||
int status;
|
||||
int msg_malloced = 0;
|
||||
|
||||
/* Get the node name as we /may/ need it later */
|
||||
name_from_csid(csid, nodename);
|
||||
clops->name_from_csid(csid, nodename);
|
||||
|
||||
DEBUGLOG("process_remote_command %d for clientid 0x%x on node %s\n",
|
||||
msg->cmd, msg->clientid, nodename);
|
||||
@@ -1056,7 +1076,7 @@ void process_remote_command(struct clvm_header *msg, int msglen, int fd,
|
||||
|
||||
/* Return a failure response */
|
||||
head.cmd = CLVMD_CMD_REPLY;
|
||||
head.status = -EFBIG;
|
||||
head.status = EFBIG;
|
||||
head.flags = 0;
|
||||
head.clientid = msg->clientid;
|
||||
head.arglen = 0;
|
||||
@@ -1073,7 +1093,7 @@ void process_remote_command(struct clvm_header *msg, int msglen, int fd,
|
||||
msg->arglen);
|
||||
/* Return a failure response */
|
||||
head.cmd = CLVMD_CMD_REPLY;
|
||||
head.status = -ENOMEM;
|
||||
head.status = ENOMEM;
|
||||
head.flags = 0;
|
||||
head.clientid = msg->clientid;
|
||||
head.arglen = 0;
|
||||
@@ -1097,7 +1117,7 @@ void process_remote_command(struct clvm_header *msg, int msglen, int fd,
|
||||
if (msg->cmd == CLVMD_CMD_VERSION) {
|
||||
int *version_nums = (int *) msg->args;
|
||||
char node[256];
|
||||
name_from_csid(csid, node);
|
||||
clops->name_from_csid(csid, node);
|
||||
DEBUGLOG("Remote node %s is version %d.%d.%d\n",
|
||||
node,
|
||||
ntohl(version_nums[0]),
|
||||
@@ -1118,17 +1138,17 @@ void process_remote_command(struct clvm_header *msg, int msglen, int fd,
|
||||
byebyemsg.flags = 0;
|
||||
byebyemsg.arglen = 0;
|
||||
byebyemsg.clientid = 0;
|
||||
cluster_send_message(&byebyemsg, sizeof(byebyemsg),
|
||||
clops->cluster_send_message(&byebyemsg, sizeof(byebyemsg),
|
||||
our_csid,
|
||||
"Error Sending GOAWAY message");
|
||||
} else {
|
||||
add_up_node(csid);
|
||||
clops->add_up_node(csid);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
/* Allocate a default reply buffer */
|
||||
replyargs = malloc(MAX_CLUSTER_MESSAGE - sizeof(struct clvm_header));
|
||||
replyargs = malloc(max_cluster_message - sizeof(struct clvm_header));
|
||||
|
||||
if (replyargs != NULL) {
|
||||
/* Run the command */
|
||||
@@ -1136,7 +1156,7 @@ void process_remote_command(struct clvm_header *msg, int msglen, int fd,
|
||||
do_command(NULL, msg, msglen, &replyargs, buflen,
|
||||
&replylen);
|
||||
} else {
|
||||
status = -ENOMEM;
|
||||
status = ENOMEM;
|
||||
}
|
||||
|
||||
/* If it wasn't a reply, then reply */
|
||||
@@ -1171,7 +1191,7 @@ void process_remote_command(struct clvm_header *msg, int msglen, int fd,
|
||||
(aggreply,
|
||||
replylen + sizeof(struct clvm_header)) < 0
|
||||
&& replylen > 0)
|
||||
agghead->status = -EFBIG;
|
||||
agghead->status = EFBIG;
|
||||
|
||||
send_message(agghead,
|
||||
sizeof(struct clvm_header), csid,
|
||||
@@ -1196,7 +1216,7 @@ void process_remote_command(struct clvm_header *msg, int msglen, int fd,
|
||||
DEBUGLOG("Error attempting to realloc return buffer\n");
|
||||
/* Return a failure response */
|
||||
head.cmd = CLVMD_CMD_REPLY;
|
||||
head.status = -ENOMEM;
|
||||
head.status = ENOMEM;
|
||||
head.flags = 0;
|
||||
head.clientid = msg->clientid;
|
||||
head.arglen = 0;
|
||||
@@ -1228,13 +1248,13 @@ static void add_reply_to_list(struct local_client *client, int status,
|
||||
reply = malloc(sizeof(struct node_reply));
|
||||
if (reply) {
|
||||
reply->status = status;
|
||||
name_from_csid(csid, reply->node);
|
||||
clops->name_from_csid(csid, reply->node);
|
||||
DEBUGLOG("Reply from node %s: %d bytes\n", reply->node, len);
|
||||
|
||||
if (len > 0) {
|
||||
reply->replymsg = malloc(len);
|
||||
if (!reply->replymsg) {
|
||||
reply->status = -ENOMEM;
|
||||
reply->status = ENOMEM;
|
||||
} else {
|
||||
memcpy(reply->replymsg, buf, len);
|
||||
}
|
||||
@@ -1342,8 +1362,8 @@ static int process_local_command(struct clvm_header *msg, int msglen,
|
||||
struct local_client *client,
|
||||
unsigned short xid)
|
||||
{
|
||||
char *replybuf = malloc(MAX_CLUSTER_MESSAGE);
|
||||
int buflen = MAX_CLUSTER_MESSAGE - sizeof(struct clvm_header) - 1;
|
||||
char *replybuf = malloc(max_cluster_message);
|
||||
int buflen = max_cluster_message - sizeof(struct clvm_header) - 1;
|
||||
int replylen = 0;
|
||||
int status;
|
||||
|
||||
@@ -1425,9 +1445,10 @@ static void send_local_reply(struct local_client *client, int status, int fd)
|
||||
replybuf = malloc(message_len);
|
||||
|
||||
clientreply = (struct clvm_header *) replybuf;
|
||||
clientreply->status = -status;
|
||||
clientreply->status = status;
|
||||
clientreply->cmd = CLVMD_CMD_REPLY;
|
||||
clientreply->node[0] = '\0';
|
||||
clientreply->flags = 0;
|
||||
|
||||
ptr = clientreply->args;
|
||||
|
||||
@@ -1439,6 +1460,9 @@ static void send_local_reply(struct local_client *client, int status, int fd)
|
||||
strcpy(ptr, thisreply->node);
|
||||
ptr += strlen(thisreply->node) + 1;
|
||||
|
||||
if (thisreply->status)
|
||||
clientreply->flags |= CLVMD_FLAG_NODEERRS;
|
||||
|
||||
*(int *) ptr = thisreply->status;
|
||||
ptr += sizeof(int);
|
||||
|
||||
@@ -1507,7 +1531,7 @@ static void send_version_message()
|
||||
version_nums[1] = htonl(CLVMD_MINOR_VERSION);
|
||||
version_nums[2] = htonl(CLVMD_PATCH_VERSION);
|
||||
|
||||
cluster_send_message(message, sizeof(message), NULL,
|
||||
clops->cluster_send_message(message, sizeof(message), NULL,
|
||||
"Error Sending version number");
|
||||
}
|
||||
|
||||
@@ -1520,7 +1544,7 @@ static int send_message(void *buf, int msglen, char *csid, int fd,
|
||||
/* Send remote messages down the cluster socket */
|
||||
if (csid == NULL || !ISLOCAL_CSID(csid)) {
|
||||
hton_clvm((struct clvm_header *) buf); /* Byte swap if necessary */
|
||||
return cluster_send_message(buf, msglen, csid, errtext);
|
||||
return clops->cluster_send_message(buf, msglen, csid, errtext);
|
||||
} else {
|
||||
int ptr = 0;
|
||||
|
||||
@@ -1607,7 +1631,7 @@ static int add_to_lvmqueue(struct local_client *client, struct clvm_header *msg,
|
||||
|
||||
cmd = malloc(sizeof(struct lvm_thread_cmd));
|
||||
if (!cmd)
|
||||
return -ENOMEM;
|
||||
return ENOMEM;
|
||||
|
||||
cmd->msg = malloc(msglen);
|
||||
if (!cmd->msg) {
|
||||
@@ -1621,7 +1645,7 @@ static int add_to_lvmqueue(struct local_client *client, struct clvm_header *msg,
|
||||
cmd->xid = client->xid;
|
||||
memcpy(cmd->msg, msg, msglen);
|
||||
if (csid) {
|
||||
memcpy(cmd->csid, csid, MAX_CSID_LEN);
|
||||
memcpy(cmd->csid, csid, max_csid_len);
|
||||
cmd->remote = 1;
|
||||
} else {
|
||||
cmd->remote = 0;
|
||||
@@ -1689,7 +1713,7 @@ static void check_all_callback(struct local_client *client, char *csid,
|
||||
int node_up)
|
||||
{
|
||||
if (!node_up)
|
||||
add_reply_to_list(client, -EHOSTDOWN, csid, "CLVMD not running",
|
||||
add_reply_to_list(client, EHOSTDOWN, csid, "CLVMD not running",
|
||||
18);
|
||||
}
|
||||
|
||||
@@ -1699,7 +1723,7 @@ static void check_all_callback(struct local_client *client, char *csid,
|
||||
static int check_all_clvmds_running(struct local_client *client)
|
||||
{
|
||||
DEBUGLOG("check_all_clvmds_running\n");
|
||||
return cluster_do_node_callback(client, check_all_callback);
|
||||
return clops->cluster_do_node_callback(client, check_all_callback);
|
||||
}
|
||||
|
||||
/* Return a local_client struct given a client ID.
|
||||
@@ -1745,3 +1769,14 @@ static void sigterm_handler(int sig)
|
||||
quit = 1;
|
||||
return;
|
||||
}
|
||||
|
||||
int sync_lock(const char *resource, int mode, int flags, int *lockid)
|
||||
{
|
||||
return clops->sync_lock(resource, mode, flags, lockid);
|
||||
}
|
||||
|
||||
int sync_unlock(const char *resource, int lockid)
|
||||
{
|
||||
return clops->sync_unlock(resource, lockid);
|
||||
}
|
||||
|
||||
|
@@ -93,7 +93,7 @@ struct local_client {
|
||||
struct netsock_bits net;
|
||||
} bits;
|
||||
};
|
||||
|
||||
#define DEBUG
|
||||
#ifdef DEBUG
|
||||
#define DEBUGLOG(fmt, args...) fprintf(stderr, "CLVMD[%d]: %ld ", getpid(), time(NULL) ); fprintf(stderr, fmt, ## args)
|
||||
#else
|
||||
@@ -116,4 +116,8 @@ extern int add_client(struct local_client *new_client);
|
||||
|
||||
extern void clvmd_cluster_init_completed(void);
|
||||
extern void process_message(struct local_client *client, char *buf, int len, char *csid);
|
||||
|
||||
int sync_lock(const char *resource, int mode, int flags, int *lockid);
|
||||
int sync_unlock(const char *resource, int lockid);
|
||||
|
||||
#endif
|
||||
|
@@ -63,8 +63,8 @@
 
 /* Maximum size of a cluster message */
-#define MAX_CLUSTER_MESSAGE 1500
-#define MAX_CLUSTER_MEMBER_NAME_LEN 255
+#define CMAN_MAX_CLUSTER_MESSAGE 1500
+#define CMAN_MAX_CLUSTER_MEMBER_NAME_LEN 255
 #define MAX_BARRIER_NAME_LEN 33
 #define MAX_SA_ADDR_LEN 12
 #define MAX_CLUSTER_NAME_LEN 16
 
@@ -147,7 +147,7 @@ struct cl_cluster_node {
 unsigned int leave_reason;
 unsigned int incarnation;
 nodestate_t state;
-char name[MAX_CLUSTER_MEMBER_NAME_LEN];
+char name[CMAN_MAX_CLUSTER_MEMBER_NAME_LEN];
 unsigned char votes;
 };
|
||||
|
@@ -1,446 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 1997-2004 Sistina Software, Inc. All rights reserved.
|
||||
* Copyright (C) 2004 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This file is part of LVM2.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License v.2.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software Foundation,
|
||||
* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*/
|
||||
|
||||
/* library functions for Cluster LVM Daemon */
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/uio.h>
|
||||
#include <sys/un.h>
|
||||
#include <sys/time.h>
|
||||
#include <sys/ioctl.h>
|
||||
#include <sys/utsname.h>
|
||||
#include <syslog.h>
|
||||
#include <netinet/in.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <stddef.h>
|
||||
#include <signal.h>
|
||||
#include <unistd.h>
|
||||
#include <fcntl.h>
|
||||
#include <search.h>
|
||||
#include <errno.h>
|
||||
|
||||
#include "clvm.h"
|
||||
#include "libclvm.h"
|
||||
|
||||
/* CLVM in hex! */
|
||||
#define LVM_SIGNATURE 0x434C564D
|
||||
|
||||
#define MAX_CLUSTER_MEMBER_NAME_LEN 255
|
||||
|
||||
/* NOTE: the LVMD uses the socket FD as the client ID, this means
|
||||
that any client that calls fork() will inherit the context of
|
||||
it's parent. */
|
||||
static int clvmd_sock = -1;
|
||||
|
||||
static int open_local_sock(void)
|
||||
{
|
||||
int local_socket;
|
||||
struct sockaddr_un sockaddr;
|
||||
|
||||
/* Open local socket */
|
||||
local_socket = socket(PF_UNIX, SOCK_STREAM, 0);
|
||||
if (local_socket < 0) {
|
||||
perror("Can't create local socket");
|
||||
return -1;
|
||||
}
|
||||
|
||||
fcntl(local_socket, F_SETFD, !FD_CLOEXEC);
|
||||
|
||||
strcpy(sockaddr.sun_path, CLVMD_SOCKNAME);
|
||||
sockaddr.sun_family = AF_UNIX;
|
||||
if (connect
|
||||
(local_socket, (struct sockaddr *) &sockaddr, sizeof(sockaddr))) {
|
||||
int saved_errno = errno;
|
||||
|
||||
close(local_socket);
|
||||
|
||||
errno = saved_errno;
|
||||
return -1;
|
||||
}
|
||||
return local_socket;
|
||||
}
|
||||
|
||||
/* Send a request and return the status */
|
||||
static int send_request(char *inbuf, int inlen, char **retbuf)
|
||||
{
|
||||
char outbuf[PIPE_BUF];
|
||||
struct clvm_header *outheader = (struct clvm_header *) outbuf;
|
||||
int len;
|
||||
int off;
|
||||
fd_set fds;
|
||||
|
||||
FD_ZERO(&fds);
|
||||
FD_SET(clvmd_sock, &fds);
|
||||
|
||||
/* Send it to CLVMD */
|
||||
if (write(clvmd_sock, inbuf, inlen) != inlen) {
|
||||
perror("Error writing to CLVMD");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Get the response */
|
||||
if ((len = read(clvmd_sock, outbuf, sizeof(struct clvm_header))) < 0) {
|
||||
perror("Error reading CLVMD");
|
||||
return -1;
|
||||
}
|
||||
if (len == 0) {
|
||||
fprintf(stderr, "EOF reading CLVMD");
|
||||
errno = ENOTCONN;
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Allocate buffer */
|
||||
*retbuf = malloc(len + outheader->arglen);
|
||||
if (!*retbuf) {
|
||||
errno = ENOMEM;
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Copy the header */
|
||||
memcpy(*retbuf, outbuf, len);
|
||||
outheader = (struct clvm_header *) *retbuf;
|
||||
|
||||
/* Read the returned values */
|
||||
off = 1; /* we've already read the first byte */
|
||||
|
||||
while (off < outheader->arglen && len > 0) {
|
||||
len = read(clvmd_sock, outheader->args + off, PIPE_BUF);
|
||||
if (len > 0)
|
||||
off += len;
|
||||
}
|
||||
|
||||
/* Was it an error ? */
|
||||
if (outheader->status < 0) {
|
||||
errno = -outheader->status;
|
||||
return -2;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Build the structure header and parse-out wildcard node names */
|
||||
static void build_header(struct clvm_header *head, int cmd, const char *node,
|
||||
void *data, int len)
|
||||
{
|
||||
head->cmd = cmd;
|
||||
head->status = 0;
|
||||
head->flags = 0;
|
||||
head->clientid = 0;
|
||||
head->arglen = len;
|
||||
if (node) {
|
||||
/* Allow a couple of special node names:
|
||||
"*" for all nodes,
|
||||
"." for the local node only
|
||||
*/
|
||||
if (strcmp(node, "*") == 0) {
|
||||
head->node[0] = '\0';
|
||||
} else if (strcmp(node, ".") == 0) {
|
||||
head->node[0] = '\0';
|
||||
head->flags = CLVMD_FLAG_LOCAL;
|
||||
} else {
|
||||
strcpy(head->node, node);
|
||||
}
|
||||
} else {
|
||||
head->node[0] = '\0';
|
||||
}
|
||||
}
|
||||
|
||||
/* Send a message to a(or all) node(s) in the cluster */
|
||||
int lvm_cluster_write(char cmd, char *node, void *data, int len)
|
||||
{
|
||||
char outbuf[sizeof(struct clvm_header) + len + strlen(node) + 1];
|
||||
char *retbuf = NULL;
|
||||
int status;
|
||||
struct clvm_header *head = (struct clvm_header *) outbuf;
|
||||
|
||||
if (clvmd_sock == -1)
|
||||
clvmd_sock = open_local_sock();
|
||||
if (clvmd_sock == -1)
|
||||
return -1;
|
||||
|
||||
build_header(head, cmd, node, data, len);
|
||||
memcpy(head->node + strlen(head->node) + 1, data, len);
|
||||
|
||||
status =
|
||||
send_request(outbuf,
|
||||
sizeof(struct clvm_header) + strlen(head->node) + len,
|
||||
&retbuf);
|
||||
if (retbuf)
|
||||
free(retbuf);
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
/* API: Send a message to a(or all) node(s) in the cluster
|
||||
and wait for replies */
|
||||
int lvm_cluster_request(char cmd, const char *node, void *data, int len,
|
||||
lvm_response_t ** response, int *num)
|
||||
{
|
||||
char outbuf[sizeof(struct clvm_header) + len + strlen(node) + 1];
|
||||
int *outptr;
|
||||
char *inptr;
|
||||
char *retbuf = NULL;
|
||||
int status;
|
||||
int i;
|
||||
int num_responses = 0;
|
||||
struct clvm_header *head = (struct clvm_header *) outbuf;
|
||||
lvm_response_t *rarray;
|
||||
|
||||
*num = 0;
|
||||
|
||||
if (clvmd_sock == -1)
|
||||
clvmd_sock = open_local_sock();
|
||||
if (clvmd_sock == -1)
|
||||
return -1;
|
||||
|
||||
build_header(head, cmd, node, data, len);
|
||||
memcpy(head->node + strlen(head->node) + 1, data, len);
|
||||
|
||||
status =
|
||||
send_request(outbuf,
|
||||
sizeof(struct clvm_header) + strlen(head->node) + len,
|
||||
&retbuf);
|
||||
if (status == 0 || status == -2) {
|
||||
/* Count the number of responses we got */
|
||||
head = (struct clvm_header *) retbuf;
|
||||
inptr = head->args;
|
||||
while (inptr[0]) {
|
||||
num_responses++;
|
||||
inptr += strlen(inptr) + 1;
|
||||
inptr += sizeof(int);
|
||||
inptr += strlen(inptr) + 1;
|
||||
}
|
||||
|
||||
/* Allocate response array. With an extra pair of INTs on the front to sanity
|
||||
check the pointer when we are given it back to free */
|
||||
outptr =
|
||||
malloc(sizeof(lvm_response_t) * num_responses +
|
||||
sizeof(int) * 2);
|
||||
if (!outptr) {
|
||||
if (retbuf)
|
||||
free(retbuf);
|
||||
errno = ENOMEM;
|
||||
return -1;
|
||||
}
|
||||
|
||||
*response = (lvm_response_t *) (outptr + 2);
|
||||
outptr[0] = LVM_SIGNATURE;
|
||||
outptr[1] = num_responses;
|
||||
rarray = *response;
|
||||
|
||||
/* Unpack the response into an lvm_response_t array */
|
||||
inptr = head->args;
|
||||
i = 0;
|
||||
while (inptr[0]) {
|
||||
strcpy(rarray[i].node, inptr);
|
||||
inptr += strlen(inptr) + 1;
|
||||
|
||||
rarray[i].status = *(int *) inptr;
|
||||
inptr += sizeof(int);
|
||||
|
||||
rarray[i].response = malloc(strlen(inptr) + 1);
|
||||
if (rarray[i].response == NULL) {
|
||||
/* Free up everything else and return error */
|
||||
int j;
|
||||
for (j = 0; j < i; j++)
|
||||
free(rarray[i].response);
|
||||
free(outptr);
|
||||
errno = ENOMEM;
|
||||
return -1;
|
||||
}
|
||||
|
||||
strcpy(rarray[i].response, inptr);
|
||||
rarray[i].len = strlen(inptr);
|
||||
inptr += strlen(inptr) + 1;
|
||||
i++;
|
||||
}
|
||||
*num = num_responses;
|
||||
*response = rarray;
|
||||
}
|
||||
|
||||
if (retbuf)
|
||||
free(retbuf);
|
||||
return status;
|
||||
}
|
||||
|
||||
/* API: Free reply array */
|
||||
int lvm_cluster_free_request(lvm_response_t * response)
|
||||
{
|
||||
int *ptr = (int *) response - 2;
|
||||
int i;
|
||||
int num;
|
||||
|
||||
/* Check it's ours to free */
|
||||
if (response == NULL || *ptr != LVM_SIGNATURE) {
|
||||
errno = EINVAL;
|
||||
return -1;
|
||||
}
|
||||
|
||||
num = ptr[1];
|
||||
for (i = 0; i < num; i++) {
|
||||
free(response[i].response);
|
||||
}
|
||||
free(ptr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* These are a "higher-level" API providing black-box lock/unlock
|
||||
functions for cluster LVM...maybe */
|
||||
|
||||
/* Set by lock(), used by unlock() */
|
||||
static int num_responses;
|
||||
static lvm_response_t *response;
|
||||
|
||||
int lvm_lock_for_cluster(char scope, char *name, int verbosity)
|
||||
{
|
||||
int status;
|
||||
int i;
|
||||
char *args;
|
||||
int len;
|
||||
|
||||
if (name) {
|
||||
len = strlen(name) + 2;
|
||||
args = alloca(len);
|
||||
strcpy(args + 1, name);
|
||||
} else {
|
||||
len = 2;
|
||||
args = alloca(len);
|
||||
args[1] = '\0';
|
||||
}
|
||||
args[0] = scope;
|
||||
|
||||
status = lvm_cluster_request(CLVMD_CMD_LOCK,
|
||||
"", args, len, &response, &num_responses);
|
||||
|
||||
/* If any nodes were down then display them and return an error */
|
||||
for (i = 0; i < num_responses; i++) {
|
||||
if (response[i].status == -EHOSTDOWN) {
|
||||
if (verbosity)
|
||||
fprintf(stderr,
|
||||
"clvmd not running on node %s\n",
|
||||
response[i].node);
|
||||
status = -1;
|
||||
}
|
||||
}
|
||||
|
||||
/* If there was an error then free the memory now as the caller won't
|
||||
want to do the unlock */
|
||||
if (status) {
|
||||
int saved_errno = errno;
|
||||
lvm_cluster_free_request(response);
|
||||
num_responses = 0;
|
||||
errno = saved_errno;
|
||||
}
|
||||
return status;
|
||||
}
|
||||
|
||||
int lvm_unlock_for_cluster(char scope, char *name, int verbosity)
|
||||
{
|
||||
int status;
|
||||
int i;
|
||||
int len;
|
||||
int failed;
|
||||
int num_unlock_responses;
|
||||
char *args;
|
||||
lvm_response_t *unlock_response;
|
||||
|
||||
/* We failed - this should not have been called */
|
||||
if (num_responses == 0)
|
||||
return 0;
|
||||
|
||||
if (name) {
|
||||
len = strlen(name) + 2;
|
||||
args = alloca(len);
|
||||
strcpy(args + 1, name);
|
||||
} else {
|
||||
len = 2;
|
||||
args = alloca(len);
|
||||
args[1] = '\0';
|
||||
}
|
||||
args[0] = scope;
|
||||
|
||||
/* See if it failed anywhere */
|
||||
failed = 0;
|
||||
for (i = 0; i < num_responses; i++) {
|
||||
if (response[i].status != 0)
|
||||
failed++;
|
||||
}
|
||||
|
||||
/* If it failed on any nodes then we only unlock on
|
||||
the nodes that succeeded */
|
||||
if (failed) {
|
||||
for (i = 0; i < num_responses; i++) {
|
||||
/* Unlock the ones that succeeded */
|
||||
if (response[i].status == 0) {
|
||||
status = lvm_cluster_request(CLVMD_CMD_UNLOCK,
|
||||
response[i].node,
|
||||
args, len,
|
||||
&unlock_response,
|
||||
&num_unlock_responses);
|
||||
if (status) {
|
||||
if (verbosity)
|
||||
fprintf(stderr,
|
||||
"cluster command to node %s failed: %s\n",
|
||||
response[i].node,
|
||||
strerror(errno));
|
||||
} else if (unlock_response[0].status != 0) {
|
||||
if (verbosity > 1)
|
||||
fprintf(stderr,
|
||||
"unlock on node %s failed: %s\n",
|
||||
response[i].node,
|
||||
strerror(unlock_response
|
||||
[0].status));
|
||||
}
|
||||
lvm_cluster_free_request(unlock_response);
|
||||
} else {
|
||||
if (verbosity)
|
||||
fprintf(stderr,
|
||||
"command on node %s failed: '%s' - will be left locked\n",
|
||||
response[i].node,
|
||||
strerror(response[i].status));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
/* All OK, we can do a full cluster unlock */
|
||||
status = lvm_cluster_request(CLVMD_CMD_UNLOCK,
|
||||
"",
|
||||
args, len,
|
||||
&unlock_response,
|
||||
&num_unlock_responses);
|
||||
if (status) {
|
||||
if (verbosity > 1)
|
||||
fprintf(stderr, "cluster command failed: %s\n",
|
||||
strerror(errno));
|
||||
} else {
|
||||
for (i = 0; i < num_unlock_responses; i++) {
|
||||
if (unlock_response[i].status != 0) {
|
||||
if (verbosity > 1)
|
||||
fprintf(stderr,
|
||||
"unlock on node %s failed: %s\n",
|
||||
response[i].node,
|
||||
strerror(unlock_response
|
||||
[0].status));
|
||||
}
|
||||
}
|
||||
}
|
||||
lvm_cluster_free_request(unlock_response);
|
||||
}
|
||||
lvm_cluster_free_request(response);
|
||||
|
||||
return 0;
|
||||
}
|
@@ -1,36 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 1997-2004 Sistina Software, Inc. All rights reserved.
|
||||
* Copyright (C) 2004 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This file is part of LVM2.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License v.2.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software Foundation,
|
||||
* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*/
|
||||
|
||||
#ifndef _LIBCLVM_H
|
||||
#define _LIBCLVM_H
|
||||
|
||||
typedef struct lvm_response {
|
||||
char node[255];
|
||||
char *response;
|
||||
int status;
|
||||
int len;
|
||||
|
||||
} lvm_response_t;
|
||||
|
||||
extern int lvm_cluster_request(char cmd, const char *node, void *data, int len,
|
||||
lvm_response_t ** response, int *num);
|
||||
extern int lvm_cluster_write(char cmd, char *node, void *data, int len);
|
||||
extern int lvm_cluster_free_request(lvm_response_t * response);
|
||||
|
||||
/* The "high-level" API */
|
||||
extern int lvm_lock_for_cluster(char scope, char *name, int verbosity);
|
||||
extern int lvm_unlock_for_cluster(char scope, char *name, int verbosity);
|
||||
|
||||
#endif
|
@@ -198,7 +198,7 @@ static int do_activate_lv(char *resource, int mode)
 return errno;
 
 /* If it's suspended then resume it */
-if (!lv_info_by_lvid(cmd, resource, &lvi))
+if (!lv_info_by_lvid(cmd, resource, &lvi, 0))
 return EIO;
 
 if (lvi.suspended)
@@ -244,7 +244,7 @@ static int do_suspend_lv(char *resource)
 }
 
 /* Only suspend it if it exists */
-if (!lv_info_by_lvid(cmd, resource, &lvi))
+if (!lv_info_by_lvid(cmd, resource, &lvi, 0))
 return EIO;
 
 if (lvi.exists) {
@@ -363,7 +363,7 @@ int post_lock_lv(unsigned char command, unsigned char lock_flags,
 if (oldmode == LKM_PWMODE) {
 struct lvinfo lvi;
 
-if (!lv_info_by_lvid(cmd, resource, &lvi))
+if (!lv_info_by_lvid(cmd, resource, &lvi, 0))
 return EIO;
 
 if (lvi.exists) {
@@ -42,7 +42,9 @@
#include "list.h"
#include "locking.h"
#include "system-lv.h"
#include "clvm.h"
#include "clvmd-comms.h"
#include "clvmd.h"
#ifdef HAVE_CCS
#include "ccs.h"
#endif
@@ -96,19 +96,19 @@ void tcp_remove_client(char *csid)
job of clvmd.c whch will do the job when it notices the
other end has gone. We just need to remove the client(s) from
the hash table so we don't try to use it for sending any more */
client = hash_lookup_binary(sock_hash, csid, MAX_CSID_LEN);
client = hash_lookup_binary(sock_hash, csid, GULM_MAX_CSID_LEN);
if (client)
{
hash_remove_binary(sock_hash, csid, MAX_CSID_LEN);
hash_remove_binary(sock_hash, csid, GULM_MAX_CSID_LEN);
}

/* Look for a mangled one too */
csid[0] ^= 0x80;

client = hash_lookup_binary(sock_hash, csid, MAX_CSID_LEN);
client = hash_lookup_binary(sock_hash, csid, GULM_MAX_CSID_LEN);
if (client)
{
hash_remove_binary(sock_hash, csid, MAX_CSID_LEN);
hash_remove_binary(sock_hash, csid, GULM_MAX_CSID_LEN);
}

/* Put it back as we found it */
@@ -137,7 +137,7 @@ int alloc_client(int fd, char *csid, struct local_client **new_client)
*new_client = client;

/* Add to our list of node sockets */
if (hash_lookup_binary(sock_hash, csid, MAX_CSID_LEN))
if (hash_lookup_binary(sock_hash, csid, GULM_MAX_CSID_LEN))
{
DEBUGLOG("alloc_client mangling CSID for second connection\n");
/* This is a duplicate connection but we can't close it because
@@ -150,7 +150,7 @@ int alloc_client(int fd, char *csid, struct local_client **new_client)

/* If it still exists then kill the connection as we should only
ever have one incoming connection from each node */
if (hash_lookup_binary(sock_hash, csid, MAX_CSID_LEN))
if (hash_lookup_binary(sock_hash, csid, GULM_MAX_CSID_LEN))
{
DEBUGLOG("Multiple incoming connections from node\n");
syslog(LOG_ERR, " Bogus incoming connection from %d.%d.%d.%d\n", csid[0],csid[1],csid[2],csid[3]);
@@ -160,26 +160,26 @@ int alloc_client(int fd, char *csid, struct local_client **new_client)
return -1;
}
}
hash_insert_binary(sock_hash, csid, MAX_CSID_LEN, client);
hash_insert_binary(sock_hash, csid, GULM_MAX_CSID_LEN, client);

return 0;
}

int get_main_cluster_fd()
int get_main_gulm_cluster_fd()
{
return listen_fd;
}


/* Read on main comms (listen) socket, accept it */
int cluster_fd_callback(struct local_client *fd, char *buf, int len, char *csid,
int cluster_fd_gulm_callback(struct local_client *fd, char *buf, int len, char *csid,
struct local_client **new_client)
{
int newfd;
struct sockaddr_in6 addr;
socklen_t addrlen = sizeof(addr);
int status;
char name[MAX_CLUSTER_MEMBER_NAME_LEN];
char name[GULM_MAX_CLUSTER_MEMBER_NAME_LEN];

DEBUGLOG("cluster_fd_callback\n");
*new_client = NULL;
@@ -196,7 +196,7 @@ int cluster_fd_callback(struct local_client *fd, char *buf, int len, char *csid,
/* Check that the client is a member of the cluster
and reject if not.
*/
if (name_from_csid((char *)&addr.sin6_addr, name) < 0)
if (gulm_name_from_csid((char *)&addr.sin6_addr, name) < 0)
{
syslog(LOG_ERR, "Got connect from non-cluster node %s\n",
print_csid((char *)&addr.sin6_addr));
@@ -234,7 +234,7 @@ static int read_from_tcpsock(struct local_client *client, char *buf, int len, ch

/* Get "csid" */
getpeername(client->fd, (struct sockaddr *)&addr, &slen);
memcpy(csid, &addr.sin6_addr, MAX_CSID_LEN);
memcpy(csid, &addr.sin6_addr, GULM_MAX_CSID_LEN);

status = read(client->fd, buf, len);

@@ -245,15 +245,15 @@ static int read_from_tcpsock(struct local_client *client, char *buf, int len, ch
if (status == 0 ||
(status < 0 && errno != EAGAIN && errno != EINTR))
{
char remcsid[MAX_CSID_LEN];
char remcsid[GULM_MAX_CSID_LEN];

memcpy(remcsid, csid, MAX_CSID_LEN);
memcpy(remcsid, csid, GULM_MAX_CSID_LEN);
close(client->fd);

/* If the csid was mangled, then make sure we remove the right entry */
if (client->bits.net.flags)
remcsid[0] ^= 0x80;
hash_remove_binary(sock_hash, remcsid, MAX_CSID_LEN);
hash_remove_binary(sock_hash, remcsid, GULM_MAX_CSID_LEN);

/* Tell cluster manager layer */
add_down_node(remcsid);
@@ -281,7 +281,7 @@ static int connect_csid(char *csid, struct local_client **newclient)
}

addr.sin6_family = AF_INET6;
memcpy(&addr.sin6_addr, csid, MAX_CSID_LEN);
memcpy(&addr.sin6_addr, csid, GULM_MAX_CSID_LEN);
addr.sin6_port = htons(tcp_port);

DEBUGLOG("Connecting socket %d\n", fd);
@@ -300,7 +300,7 @@ static int connect_csid(char *csid, struct local_client **newclient)
add_client(*newclient);

/* If we can connect to it, it must be running a clvmd */
add_up_node(csid);
gulm_add_up_node(csid);
return status;
}

@@ -309,18 +309,18 @@ static int tcp_send_message(void *buf, int msglen, unsigned char *csid, const ch
{
int status;
struct local_client *client;
char ourcsid[MAX_CSID_LEN];
char ourcsid[GULM_MAX_CSID_LEN];

assert(csid);

DEBUGLOG("tcp_send_message, csid = %s, msglen = %d\n", print_csid(csid), msglen);

/* Don't connect to ourself */
get_our_csid(ourcsid);
if (memcmp(csid, ourcsid, MAX_CSID_LEN) == 0)
get_our_gulm_csid(ourcsid);
if (memcmp(csid, ourcsid, GULM_MAX_CSID_LEN) == 0)
return msglen;

client = hash_lookup_binary(sock_hash, csid, MAX_CSID_LEN);
client = hash_lookup_binary(sock_hash, csid, GULM_MAX_CSID_LEN);
if (!client)
{
status = connect_csid(csid, &client);
@@ -333,7 +333,7 @@ static int tcp_send_message(void *buf, int msglen, unsigned char *csid, const ch
}


int cluster_send_message(void *buf, int msglen, char *csid, const char *errtext)
int gulm_cluster_send_message(void *buf, int msglen, char *csid, const char *errtext)
{
int status=0;

@@ -343,7 +343,7 @@ int cluster_send_message(void *buf, int msglen, char *csid, const char *errtext)
if (!csid)
{
void *context = NULL;
char loop_csid[MAX_CSID_LEN];
char loop_csid[GULM_MAX_CSID_LEN];

/* Loop round all gulm-known nodes */
while (get_next_node_csid(&context, loop_csid))
@@ -377,9 +377,9 @@ static int get_our_ip_address(char *addr, int *family)

/* Public version of above for those that don't care what protocol
we're using */
void get_our_csid(char *csid)
void get_our_gulm_csid(char *csid)
{
static char our_csid[MAX_CSID_LEN];
static char our_csid[GULM_MAX_CSID_LEN];
static int got_csid = 0;

if (!got_csid)
@@ -392,7 +392,7 @@ void get_our_csid(char *csid)
got_csid = 1;
}
}
memcpy(csid, our_csid, MAX_CSID_LEN);
memcpy(csid, our_csid, GULM_MAX_CSID_LEN);
}

static void map_v4_to_v6(struct in_addr *ip4, struct in6_addr *ip6)
@@ -408,7 +408,7 @@ int get_ip_address(char *node, char *addr)
{
struct hostent *he;

memset(addr, 0, MAX_CSID_LEN);
memset(addr, 0, GULM_MAX_CSID_LEN);

// TODO: what do we do about multi-homed hosts ???
// CCSs ip_interfaces solved this but some bugger removed it.
@@ -1,8 +1,12 @@
#include <netinet/in.h>

#define MAX_CLUSTER_MESSAGE 1600
#define MAX_CSID_LEN sizeof(struct in6_addr)
#define MAX_CLUSTER_MEMBER_NAME_LEN 128
#define GULM_MAX_CLUSTER_MESSAGE 1600
#define GULM_MAX_CSID_LEN sizeof(struct in6_addr)
#define GULM_MAX_CLUSTER_MEMBER_NAME_LEN 128

extern int init_comms(unsigned short);
extern char *print_csid(char *);
int get_main_gulm_cluster_fd(void);
int cluster_fd_gulm_callback(struct local_client *fd, char *buf, int len, char *csid, struct local_client **new_client);
int gulm_cluster_send_message(void *buf, int msglen, char *csid, const char *errtext);
void get_our_gulm_csid(char *csid);
@@ -78,12 +78,13 @@ int target_present(const char *target_name)
{
return 0;
}
int lv_info(const struct logical_volume *lv, struct lvinfo *info)
int lv_info(const struct logical_volume *lv, struct lvinfo *info,
int with_open_count)
{
return 0;
}
int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s,
struct lvinfo *info)
struct lvinfo *info, int with_open_count)
{
return 0;
}
@@ -333,7 +334,7 @@ int target_present(const char *target_name)
* Returns 1 if info structure populated, else 0 on failure.
*/
static int _lv_info(const struct logical_volume *lv, int mknodes,
struct lvinfo *info)
struct lvinfo *info, int with_open_count)
{
int r;
struct dev_manager *dm;
@@ -347,7 +348,7 @@ static int _lv_info(const struct logical_volume *lv, int mknodes,
return 0;
}

if (!(r = dev_manager_info(dm, lv, mknodes, &dminfo)))
if (!(r = dev_manager_info(dm, lv, mknodes, with_open_count, &dminfo)))
stack;

info->exists = dminfo.exists;
@@ -361,20 +362,21 @@ static int _lv_info(const struct logical_volume *lv, int mknodes,
return r;
}

int lv_info(const struct logical_volume *lv, struct lvinfo *info)
int lv_info(const struct logical_volume *lv, struct lvinfo *info,
int with_open_count)
{
return _lv_info(lv, 0, info);
return _lv_info(lv, 0, info, with_open_count);
}

int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s,
struct lvinfo *info)
struct lvinfo *info, int with_open_count)
{
struct logical_volume *lv;

if (!(lv = lv_from_lvid(cmd, lvid_s)))
return 0;

return _lv_info(lv, 0, info);
return _lv_info(lv, 0, info, with_open_count);
}

/*
@@ -412,7 +414,7 @@ int lv_mirror_percent(struct logical_volume *lv, int wait, float *percent,
if (!activation())
return 0;

if (!lv_info(lv, &info)) {
if (!lv_info(lv, &info, 0)) {
stack;
return 0;
}
@@ -437,7 +439,7 @@ static int _lv_active(struct logical_volume *lv)
{
struct lvinfo info;

if (!lv_info(lv, &info)) {
if (!lv_info(lv, &info, 0)) {
stack;
return -1;
}
@@ -449,7 +451,7 @@ static int _lv_open_count(struct logical_volume *lv)
{
struct lvinfo info;

if (!lv_info(lv, &info)) {
if (!lv_info(lv, &info, 1)) {
stack;
return -1;
}
@@ -566,7 +568,7 @@ static int _lv_suspend(struct cmd_context *cmd, const char *lvid_s,
return 1;
}

if (!lv_info(lv, &info)) {
if (!lv_info(lv, &info, 0)) {
stack;
return 0;
}
@@ -612,7 +614,7 @@ static int _lv_resume(struct cmd_context *cmd, const char *lvid_s,
return 1;
}

if (!lv_info(lv, &info)) {
if (!lv_info(lv, &info, 0)) {
stack;
return 0;
}
@@ -657,7 +659,7 @@ int lv_deactivate(struct cmd_context *cmd, const char *lvid_s)
return 1;
}

if (!lv_info(lv, &info)) {
if (!lv_info(lv, &info, 1)) {
stack;
return 0;
}
@@ -726,7 +728,7 @@ static int _lv_activate(struct cmd_context *cmd, const char *lvid_s, int filter)
return 1;
}

if (!lv_info(lv, &info)) {
if (!lv_info(lv, &info, 0)) {
stack;
return 0;
}
@@ -765,7 +767,7 @@ int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv)
return r;
}

if (!_lv_info(lv, 1, &info)) {
if (!_lv_info(lv, 1, &info, 0)) {
stack;
return 0;
}
@@ -55,9 +55,10 @@ int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv);
/*
* Returns 1 if info structure has been populated, else 0.
*/
int lv_info(const struct logical_volume *lv, struct lvinfo *info);
int lv_info(const struct logical_volume *lv, struct lvinfo *info,
int with_open_count);
int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s,
struct lvinfo *info);
struct lvinfo *info, int with_open_count);

/*
* Returns 1 if activate_lv has been set: 1 = activate; 0 = don't.
@@ -211,7 +211,8 @@ static struct dm_task *_setup_task(const char *name, const char *uuid,
}

static int _info_run(const char *name, const char *uuid, struct dm_info *info,
int mknodes, struct pool *mem, char **uuid_out)
int mknodes, int with_open_count, struct pool *mem,
char **uuid_out)
{
int r = 0;
struct dm_task *dmt;
@@ -225,6 +226,10 @@ static int _info_run(const char *name, const char *uuid, struct dm_info *info,
return 0;
}

if (!with_open_count)
if (!dm_task_no_open_count(dmt))
log_error("Failed to disable open_count");

if (!dm_task_run(dmt)) {
stack;
goto out;
@@ -250,14 +255,17 @@ static int _info_run(const char *name, const char *uuid, struct dm_info *info,
}

static int _info(const char *name, const char *uuid, int mknodes,
struct dm_info *info, struct pool *mem, char **uuid_out)
int with_open_count, struct dm_info *info,
struct pool *mem, char **uuid_out)
{
if (!mknodes && uuid && *uuid &&
_info_run(NULL, uuid, info, 0, mem, uuid_out) && info->exists)
_info_run(NULL, uuid, info, 0, with_open_count, mem, uuid_out) &&
info->exists)
return 1;

if (name)
return _info_run(name, NULL, info, mknodes, mem, uuid_out);
return _info_run(name, NULL, info, mknodes, with_open_count,
mem, uuid_out);

return 0;
}
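For context, here is a minimal standalone sketch (not part of this changeset) of the libdevmapper pattern the new with_open_count plumbing relies on: a DM_DEVICE_INFO task is created and, when the caller does not need open_count, dm_task_no_open_count() is called before dm_task_run() so the library can skip that lookup. The helper name and error handling below are illustrative only.

#include <stdio.h>
#include <libdevmapper.h>

static int print_info(const char *name, int with_open_count)
{
	struct dm_task *dmt;
	struct dm_info info;
	int r = 0;

	if (!(dmt = dm_task_create(DM_DEVICE_INFO)))
		return 0;
	if (!dm_task_set_name(dmt, name))
		goto out;
	/* Tell libdevmapper not to fetch open_count unless it is wanted */
	if (!with_open_count && !dm_task_no_open_count(dmt))
		fprintf(stderr, "Failed to disable open_count\n");
	if (!dm_task_run(dmt) || !dm_task_get_info(dmt, &info))
		goto out;
	printf("%s exists=%d open_count=%d\n", name, info.exists,
	       with_open_count ? (int) info.open_count : -1);
	r = 1;
out:
	dm_task_destroy(dmt);
	return r;
}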
@@ -279,6 +287,9 @@ static int _status_run(const char *name, const char *uuid,
return 0;
}

if (!dm_task_no_open_count(dmt))
log_error("Failed to disable open_count");

if (!dm_task_run(dmt)) {
stack;
goto out;
@@ -357,6 +368,9 @@ static int _percent_run(struct dev_manager *dm, const char *name,
return 0;
}

if (!dm_task_no_open_count(dmt))
log_error("Failed to disable open_count");

if (!dm_task_run(dmt)) {
stack;
goto out;
@@ -460,6 +474,9 @@ static int _rename(struct dev_manager *dm, struct dev_layer *dl, char *newname)
goto out;
}

if (!dm_task_no_open_count(dmt))
log_error("Failed to disable open_count");

if (!(r = dm_task_run(dmt))) {
log_error("Couldn't rename device '%s'.", dl->name);
goto out;
@@ -488,6 +505,9 @@ static int _suspend_or_resume(const char *name, action_t suspend)
return 0;
}

if (!dm_task_no_open_count(dmt))
log_error("Failed to disable open_count");

if (!(r = dm_task_run(dmt)))
log_error("Couldn't %s device '%s'", sus ? "suspend" : "resume",
name);
@@ -579,6 +599,9 @@ static int _load(struct dev_manager *dm, struct dev_layer *dl, int task)
log_very_verbose("Activating %s read-only", dl->name);
}

if (!dm_task_no_open_count(dmt))
log_error("Failed to disable open_count");

if (!(r = dm_task_run(dmt))) {
log_error("Couldn't load device '%s'.", dl->name);
if ((dl->lv->minor >= 0 || dl->lv->major >= 0) &&
@@ -635,6 +658,9 @@ static int _remove(struct dev_layer *dl)
return 0;
}

if (!dm_task_no_open_count(dmt))
log_error("Failed to disable open_count");

/* Suppress error message if it's still in use - we'll log it later */
log_suppress(1);

@@ -852,6 +878,7 @@ static int _populate_snapshot(struct dev_manager *dm,
struct snapshot *s;
struct dev_layer *dlo, *dlc;
char devbufo[10], devbufc[10];
uint64_t size;

if (!(s = find_cow(dl->lv))) {
log_error("Couldn't find snapshot for '%s'.", dl->lv->name);
@@ -899,10 +926,10 @@ static int _populate_snapshot(struct dev_manager *dm,
return 0;
}

log_debug("Adding target: 0 %" PRIu64 " snapshot %s",
s->origin->size, params);
if (!dm_task_add_target
(dmt, UINT64_C(0), s->origin->size, "snapshot", params)) {
size = (uint64_t) s->le_count * s->origin->vg->extent_size;

log_debug("Adding target: 0 %" PRIu64 " snapshot %s", size, params);
if (!dm_task_add_target(dmt, UINT64_C(0), size, "snapshot", params)) {
stack;
return 0;
}
@@ -970,7 +997,7 @@ void dev_manager_destroy(struct dev_manager *dm)
}

int dev_manager_info(struct dev_manager *dm, const struct logical_volume *lv,
int mknodes, struct dm_info *info)
int mknodes, int with_open_count, struct dm_info *info)
{
char *name;

@@ -986,7 +1013,8 @@ int dev_manager_info(struct dev_manager *dm, const struct logical_volume *lv,
* Try and get some info on this device.
*/
log_debug("Getting device info for %s", name);
if (!_info(name, lv->lvid.s, mknodes, info, NULL, NULL)) {
if (!_info(name, lv->lvid.s, mknodes, with_open_count, info, NULL,
NULL)) {
stack;
return 0;
}
@@ -1065,7 +1093,7 @@ static struct dev_layer *_create_dev(struct dev_manager *dm, char *name,
dl->name = name;

log_debug("Getting device info for %s", dl->name);
if (!_info(dl->name, dlid, 0, &dl->info, dm->mem, &uuid)) {
if (!_info(dl->name, dlid, 0, 0, &dl->info, dm->mem, &uuid)) {
stack;
return NULL;
}
@@ -36,7 +36,7 @@ void dev_manager_exit(void);
* unsuspended until the snapshot is also created.)
*/
int dev_manager_info(struct dev_manager *dm, const struct logical_volume *lv,
int mknodes, struct dm_info *info);
int mknodes, int with_open_count, struct dm_info *info);
int dev_manager_snapshot_percent(struct dev_manager *dm,
struct logical_volume *lv, float *percent);
int dev_manager_mirror_percent(struct dev_manager *dm,
@@ -317,7 +317,7 @@ void lvdisplay_colons(struct logical_volume *lv)
{
int inkernel;
struct lvinfo info;
inkernel = lv_info(lv, &info) && info.exists;
inkernel = lv_info(lv, &info, 1) && info.exists;

log_print("%s%s/%s:%s:%d:%d:-1:%d:%" PRIu64 ":%d:-1:%d:%d:%d:%d",
lv->vg->cmd->dev_dir,
@@ -348,7 +348,7 @@ int lvdisplay_full(struct cmd_context *cmd, struct logical_volume *lv,
return 0;
}

inkernel = lv_info(lv, &info) && info.exists;
inkernel = lv_info(lv, &info, 1) && info.exists;

log_print("--- Logical volume ---");
@@ -69,6 +69,7 @@ static const device_info_t device_info[] = {
{"power2", 16}, /* EMC Powerpath */
{"i2o_block", 16}, /* i2o Block Disk */
{"iseries/vd", 8}, /* iSeries disks */
{"gnbd", 1}, /* Network block device */
{NULL, 0}
};
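As an aside, the table above pairs a block-device type name with the number of partitions LVM will consider on devices of that type. The hypothetical helper below only sketches how such a lookup might be used; the real filter code differs, and the names would normally come from parsing /proc/devices.

#include <stdio.h>
#include <string.h>

typedef struct {
	const char *name;
	int max_partitions;
} device_info_t;

static const device_info_t device_info[] = {
	{"sd", 16},
	{"gnbd", 1},		/* Network block device */
	{NULL, 0}
};

/* Return the partition limit for a device type, 0 if the type is unknown */
static int max_partitions(const char *name)
{
	const device_info_t *d;

	for (d = device_info; d->name; d++)
		if (!strcmp(d->name, name))
			return d->max_partitions;
	return 0;
}

int main(void)
{
	printf("gnbd -> %d partition(s)\n", max_partitions("gnbd"));
	return 0;
}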
@@ -646,7 +646,7 @@ int import_snapshots(struct pool *mem, struct volume_group *vg,
continue;

/* insert the snapshot */
if (!vg_add_snapshot(org, cow, 1, NULL,
if (!vg_add_snapshot(org, cow, 1, NULL, org->le_count,
lvd->lv_chunk_size)) {
log_err("Couldn't add snapshot.");
return 0;
@@ -66,29 +66,6 @@ struct pool_list;
struct user_subpool;
struct user_device;

/* This must be kept up to date with sistina/pool/module/pool_sptypes.h */

/* Generic Labels */
#define SPTYPE_DATA (0x00000000)

/* GFS specific labels */
#define SPTYPE_GFS_DATA (0x68011670)
#define SPTYPE_GFS_JOURNAL (0x69011670)

struct sptype_name {
const char *name;
uint32_t label;
};

static const struct sptype_name sptype_names[] = {
{"data", SPTYPE_DATA},

{"gfs_data", SPTYPE_GFS_DATA},
{"gfs_journal", SPTYPE_GFS_JOURNAL},

{"", 0x0} /* This must be the last flag. */
};

struct pool_disk {
uint64_t pl_magic; /* Pool magic number */
uint64_t pl_pool_id; /* Unique pool identifier */
@@ -19,6 +19,7 @@
#include "metadata.h"
#include "lvmcache.h"
#include "disk_rep.h"
#include "sptype_names.h"
#include "lv_alloc.h"
#include "str_list.h"
#include "display.h"
42
lib/format_pool/sptype_names.h
Normal file
@@ -0,0 +1,42 @@
/*
* Copyright (C) 1997-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License v.2.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/

#ifndef SPTYPE_NAMES_H
#define SPTYPE_NAMES_H

/* This must be kept up to date with sistina/pool/module/pool_sptypes.h */

/* Generic Labels */
#define SPTYPE_DATA (0x00000000)

/* GFS specific labels */
#define SPTYPE_GFS_DATA (0x68011670)
#define SPTYPE_GFS_JOURNAL (0x69011670)

struct sptype_name {
const char *name;
uint32_t label;
};

static const struct sptype_name sptype_names[] = {
{"data", SPTYPE_DATA},

{"gfs_data", SPTYPE_GFS_DATA},
{"gfs_journal", SPTYPE_GFS_JOURNAL},

{"", 0x0} /* This must be the last flag. */
};

#endif
@@ -513,7 +513,7 @@ static int _print_snapshot(struct formatter *f, struct snapshot *snap,
}

seg.le = 0;
seg.len = snap->origin->le_count;
seg.len = snap->le_count;
seg.origin = snap->origin;
seg.cow = snap->cow;
seg.chunk_size = snap->chunk_size;
@@ -88,8 +88,11 @@ static int _lv_setup(struct format_instance *fid, struct logical_volume *lv)
}
*/

if (!*lv->lvid.s)
lvid_create(&lv->lvid, &lv->vg->id);
if (!*lv->lvid.s && !lvid_create(&lv->lvid, &lv->vg->id)) {
log_error("Random lvid creation failed for %s/%s.",
lv->vg->name, lv->name);
return 0;
}

return 1;
}
@@ -147,10 +147,16 @@ static int _send_request(char *inbuf, int inlen, char **retbuf)
}

/* Was it an error ? */
if (outheader->status < 0) {
errno = -outheader->status;
log_error("cluster request failed: %s", strerror(errno));
return 0;
if (outheader->status != 0) {
errno = outheader->status;

/* Only return an error here if there are no node-specific
errors present in the message that might have more detail */
if (!(outheader->flags & CLVMD_FLAG_NODEERRS)) {
log_error("cluster request failed: %s", strerror(errno));
return 0;
}

}

return 1;
@@ -328,7 +334,7 @@ static int _lock_for_cluster(unsigned char cmd, unsigned int flags, char *name)
* VG locks are just that: locks, and have no side effects
* so we only need to do them on the local node because all
* locks are cluster-wide.
* Also, if the lock is exclusive it makes no sense to try to
* Also, if the lock is exclusive it makes no sense to try to
* acquire it on all nodes, so just do that on the local node too.
*/
if (cmd == CLVMD_CMD_LOCK_VG ||
@@ -341,10 +347,11 @@ static int _lock_for_cluster(unsigned char cmd, unsigned int flags, char *name)

/* If any nodes were down then display them and return an error */
for (i = 0; i < num_responses; i++) {
if (response[i].status == -EHOSTDOWN) {
if (response[i].status == EHOSTDOWN) {
log_error("clvmd not running on node %s",
response[i].node);
status = 0;
errno = response[i].status;
} else if (response[i].status) {
log_error("Error locking on node %s: %s",
response[i].node,
@@ -352,6 +359,7 @@ static int _lock_for_cluster(unsigned char cmd, unsigned int flags, char *name)
response[i].response :
strerror(response[i].status));
status = 0;
errno = response[i].status;
}
}
@@ -267,10 +267,13 @@ struct physical_volume *pv_create(const struct format_type *fmt,
return NULL;
}

if (!id)
id_create(&pv->id);
else
if (id)
memcpy(&pv->id, id, sizeof(*id));
else if (!id_create(&pv->id)) {
log_error("Failed to create random uuid for %s.",
dev_name(dev));
return NULL;
}

pv->dev = dev;

@@ -711,6 +714,11 @@ struct volume_group *vg_read(struct cmd_context *cmd, const char *vgname,
log_error("Automatic metadata correction failed");
return NULL;
}
if (!vg_commit(correct_vg)) {
log_error("Automatic metadata correction commit "
"failed");
return NULL;
}
}

if ((correct_vg->status & PVMOVE) && !pvmove_mode()) {
@@ -255,6 +255,7 @@ struct snapshot {

int persistent; /* boolean */
uint32_t chunk_size; /* in 512 byte sectors */
uint32_t le_count;

struct logical_volume *origin;
struct logical_volume *cow;
@@ -496,9 +497,9 @@ struct snapshot *find_cow(const struct logical_volume *lv);
struct snapshot *find_origin(const struct logical_volume *lv);
struct list *find_snapshots(const struct logical_volume *lv);

int vg_add_snapshot(struct logical_volume *origin,
struct logical_volume *cow,
int persistent, struct id *id, uint32_t chunk_size);
int vg_add_snapshot(struct logical_volume *origin, struct logical_volume *cow,
int persistent, struct id *id, uint32_t extent_count,
uint32_t chunk_size);

int vg_remove_snapshot(struct volume_group *vg, struct logical_volume *cow);
@@ -104,9 +104,9 @@ struct list *find_snapshots(const struct logical_volume *lv)
return snaplist;
}

int vg_add_snapshot(struct logical_volume *origin,
struct logical_volume *cow,
int persistent, struct id *id, uint32_t chunk_size)
int vg_add_snapshot(struct logical_volume *origin, struct logical_volume *cow,
int persistent, struct id *id, uint32_t extent_count,
uint32_t chunk_size)
{
struct snapshot *s;
struct snapshot_list *sl;
@@ -127,13 +127,15 @@ int vg_add_snapshot(struct logical_volume *origin,

s->persistent = persistent;
s->chunk_size = chunk_size;
s->le_count = extent_count;
s->origin = origin;
s->cow = cow;

if (id)
s->id = *id;
else if (!id_create(&s->id)) {
log_error("Snapshot UUID creation failed");
log_error("Random UUID creation failed for snapshot %s.",
cow->name);
return 0;
}
@@ -48,13 +48,14 @@ void *malloc_aux(size_t s, const char *file, int line)
size_t tsize = s + sizeof(*nb) + sizeof(unsigned long);

if (s > 50000000) {
log_error("Huge memory allocation (size %" PRIuPTR
log_error("Huge memory allocation (size %" PRIsize_t
") rejected - metadata corruption?", s);
return 0;
}

if (!(nb = malloc(tsize))) {
log_error("couldn't allocate any memory, size = %" PRIuPTR, s);
log_error("couldn't allocate any memory, size = %" PRIsize_t,
s);
return 0;
}

@@ -190,7 +191,7 @@ int dump_memory(void)
str[sizeof(str) - 1] = '\0';

print_log(_LOG_INFO, mb->file, mb->line,
"block %d at %p, size %" PRIdPTR "\t [%s]",
"block %d at %p, size %" PRIsize_t "\t [%s]",
mb->id, mb->magic, mb->length, str);
tot += mb->length;
}
@@ -220,7 +221,7 @@ void bounds_check(void)
void *malloc_aux(size_t s, const char *file, int line)
{
if (s > 50000000) {
log_error("Huge memory allocation (size %" PRIuPTR
log_error("Huge memory allocation (size %" PRIsize_t
") rejected - metadata corruption?", s);
return 0;
}
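The PRIuPTR to PRIsize_t switch above is about printing size_t with a conversion that actually matches its type. A tiny standalone example of the portable C99 form follows; the exact expansion of LVM's PRIsize_t macro is assumed here rather than quoted.

#include <stdio.h>

int main(void)
{
	size_t s = sizeof(unsigned long long);

	/* %zu is the C99 length modifier for size_t */
	printf("size = %zu\n", s);
	return 0;
}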
@@ -27,14 +27,14 @@
((x) & 0xff000000U) >> 24 | \
((x) & 0x0000ff00U) << 8 | \
((x) & 0x00ff0000U) >> 8)
# define bswap_64(x) (((x) & 0x00000000000000ffU) << 56 | \
((x) & 0xff00000000000000U) >> 56 | \
((x) & 0x000000000000ff00U) << 40 | \
((x) & 0x00ff000000000000U) >> 40 | \
((x) & 0x0000000000ff0000U) << 24 | \
((x) & 0x0000ff0000000000U) >> 24 | \
((x) & 0x00000000ff000000U) << 8 | \
((x) & 0x000000ff00000000U) >> 8)
# define bswap_64(x) (((x) & 0x00000000000000ffULL) << 56 | \
((x) & 0xff00000000000000ULL) >> 56 | \
((x) & 0x000000000000ff00ULL) << 40 | \
((x) & 0x00ff000000000000ULL) >> 40 | \
((x) & 0x0000000000ff0000ULL) << 24 | \
((x) & 0x0000ff0000000000ULL) >> 24 | \
((x) & 0x00000000ff000000ULL) << 8 | \
((x) & 0x000000ff00000000ULL) >> 8)
#endif

#if BYTE_ORDER == LITTLE_ENDIAN
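The only change in the hunk above is the integer-constant suffix: with a plain U suffix a 64-bit mask such as 0xff00000000000000 may be rejected or warned about by compilers on 32-bit hosts, while ULL guarantees a type of at least 64 bits. A minimal illustration, assuming nothing beyond standard C99 headers:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t x = UINT64_C(0x0123456789abcdef);
	uint64_t top = (x & 0xff00000000000000ULL) >> 56;

	printf("top byte = %02" PRIx64 "\n", top);	/* prints 01 */
	return 0;
}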
@@ -295,7 +295,7 @@ static int _lvkmaj_disp(struct report_handle *rh, struct field *field,
struct lvinfo info;
uint64_t minusone = UINT64_C(-1);

if (lv_info(lv, &info) && info.exists)
if (lv_info(lv, &info, 0) && info.exists)
return _int_disp(rh, field, &info.major);
else
return _int_disp(rh, field, &minusone);
@@ -310,7 +310,7 @@ static int _lvkmin_disp(struct report_handle *rh, struct field *field,
struct lvinfo info;
uint64_t minusone = UINT64_C(-1);

if (lv_info(lv, &info) && info.exists)
if (lv_info(lv, &info, 0) && info.exists)
return _int_disp(rh, field, &info.minor);
else
return _int_disp(rh, field, &minusone);
@@ -362,7 +362,7 @@ static int _lvstatus_disp(struct report_handle *rh, struct field *field,
else
repstr[3] = '-';

if (lv_info(lv, &info) && info.exists) {
if (lv_info(lv, &info, 1) && info.exists) {
if (info.suspended)
repstr[4] = 's'; /* Suspended */
else
@@ -774,7 +774,7 @@ static int _snpercent_disp(struct report_handle *rh, struct field *field,
}

if (!(snap = find_cow(lv)) ||
(lv_info(snap->cow, &info) && !info.exists)) {
(lv_info(snap->cow, &info, 0) && !info.exists)) {
field->report_string = "";
*sortval = UINT64_C(0);
field->sort_value = sortval;
@@ -31,7 +31,7 @@ static const char *_name(const struct lv_segment *seg)
static int _text_import(struct lv_segment *seg, const struct config_node *sn,
struct hash_table *pv_hash)
{
uint32_t chunk_size;
uint32_t chunk_size, extent_count;
const char *org_name, *cow_name;
struct logical_volume *org, *cow;

@@ -70,7 +70,11 @@ static int _text_import(struct lv_segment *seg, const struct config_node *sn,
return 0;
}

if (!vg_add_snapshot(org, cow, 1, &seg->lv->lvid.id[1], chunk_size)) {
if (!get_config_uint32(sn, "extent_count", &extent_count))
extent_count = org->le_count;

if (!vg_add_snapshot(org, cow, 1, &seg->lv->lvid.id[1], extent_count,
chunk_size)) {
stack;
return 0;
}
@@ -81,6 +85,8 @@ static int _text_import(struct lv_segment *seg, const struct config_node *sn,
static int _text_export(const struct lv_segment *seg, struct formatter *f)
{
outf(f, "chunk_size = %u", seg->chunk_size);
if (seg->len != seg->origin->le_count)
outf(f, "extent_count = %u", seg->len);
outf(f, "origin = \"%s\"", seg->origin->name);
outf(f, "cow_store = \"%s\"", seg->cow->name);
@@ -29,9 +29,7 @@ static unsigned char _inverse_c[256];
int lvid_create(union lvid *lvid, struct id *vgid)
{
memcpy(lvid->id, vgid, sizeof(*lvid->id));
id_create(&lvid->id[1]);

return 1;
return id_create(&lvid->id[1]);
}

void uuid_from_num(char *uuid, uint32_t num)
@@ -83,16 +81,18 @@ int id_create(struct id *id)

memset(id->uuid, 0, len);
if ((randomfile = open("/dev/urandom", O_RDONLY)) < 0) {
log_sys_error("open", "id_create");
log_sys_error("open", "id_create: /dev/urandom");
return 0;
}

if (read(randomfile, id->uuid, len) != len) {
log_sys_error("read", "id_create");
close(randomfile);
log_sys_error("read", "id_create: /dev/urandom");
if (close(randomfile))
stack;
return 0;
}
close(randomfile);
if (close(randomfile))
stack;

/*
* Skip out the last 2 chars in randomized creation for LVM1
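The id_create() changes above make random UUID generation fail loudly instead of silently continuing. A small standalone sketch of the same pattern (open /dev/urandom, check the read length, always close, report errors), with hypothetical function and message names:

#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

static int fill_random(unsigned char *buf, size_t len)
{
	int fd = open("/dev/urandom", O_RDONLY);
	ssize_t n;

	if (fd < 0) {
		perror("open /dev/urandom");
		return 0;
	}
	n = read(fd, buf, len);
	if (close(fd))
		perror("close /dev/urandom");
	if (n != (ssize_t) len) {
		fprintf(stderr, "short read from /dev/urandom\n");
		return 0;
	}
	return 1;
}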
@@ -33,29 +33,44 @@ previously configured for LVM with
.SH OPTIONS
See \fBlvm\fP for common options.
.TP
.BR \-A ", " \-\-autobackup " {" y | n }
Controls automatic backup of VG metadata after the change (see
.BR vgcfgbackup (8)).
Default is yes.
.TP
.BR \-l ", " \-\-maxlogicalvolumes " " \fIMaxLogicalVolumes\fR
Sets the maximum possible logical volume count.
More logical volumes can't be created in this volume group.
Absolute maximum is 256.
Sets the maximum number of logical volumes allowed in this
volume group.
The setting can be changed with \fBvgchange\fP.
For volume groups with metadata in lvm1 format, the limit
and default value is 255.
If the metadata uses lvm2 format, the default value is 0
which removes this restriction: there is then no limit.
.TP
.BR \-p ", " \-\-maxphysicalvolumes " " \fIMaxPhysicalVolumes\fR
Sets the maximum possible physical volume count.
More physical volumes can't be included in this volume group.
Absolute maximum is 256.
Sets the maximum number of physical volumes that can belong
to this volume group.
The setting can be changed with \fBvgchange\fP.
For volume groups with metadata in lvm1 format, the limit
and default value is 255.
If the metadata uses lvm2 format, the default value is 0
which removes this restriction: there is then no limit.
If you have a large number of physical volumes in
a volume group with metadata in lvm2 format,
for tool performance reasons, you should consider
some use of \fB--metadatacopies 0\fP
as described in \fBpvcreate(8)\fP.
.TP
.BR \-s ", " \-\-physicalextentsize " " \fIPhysicalExtentSize\fR[\fBkKmMgGtT\fR]
Sets the physical extent size on physical volumes of this volume group.
A size suffix (k for kilobytes up to t for terabytes) is optional, megabytes
is the default if no suffix is present. Values can be from 8 KB to 16 GB in
powers of 2. The default of 4 MB causes maximum LV sizes of ~256GB because as
many as ~64k extents are supported per LV. In case larger maximum LV sizes are
needed (later), you need to set the PE size to a larger value as well. Later
changes of the PE size in an existing VG are not supported.
powers of 2. The default is 4 MB.
Once this value has been set, it is difficult to change it without recreating
the volume group which would involve backing up and restoring data on any
logical volumes.
If the volume group metadata uses lvm1 format, there is a limit of 65534
extents in each logical volume, so the default of 4 MB leads to a maximum
logical volume size of around 256GB.
If the volume group metadata uses lvm2 format there is no such restriction,
although having a large number of extents will slow down
the tools but have no impact on I/O performance to the logical volume.
The 2.4 kernel has a limitation of 2TB per block device.
.SH EXAMPLES
To create a volume group named
.B test_vg
@@ -67,11 +82,6 @@ with default physical extent size of 4MB:
\ vgcreate test_vg /dev/sdk1 /dev/sdl1

.fi
To limit kernel memory usage, there is a limit of 65536 physical extents
(PE) per logical volume, so the PE size determines the maximum logical volume
size. The default PE size of 4MB limits a single logical volume to 256GB (see
the -s option to raise that limit).
There is also (as of Linux 2.4) a kernel limitation of 2TB per block device.
.SH SEE ALSO
.BR lvm (8),
.BR pvdisplay (8),
835
po/lvm2.po
File diff suppressed because it is too large
@@ -13,15 +13,12 @@

LOCK_FILE="/var/lock/subsys/clvmd"

#
# FIXME -- the lvm2-cluster rpms put the lvm tools in a different location
#
lvdisplay=/usr/sbin/cluster/lvdisplay
vgchange=/usr/sbin/cluster/vgchange
lvdisplay="/sbin/lvm.static lvdisplay"
vgchange="/sbin/lvm.static vgchange"
vgscan="/sbin/lvm.static vgscan"

start()
{
modprobe dlm
for rtrn in 0
do
if ! pidof clvmd > /dev/null
@@ -40,6 +37,9 @@ start()
fi
fi

# refresh cache
$vgscan > /dev/null 2>&1

if [ -n "$LVM_VGS" ]
then
for vg in $LVM_VGS
@@ -124,7 +124,6 @@ stop()
echo
fi
done
modprobe -r dlm > /dev/null 2>&1

return $rtrn
}
@@ -256,7 +256,7 @@ static int lvchange_persistent(struct cmd_context *cmd,
log_error("Major number must be specified with -My");
return 0;
}
if (lv_info(lv, &info) && info.exists &&
if (lv_info(lv, &info, 0) && info.exists &&
!arg_count(cmd, force_ARG)) {
if (yes_no_prompt("Logical volume %s will be "
"deactivated temporarily. "
@@ -609,7 +609,8 @@ static int _lvcreate(struct cmd_context *cmd, struct lvcreate_params *lp)
return 0;
}

if (!vg_add_snapshot(org, lv, 1, NULL, lp->chunk_size)) {
if (!vg_add_snapshot(org, lv, 1, NULL, lv->le_count,
lp->chunk_size)) {
log_err("Couldn't create snapshot.");
return 0;
}
@@ -41,7 +41,7 @@ static int lvremove_single(struct cmd_context *cmd, struct logical_volume *lv,

/* FIXME Ensure not referred to by another existing LVs */

if (lv_info(lv, &info)) {
if (lv_info(lv, &info, 1)) {
if (info.open_count) {
log_error("Can't remove open logical volume \"%s\"",
lv->name);
@@ -364,7 +364,7 @@ static int _lvresize(struct cmd_context *cmd, struct lvresize_params *lp)
if (lp->resize == LV_REDUCE || lp->resizefs) {
memset(&info, 0, sizeof(info));

if (!lv_info(lv, &info) && driver_version(NULL, 0)) {
if (!lv_info(lv, &info, 1) && driver_version(NULL, 0)) {
log_error("lv_info failed: aborting");
return ECMD_FAILED;
}
@@ -25,7 +25,7 @@ static int lvscan_single(struct cmd_context *cmd, struct logical_volume *lv,
const char *active_str, *snapshot_str;

/* FIXME Add -D arg to skip this! */
if (lv_info(lv, &info) && info.exists)
if (lv_info(lv, &info, 0) && info.exists)
active_str = "ACTIVE ";
else
active_str = "inactive ";
@@ -173,7 +173,11 @@ static int _pvchange_single(struct cmd_context *cmd, struct physical_volume *pv,
}
} else {
/* --uuid: Change PV ID randomly */
id_create(&pv->id);
if (!id_create(&pv->id)) {
log_error("Failed to generate new random UUID for %s.",
pv_name);
return 0;
}
}

log_verbose("Updating physical volume \"%s\"", pv_name);
@@ -18,6 +18,9 @@
#include <sys/stat.h>
#include <sys/wait.h>

/*
* Metadata iteration functions
*/
int process_each_lv_in_vg(struct cmd_context *cmd, struct volume_group *vg,
struct list *arg_lvnames, struct list *tags,
void *handle,
@@ -93,23 +96,6 @@ int process_each_lv_in_vg(struct cmd_context *cmd, struct volume_group *vg,
return ret_max;
}

struct volume_group *recover_vg(struct cmd_context *cmd, const char *vgname,
int lock_type)
{
int consistent = 1;

lock_type &= ~LCK_TYPE_MASK;
lock_type |= LCK_WRITE;

if (!lock_vol(cmd, vgname, lock_type)) {
log_error("Can't lock %s for metadata recovery: skipping",
vgname);
return NULL;
}

return vg_read(cmd, vgname, &consistent);
}

int process_each_lv(struct cmd_context *cmd, int argc, char **argv,
int lock_type, void *handle,
int (*process_single) (struct cmd_context * cmd,
@@ -608,6 +594,9 @@ int process_each_pv(struct cmd_context *cmd, int argc, char **argv,
return ret_max;
}

/*
* Determine volume group name from a logical volume name
*/
const char *extract_vgname(struct cmd_context *cmd, const char *lv_name)
{
const char *vg_name = lv_name;
@@ -666,6 +655,9 @@ const char *extract_vgname(struct cmd_context *cmd, const char *lv_name)
return vg_name;
}

/*
* Extract default volume group name from environment
*/
char *default_vgname(struct cmd_context *cmd)
{
char *vg_path;
@@ -694,6 +686,9 @@ char *default_vgname(struct cmd_context *cmd)
return pool_strdup(cmd->mem, vg_path);
}

/*
* Process physical extent range specifiers
*/
static int _add_pe_range(struct pool *mem, struct list *pe_ranges,
uint32_t start, uint32_t count)
{
@@ -928,6 +923,29 @@ struct list *clone_pv_list(struct pool *mem, struct list *pvsl)
return r;
}

/*
* Attempt metadata recovery
*/
struct volume_group *recover_vg(struct cmd_context *cmd, const char *vgname,
int lock_type)
{
int consistent = 1;

lock_type &= ~LCK_TYPE_MASK;
lock_type |= LCK_WRITE;

if (!lock_vol(cmd, vgname, lock_type)) {
log_error("Can't lock %s for metadata recovery: skipping",
vgname);
return NULL;
}

return vg_read(cmd, vgname, &consistent);
}

/*
* Execute and wait for external command
*/
int exec_cmd(const char *command, const char *fscmd, const char *lv_path,
const char *size)
{
@@ -264,7 +264,11 @@ static int _vgchange_uuid(struct cmd_context *cmd, struct volume_group *vg)
if (!archive(vg))
return ECMD_FAILED;

id_create(&vg->id);
if (!id_create(&vg->id)) {
log_error("Failed to generate new random UUID for VG %s.",
vg->name);
return ECMD_FAILED;
}

list_iterate_items(lvl, &vg->lvs) {
memcpy(&lvl->lv->lvid, &vg->id, sizeof(vg->id));
@@ -94,7 +94,7 @@ static int vgconvert_single(struct cmd_context *cmd, const char *vg_name,
lv = lvl->lv;
if (lvnum_from_lvid(&lv->lvid) < MAX_RESTRICTED_LVS)
continue;
if (lv_info(lv, &info) && info.exists) {
if (lv_info(lv, &info, 0) && info.exists) {
log_error("Logical volume %s must be "
"deactivated before conversion.",
lv->name);
@@ -56,7 +56,7 @@ int vgscan(struct cmd_context *cmd, int argc, char **argv)

log_print("Reading all physical volumes. This may take a while...");

maxret = process_each_vg(cmd, argc, argv, LCK_VG_READ, 1, NULL,
maxret = process_each_vg(cmd, argc, argv, LCK_VG_READ, 0, NULL,
&vgscan_single);

if (arg_count(cmd, mknodes_ARG)) {