
make clvmd FDs close-on-exec to avoid warnings when running lvm via popen.
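As a quick illustration of the technique (a minimal sketch, not the committed code): a descriptor marked close-on-exec is closed automatically in any child the daemon execs, so an lvm subprocess started via popen() never inherits the daemon's cluster sockets. The patch writes the literal 1 to F_SETFD; FD_CLOEXEC is the symbolic name for that bit.

#include <fcntl.h>
#include <sys/socket.h>
#include <unistd.h>

/* Create a socket and mark it close-on-exec so that children
 * spawned later via popen()/exec() do not inherit it. */
static int make_cloexec_socket(void)
{
	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
	if (fd < 0)
		return -1;

	/* Read the current descriptor flags and OR in FD_CLOEXEC;
	 * this preserves any other flags, unlike writing a bare 1. */
	int flags = fcntl(fd, F_GETFD);
	if (flags < 0 || fcntl(fd, F_SETFD, flags | FD_CLOEXEC) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}

make_cloexec_socket() is a hypothetical helper for illustration only; the patch applies fcntl() directly to the sockets it already has.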

clvmd-gulm unlocks VG & orphan locks at startup in case they are stale.
clvmd-gulm now unlocks VG & orphan locks if a client dies.
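The cleanup relies on clvmd's lock-naming convention: a VG lock is named "V_<vgname>" and the orphan lock is "P_orphans", so stale locks can be released by name alone. Below is a hypothetical miniature of the per-client side (clvmd really keeps each client's locks in a hash table, as the cmd_client_cleanup hunk further down shows); sync_unlock() is the daemon's actual release call, everything else is assumed for the sketch.

#include <stdlib.h>

/* Each client carries a record of the locks it currently holds. */
struct held_lock {
	char name[64];            /* e.g. "V_vg00" or "P_orphans" */
	int lkid;                 /* lock id issued by the lock manager */
	struct held_lock *next;
};

extern int sync_unlock(char *resource, int lkid);

/* When a client dies, walk its list and release every lock it
 * left behind, then free the bookkeeping. */
static void cleanup_client_locks(struct held_lock *head)
{
	while (head) {
		struct held_lock *next = head->next;
		sync_unlock(head->name, head->lkid);
		free(head);
		head = next;
	}
}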
Patrick Caulfield 2005-03-07 17:03:44 +00:00
parent 4c90852192
commit 3329bbfdf3
8 changed files with 94 additions and 17 deletions

View File

@@ -1,5 +1,8 @@
Version 2.01.07 -
================================
clvmd fixes: make FDs close-on-exec
gulm unlocks VG & orphan locks at startup in case they are stale
gulm now unlocks VG & orphan locks if a client dies.
Version 2.01.06 - 1st March 2005
================================

View File

@@ -77,6 +77,8 @@ static int _init_cluster(void)
/* Don't print an error here because we could be just probing for CMAN */
return -1;
}
/* Set Close-on-exec */
fcntl(cluster_sock, F_SETFD, 1);
/* Bind to our port number on the cluster.
Writes to this will block if the cluster loses quorum */
@@ -101,6 +103,7 @@ static int _init_cluster(void)
return -1;
}
dlm_ls_pthread_init(lockspace);
return 0;
}

View File

@@ -274,9 +274,10 @@ void cmd_client_cleanup(struct local_client *client)
hash_iterate(v, lock_hash) {
int lkid = (int)(long)hash_get_data(lock_hash, v);
+ char *lockname = hash_get_key(lock_hash, v);
- DEBUGLOG("cleanup: Unlocking lkid %x\n", lkid);
- sync_unlock("DUMMY", lkid);
+ DEBUGLOG("cleanup: Unlocking lock %s %x\n", lockname, lkid);
+ sync_unlock(lockname, lkid);
}
hash_destroy(lock_hash);

View File

@@ -117,6 +117,9 @@ static int add_internal_client(int fd, fd_callback_t callback)
client->callback = callback;
add_client(client);
/* Set Close-on-exec */
fcntl(fd, F_SETFD, 1);
return 0;
}
@@ -471,7 +474,7 @@ static int lock_login_reply(void *misc, uint32_t error, uint8_t which)
lock_start_flag = 0;
pthread_mutex_unlock(&lock_start_mutex);
}
- return;
+ return 0;
}
@@ -486,7 +489,7 @@ static int lock_lock_state(void *misc, uint8_t *key, uint16_t keylen,
/* No waiting process to wake up when we are shutting down */
if (in_shutdown)
- return;
+ return 0;
lwait = hash_lookup(lock_hash, key);
if (!lwait)

View File

@@ -165,6 +165,7 @@ int main(int argc, char *argv[])
int debug = 0;
int cmd_timeout = DEFAULT_CMD_TIMEOUT;
sigset_t ss;
int using_gulm = 0;
/* Deal with command-line arguments */
opterr = 0;
@@ -250,6 +251,7 @@
max_csid_len = GULM_MAX_CSID_LEN;
max_cluster_message = GULM_MAX_CLUSTER_MESSAGE;
max_cluster_member_name_len = GULM_MAX_CLUSTER_MEMBER_NAME_LEN;
using_gulm = 1;
syslog(LOG_NOTICE, "Cluster LVM daemon started - connected to GULM");
}
#endif
@@ -284,7 +286,7 @@
/* This needs to be started after cluster initialisation
as it may need to take out locks */
DEBUGLOG("starting LVM thread\n");
- pthread_create(&lvm_thread, NULL, lvm_thread_fn, nodeinfo.nodename);
+ pthread_create(&lvm_thread, NULL, lvm_thread_fn, (void *)using_gulm);
/* Tell the rest of the cluster our version number */
/* CMAN can do this immediately, gulm needs to wait until
@@ -543,8 +545,9 @@ static void main_loop(int local_sock, int cmd_timeout)
lastfd->next = thisfd->next;
free_fd = thisfd;
thisfd = lastfd;
- cmd_client_cleanup(free_fd);
- free(free_fd);
+ /* Queue cleanup, this also frees the client struct */
+ add_to_lvmqueue(free_fd, NULL, 0, NULL);
break;
}
@@ -1565,6 +1568,14 @@ static int send_message(void *buf, int msglen, char *csid, int fd,
static int process_work_item(struct lvm_thread_cmd *cmd)
{
/* If msg is NULL then this is a cleanup request */
if (cmd->msg == NULL) {
cmd_client_cleanup(cmd->client);
free(cmd->client);
return 0;
}
if (!cmd->remote) {
DEBUGLOG("process_work_item: local\n");
process_local_command(cmd->msg, cmd->msglen, cmd->client,
@@ -1584,6 +1595,7 @@ static void *lvm_thread_fn(void *arg)
{
struct list *cmdl, *tmp;
sigset_t ss;
int using_gulm = (int)arg;
DEBUGLOG("LVM thread function started\n");
pthread_mutex_lock(&lvm_thread_mutex);
@@ -1595,7 +1607,7 @@ static void *lvm_thread_fn(void *arg)
pthread_sigmask(SIG_BLOCK, &ss, NULL);
/* Initialise the interface to liblvm */
- init_lvm();
+ init_lvm(using_gulm);
pthread_mutex_unlock(&lvm_thread_mutex);
/* Now wait for some actual work */
@@ -1634,17 +1646,22 @@ static int add_to_lvmqueue(struct local_client *client, struct clvm_header *msg,
if (!cmd)
return ENOMEM;
- cmd->msg = malloc(msglen);
- if (!cmd->msg) {
- log_error("Unable to allocate buffer space\n");
- free(cmd);
- return -1;
- }
+ if (msglen) {
+ cmd->msg = malloc(msglen);
+ if (!cmd->msg) {
+ log_error("Unable to allocate buffer space\n");
+ free(cmd);
+ return -1;
+ }
+ memcpy(cmd->msg, msg, msglen);
+ }
+ else {
+ cmd->msg = NULL;
+ }
cmd->client = client;
cmd->msglen = msglen;
cmd->xid = client->xid;
- memcpy(cmd->msg, msg, msglen);
if (csid) {
memcpy(cmd->csid, csid, max_csid_len);
cmd->remote = 1;
@@ -1677,6 +1694,8 @@ static int open_local_sock()
log_error("Can't create local socket: %m");
return -1;
}
/* Set Close-on-exec */
fcntl(local_socket, F_SETFD, 1);
memset(&sockaddr, 0, sizeof(sockaddr));
memcpy(sockaddr.sun_path, CLVMD_SOCKNAME, sizeof(CLVMD_SOCKNAME));

View File

@@ -388,6 +388,44 @@ int do_check_lvm1(char *vgname)
return status == 1 ? 0 : EBUSY;
}
/* Only called at gulm startup. Drop any leftover VG or P_orphan locks
that might be hanging around if we died for any reason
*/
static void drop_vg_locks()
{
char vg[128];
char line[255];
FILE *vgs =
popen
("lvm pvs --nolocking --noheadings -o vg_name", "r");
sync_unlock("P_orphans", LCK_EXCL);
if (!vgs)
return;
while (fgets(line, sizeof(line), vgs)) {
char *vgend;
char *vgstart;
if (line[strlen(line)-1] == '\n')
line[strlen(line)-1] = '\0';
vgstart = line + strspn(line, " ");
vgend = vgstart + strcspn(vgstart, " ");
*vgend = '\0';
if (strncmp(vgstart, "WARNING:", 8) == 0)
continue;
sprintf(vg, "V_%s", vgstart);
sync_unlock(vg, LCK_EXCL);
}
pclose(vgs);
}
/*
* Ideally, clvmd should be started before any LVs are active
* but this may not be the case...
@@ -470,7 +508,7 @@ void init_lvhash()
}
/* Called to initialise the LVM context of the daemon */
- int init_lvm(void)
+ int init_lvm(int using_gulm)
{
if (!(cmd = create_toolcontext(NULL))) {
log_error("Failed to allocate command context");
@@ -484,6 +522,10 @@ int init_lvm(void)
/* Check lvm.conf is setup for cluster-LVM */
check_config();
/* Remove any non-LV locks that may have been left around */
if (using_gulm)
drop_vg_locks();
get_initial_state();
return 1;

View File

@@ -25,7 +25,7 @@ extern int do_lock_lv(unsigned char lock_cmd, unsigned char lock_flags,
extern int post_lock_lv(unsigned char lock_cmd, unsigned char lock_flags,
char *resource);
extern int do_check_lvm1(char *vgname);
- extern int init_lvm(void);
+ extern int init_lvm(int using_gulm);
extern void init_lvhash(void);
extern int hold_unlock(char *resource);

View File

@@ -85,6 +85,9 @@ int init_comms(unsigned short port)
listen(listen_fd, 5);
/* Set Close-on-exec */
fcntl(listen_fd, F_SETFD, 1);
return 0;
}
@@ -322,6 +325,9 @@ int gulm_connect_csid(char *csid, struct local_client **newclient)
return -1;
}
/* Set Close-on-exec */
fcntl(fd, F_SETFD, 1);
status = alloc_client(fd, csid, newclient);
if (status)
close(fd);