
Fix clvmd-gulm node up/down code so it actually works.

clvmd-gulm now releases locks when shut down.
Patrick Caulfield 2005-02-22 16:26:21 +00:00
parent a00bfcc8b2
commit eb3037ddbd
4 changed files with 42 additions and 7 deletions
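
For context, a minimal standalone sketch of the shutdown pattern this commit introduces in the clvmd-gulm changes below: _cluster_closedown() sets an in_shutdown flag and releases all held locks, and the unlock/completion paths stop waiting for acknowledgements once the flag is set, because nothing is left to deliver them. The struct resource type and the do_unlock()/closedown() helpers are invented for illustration; only the in_shutdown idea comes from the diff.

/* Sketch only - not the clvmd code. Shows why the shutdown flag must be
 * set before the locks are dropped. Compile with -lpthread. */
#include <pthread.h>
#include <stdio.h>

struct resource {
	const char *name;
	int locked;
	pthread_mutex_t mutex;
	pthread_cond_t cond;
};

static volatile int in_shutdown = 0;

/* Ask the (imaginary) lock manager to drop a lock.  Normally we would wait
 * for its completion callback to signal 'cond'; during shutdown we just
 * issue the request and return. */
static int do_unlock(struct resource *res)
{
	pthread_mutex_lock(&res->mutex);
	res->locked = 0;                 /* request handed to the lock manager */

	if (in_shutdown) {
		/* No-one is left to deliver the callback: don't wait. */
		pthread_mutex_unlock(&res->mutex);
		return 0;
	}

	/* Normal path: wait until the completion callback signals us.
	 * (Never reached in this sketch, since closedown() sets the flag
	 * before calling us.) */
	pthread_cond_wait(&res->cond, &res->mutex);
	pthread_mutex_unlock(&res->mutex);
	return 0;
}

static void closedown(struct resource *resources, int count)
{
	int i;

	in_shutdown = 1;                 /* must be set before releasing locks */
	for (i = 0; i < count; i++)
		do_unlock(&resources[i]);
}

int main(void)
{
	struct resource res[2] = {
		{ "VG_vg0", 1, PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER },
		{ "VG_vg1", 1, PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER },
	};
	int i;

	closedown(res, 2);
	for (i = 0; i < 2; i++)
		printf("%s locked=%d\n", res[i].name, res[i].locked);
	return 0;
}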

View File

@@ -1,7 +1,9 @@
 Version 2.01.06 -
 ====================================
 Fix clvmd-gulm locking.
-./configure --enable-debug now enables debugging code in clvmd
+./configure --enable-debug now enables debugging code in clvmd.
+Fix clvmd-gulm node up/down code so it actually works.
+clvmd-gulm now releases locks when shut down.
 Version 2.01.05 - 18th February 2005
 ====================================

View File

@@ -2,6 +2,7 @@
 *******************************************************************************
 **
 ** Copyright (C) Sistina Software, Inc. 2002-2003 All rights reserved.
+** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 **
 *******************************************************************************
 ******************************************************************************/
@@ -46,6 +47,7 @@
 #include "log.h"
 #include "clvm.h"
 #include "clvmd-comms.h"
+#include "lvm-functions.h"
 #include "clvmd.h"
 #include "hash.h"
 #include "clvmd-gulm.h"
@@ -65,6 +67,7 @@ static uint8_t current_corestate;
 static int num_nodes;
 static char *cluster_name;
+static int in_shutdown = 0;
 static pthread_mutex_t lock_start_mutex;
 static volatile int lock_start_flag;
@@ -246,6 +249,8 @@ static int _init_cluster(void)
 static void _cluster_closedown(void)
 {
 DEBUGLOG("cluster_closedown\n");
+in_shutdown = 1;
+unlock_all();
 lg_lock_logout(gulm_if);
 lg_core_logout(gulm_if);
 lg_release(gulm_if);
@@ -258,6 +263,7 @@ static void drop_expired_locks(char *nodename)
 struct utsname nodeinfo;
 uint8_t mask[GIO_KEY_SIZE];
+DEBUGLOG("Dropping expired locks for %s\n", nodename?nodename:"(null)");
 memset(mask, 0xff, GIO_KEY_SIZE);
 if (!nodename)
@@ -478,6 +484,10 @@ static int lock_lock_state(void *misc, uint8_t *key, uint16_t keylen,
 DEBUGLOG("LOCK lock state: %s, error = %d\n", key, error);
+/* No waiting process to wake up when we are shutting down */
+if (in_shutdown)
+return;
 lwait = hash_lookup(lock_hash, key);
 if (!lwait)
 {
@@ -596,9 +606,12 @@ void gulm_add_up_node(char *csid)
 struct node_info *ninfo;
 ninfo = hash_lookup_binary(node_hash, csid, GULM_MAX_CSID_LEN);
-if (!ninfo)
+if (!ninfo) {
+DEBUGLOG("gulm_add_up_node no node_hash entry for csid %s\n", print_csid(csid));
 return;
+}
+DEBUGLOG("gulm_add_up_node %s\n", ninfo->name);
 ninfo->state = NODE_CLVMD;
 return;
@@ -616,6 +629,7 @@ void add_down_node(char *csid)
 running clvmd - gulm may set it DOWN quite soon */
 if (ninfo->state == NODE_CLVMD)
 ninfo->state = NODE_UP;
+drop_expired_locks(ninfo->name);
 return;
 }
@@ -638,8 +652,19 @@ static int _cluster_do_node_callback(struct local_client *master_client,
 DEBUGLOG("down_callback. node %s, state = %d\n", ninfo->name, ninfo->state);
 client = hash_lookup_binary(sock_hash, csid, GULM_MAX_CSID_LEN);
-if (client)
+if (!client)
+{
+/* If it's up but not connected, try to make contact */
+if (ninfo->state == NODE_UP)
+gulm_connect_csid(csid, &client);
+client = hash_lookup_binary(sock_hash, csid, GULM_MAX_CSID_LEN);
+}
+if (ninfo->state != NODE_DOWN)
 callback(master_client, csid, ninfo->state == NODE_CLVMD);
 }
 return 0;
 }
@@ -727,6 +752,11 @@ static int _unlock_resource(char *resource, int lockid)
 return status;
 }
+/* When we are shutting down, don't wait for unlocks
+to be acknowledged, just do it. */
+if (in_shutdown)
+return status;
 /* Wait for it to complete */
 pthread_cond_wait(&lwait.cond, &lwait.mutex);
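
A hedged sketch, in plain C, of the node up/down handling that the hunks above fix: when the down-callback walks the node table, a node that is UP but has no connected socket gets one connection attempt before the callback decision, and a node leaving the cluster has its stale locks dropped. The node struct and the try_connect()/drop_expired_locks_for() helpers below are stand-ins, not clvmd APIs.

/* Sketch only - mirrors the shape of the reconnect-then-callback logic. */
#include <stdio.h>

enum node_state { NODE_DOWN, NODE_UP, NODE_CLVMD };

struct node {
	char name[16];
	enum node_state state;
	int connected;               /* stands in for the sock_hash lookup */
};

/* Pretend connection attempt: always succeeds in this sketch. */
static int try_connect(struct node *n)
{
	n->connected = 1;
	return 0;
}

static void drop_expired_locks_for(const char *name)
{
	printf("dropping expired locks held by %s\n", name);
}

/* Shape of the down-callback loop: reconnect if possible, then only
 * report nodes that are not DOWN. */
static void do_node_callback(struct node *nodes, int count,
			     void (*callback)(const char *name, int clvmd_up))
{
	int i;

	for (i = 0; i < count; i++) {
		/* If it's up but not connected, try to make contact. */
		if (!nodes[i].connected && nodes[i].state == NODE_UP)
			try_connect(&nodes[i]);

		if (nodes[i].state != NODE_DOWN)
			callback(nodes[i].name, nodes[i].state == NODE_CLVMD);
	}
}

static void report(const char *name, int clvmd_up)
{
	printf("node %s: clvmd %s\n", name, clvmd_up ? "running" : "not running");
}

int main(void)
{
	struct node nodes[3] = {
		{ "node1", NODE_CLVMD, 1 },
		{ "node2", NODE_UP,    0 },   /* up but not yet connected */
		{ "node3", NODE_DOWN,  0 },
	};

	do_node_callback(nodes, 3, report);

	/* When a member goes down, clvmd-gulm now also clears its stale locks. */
	drop_expired_locks_for(nodes[0].name);
	return 0;
}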

View File

@@ -2,6 +2,7 @@
 *******************************************************************************
 **
 ** Copyright (C) Sistina Software, Inc. 2002-2003 All rights reserved.
+** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 **
 *******************************************************************************
 ******************************************************************************/
@@ -280,13 +281,14 @@ static int read_from_tcpsock(struct local_client *client, char *buf, int len, ch
 add_down_node(remcsid);
 }
 else {
+gulm_add_up_node(csid);
 /* Send it back to clvmd */
 process_message(client, buf, len, csid);
 }
 return status;
 }
-static int connect_csid(char *csid, struct local_client **newclient)
+int gulm_connect_csid(char *csid, struct local_client **newclient)
 {
 int fd;
 struct sockaddr_in6 addr;
@@ -350,7 +352,7 @@ static int tcp_send_message(void *buf, int msglen, unsigned char *csid, const ch
 client = hash_lookup_binary(sock_hash, csid, GULM_MAX_CSID_LEN);
 if (!client)
 {
-status = connect_csid(csid, &client);
+status = gulm_connect_csid(csid, &client);
 if (status)
 return -1;
 }

View File

@@ -10,3 +10,4 @@ int get_main_gulm_cluster_fd(void);
 int cluster_fd_gulm_callback(struct local_client *fd, char *buf, int len, char *csid, struct local_client **new_client);
 int gulm_cluster_send_message(void *buf, int msglen, char *csid, const char *errtext);
 void get_our_gulm_csid(char *csid);
+int gulm_connect_csid(char *csid, struct local_client **newclient);
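
Finally, a small illustrative example of the renaming in the last two files: the previously static connect_csid() in the TCP transport becomes gulm_connect_csid() with a prototype in clvmd-gulm.h, so the gulm node callback shown earlier can open connections itself. Everything below is collapsed into one hypothetical file and simplified (in the real code the csid is a binary node identifier and the function opens a TCP socket).

/* Sketch only - shows the effect of dropping 'static' and exporting the
 * symbol through a shared header. */
#include <stdio.h>
#include <stdlib.h>

struct local_client {
	int fd;
};

/* In clvmd this prototype now lives in the shared header (clvmd-gulm.h);
 * the definition stays in the TCP transport file. */
int gulm_connect_csid(char *csid, struct local_client **newclient);

/* Was "static int connect_csid(...)": removing 'static' and adding the
 * gulm_ prefix makes it callable from other compilation units. */
int gulm_connect_csid(char *csid, struct local_client **newclient)
{
	*newclient = malloc(sizeof(**newclient));
	if (!*newclient)
		return -1;
	(*newclient)->fd = -1;       /* a real version would connect a socket */
	printf("connect requested for csid %s\n", csid);
	return 0;
}

int main(void)
{
	char csid[5] = "0000";       /* placeholder node id for the sketch */
	struct local_client *client = NULL;

	/* A caller in another file (e.g. the node callback) can now do this. */
	if (!gulm_connect_csid(csid, &client))
		printf("got client, fd=%d\n", client->fd);
	free(client);
	return 0;
}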