4f567acb0b
Since commit 489d8e559c ("fs: dlm: add reliable connection if
reconnect") we have functionality similar to what TCP offers for
half-closed sockets, implemented on the dlm application protocol
layer. This feature is required because the cluster manager event for
leaving a resource membership may already have occurred locally while
other cluster nodes still have that leave membership pending in the
cluster manager protocol. During this time the local dlm node has
already shut down its connection and no longer transmits new dlm
messages, but it still needs to be able to accept dlm messages because
of the pending leave membership request in the cluster manager
protocol, over which the dlm kernel implementation has no control.

We have this functionality on the application layer for two reasons.
The main reason is that SCTP does not support such functionality on
the socket layer, but we can implement it inside the application
layer.

Another small issue is that this feature is broken in the TCP world
because some NAT devices do not implement it correctly. This is the
same reason the reliable connection session layer in DLM exists: we
give up on middle devices in the network which send e.g. TCP resets.
In DLM we cannot tolerate any message dropping, and the session layer
ensures it cannot happen.

Back to the half-closed graceful shutdown handling: it is no longer
necessary to do it on the socket layer (which is only supported for
TCP sockets) because we do it on the application layer. This patch
removes that handling; if issues remain, then we have a problem in the
application layer handling.
Signed-off-by: Alexander Aring <aahringo@redhat.com>
Signed-off-by: David Teigland <teigland@redhat.com>
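
For illustration only (not part of the patch): a minimal sketch of the kind of
socket-layer half-close that is being dropped here for TCP, based on the
in-kernel kernel_sock_shutdown() helper. The function dlm_sock_half_close()
and its parameters are hypothetical; the real code now relies on the
application-layer (midcomms) shutdown instead.

/* Hypothetical sketch of a socket-layer half-close for TCP; this is the
 * kind of handling the patch removes in favour of the application layer.
 */
#include <linux/net.h>
#include <linux/wait.h>
#include <linux/jiffies.h>

static void dlm_sock_half_close(struct socket *sock,
				wait_queue_head_t *shutdown_wait,
				bool *peer_done)
{
	/* stop sending on the socket but keep receiving (half-closed),
	 * so the peer can still flush its remaining dlm messages
	 */
	if (kernel_sock_shutdown(sock, SHUT_WR) < 0)
		return;

	/* wait (with timeout) until the peer signals it is done */
	wait_event_timeout(*shutdown_wait, *peer_done, 10 * HZ);
}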
/* SPDX-License-Identifier: GPL-2.0-only */
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2009 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

#ifndef __LOWCOMMS_DOT_H__
#define __LOWCOMMS_DOT_H__

#include "dlm_internal.h"

#define DLM_MIDCOMMS_OPT_LEN sizeof(struct dlm_opts)
#define DLM_MAX_APP_BUFSIZE (DLM_MAX_SOCKET_BUFSIZE - \
			     DLM_MIDCOMMS_OPT_LEN)

#define CONN_HASH_SIZE 32

/* This is deliberately very simple because most clusters have simple
 * sequential nodeids, so we should be able to go straight to a connection
 * struct in the array
 */
static inline int nodeid_hash(int nodeid)
{
	return nodeid & (CONN_HASH_SIZE-1);
}

/* check if dlm is running */
bool dlm_lowcomms_is_running(void);

int dlm_lowcomms_start(void);
void dlm_lowcomms_shutdown(void);
void dlm_lowcomms_shutdown_node(int nodeid, bool force);
void dlm_lowcomms_stop(void);
void dlm_lowcomms_init(void);
void dlm_lowcomms_exit(void);
int dlm_lowcomms_close(int nodeid);
struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, gfp_t allocation,
				     char **ppc, void (*cb)(void *data),
				     void *data);
void dlm_lowcomms_commit_msg(struct dlm_msg *msg);
void dlm_lowcomms_put_msg(struct dlm_msg *msg);
int dlm_lowcomms_resend_msg(struct dlm_msg *msg);
int dlm_lowcomms_connect_node(int nodeid);
int dlm_lowcomms_nodes_set_mark(int nodeid, unsigned int mark);
int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len);
void dlm_midcomms_receive_done(int nodeid);
struct kmem_cache *dlm_lowcomms_writequeue_cache_create(void);
struct kmem_cache *dlm_lowcomms_msg_cache_create(void);

#endif				/* __LOWCOMMS_DOT_H__ */
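
As a usage note (an assumption sketched from the declarations above, not code
from this commit): a caller such as the midcomms layer would typically reserve
a message, fill the returned buffer, commit it and then drop its reference.
send_example() and MY_MSG_LEN are hypothetical, and the real midcomms code
also passes a callback to stamp the message before transmission, which this
sketch skips.

/* Hypothetical usage sketch of the lowcomms message API declared above. */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include "lowcomms.h"

#define MY_MSG_LEN 64	/* hypothetical payload length, must fit DLM_MAX_APP_BUFSIZE */

static int send_example(int nodeid, const void *payload)
{
	struct dlm_msg *msg;
	char *ppc;

	/* reserve MY_MSG_LEN bytes in the per-node write queue */
	msg = dlm_lowcomms_new_msg(nodeid, MY_MSG_LEN, GFP_NOFS, &ppc,
				   NULL, NULL);
	if (!msg)
		return -ENOMEM;

	/* fill the returned buffer and queue it for transmission */
	memcpy(ppc, payload, MY_MSG_LEN);
	dlm_lowcomms_commit_msg(msg);

	/* drop our reference; lowcomms holds its own until the message is sent */
	dlm_lowcomms_put_msg(msg);
	return 0;
}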