Merge branch 'rxrpc-misc'
David Howells says:

====================
rxrpc: Miscellaneous changes

Here are some miscellaneous changes for AF_RXRPC:

 (1) Allow the list of local endpoints to be viewed through /proc.

 (2) Switch to using refcount_t for refcounting.

 (3) Fix a locking issue found by lockdep.

 (4) Autogenerate tracing symbol enums from symbol->string maps to make it
     easier to keep them in sync.

 (5) Return an error to sendmsg() if a call it tried to set up failed.
     Because it failed at this point, no notification will be generated
     for recvmsg to pick up - but userspace still needs to know about the
     failure.

 (6) Fix the selection of abort codes generated by internal events.  In
     particular, rxrpc and kafs shouldn't be generating RX_USER_ABORT
     unless it's because userspace did something to cancel a call.

 (7) Adjust the interpretation and handling of certain ACK types to try
     and detect NAT changes causing a call to seem to start mid-flow from
     a different peer.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
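The refcount_t conversion in (2) applies the same pattern at every get/put site in the diff below. A minimal sketch of that pattern follows; `struct obj` and the `obj_*` helpers are made-up stand-ins for illustration, not types or functions from this series:

	/* Illustrative sketch only: the atomic_t -> refcount_t conversion
	 * pattern this series applies.  "struct obj" is hypothetical.
	 */
	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct obj {
		refcount_t ref;	/* was: atomic_t usage */
	};

	static void obj_get(struct obj *o)
	{
		int r;

		/* was: atomic_inc_return(&o->usage);
		 * __refcount_inc() returns the old count in r, so the new
		 * count for tracing is r + 1.
		 */
		__refcount_inc(&o->ref, &r);
	}

	static bool obj_get_maybe(struct obj *o)
	{
		int r;

		/* was: atomic_fetch_add_unless(&o->usage, 1, 0) with a
		 * check for 0; fails cleanly if the object is already dying.
		 */
		return __refcount_inc_not_zero(&o->ref, &r);
	}

	static void obj_put(struct obj *o)
	{
		int r;

		/* was: if (atomic_dec_return(&o->usage) == 0) kfree(o); */
		if (__refcount_dec_and_test(&o->ref, &r))
			kfree(o);
	}

Unlike raw atomics, refcount_t saturates and warns on underflow/overflow, which is the point of the conversion.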
commit baea40de32
@@ -163,8 +163,11 @@ void afs_prioritise_error(struct afs_error *e, int error, u32 abort_code)
 		return;
 
 	case -ECONNABORTED:
+		error = afs_abort_to_error(abort_code);
+		fallthrough;
+	case -ENETRESET: /* Responded, but we seem to have changed address */
 		e->responded = true;
-		e->error = afs_abort_to_error(abort_code);
+		e->error = error;
 		return;
 	}
 }

@@ -292,6 +292,10 @@ bool afs_select_fileserver(struct afs_operation *op)
 		op->error = error;
 		goto iterate_address;
 
+	case -ENETRESET:
+		pr_warn("kAFS: Peer reset %s (op=%x)\n",
+			op->type ? op->type->name : "???", op->debug_id);
+		fallthrough;
 	case -ECONNRESET:
 		_debug("call reset");
 		op->error = error;

@@ -537,6 +537,8 @@ static void afs_deliver_to_call(struct afs_call *call)
 		case -ENODATA:
 		case -EBADMSG:
 		case -EMSGSIZE:
+		case -ENOMEM:
+		case -EFAULT:
 			abort_code = RXGEN_CC_UNMARSHAL;
 			if (state != AFS_CALL_CL_AWAIT_REPLY)
 				abort_code = RXGEN_SS_UNMARSHAL;
@@ -544,7 +546,7 @@ static void afs_deliver_to_call(struct afs_call *call)
 					abort_code, ret, "KUM");
 			goto local_abort;
 		default:
-			abort_code = RX_USER_ABORT;
+			abort_code = RX_CALL_DEAD;
 			rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
 						abort_code, ret, "KER");
 			goto local_abort;
@@ -836,7 +838,7 @@ void afs_send_empty_reply(struct afs_call *call)
 	case -ENOMEM:
 		_debug("oom");
 		rxrpc_kernel_abort_call(net->socket, call->rxcall,
-					RX_USER_ABORT, -ENOMEM, "KOO");
+					RXGEN_SS_MARSHAL, -ENOMEM, "KOO");
 		fallthrough;
 	default:
 		_leave(" [error]");
@@ -878,7 +880,7 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
 	if (n == -ENOMEM) {
 		_debug("oom");
 		rxrpc_kernel_abort_call(net->socket, call->rxcall,
-					RX_USER_ABORT, -ENOMEM, "KOO");
+					RXGEN_SS_MARSHAL, -ENOMEM, "KOO");
 	}
 	_leave(" [error]");
 }

@@ -636,6 +636,7 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
 	case -EKEYEXPIRED:
 	case -EKEYREJECTED:
 	case -EKEYREVOKED:
+	case -ENETRESET:
 		afs_redirty_pages(wbc, mapping, start, len);
 		mapping_set_error(mapping, ret);
 		break;

@ -636,6 +636,7 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
|
||||
case -EKEYEXPIRED:
|
||||
case -EKEYREJECTED:
|
||||
case -EKEYREVOKED:
|
||||
case -ENETRESET:
|
||||
afs_redirty_pages(wbc, mapping, start, len);
|
||||
mapping_set_error(mapping, ret);
|
||||
break;
|
||||
|
@@ -931,6 +931,38 @@ struct list_head *seq_list_next(void *v, struct list_head *head, loff_t *ppos)
 }
 EXPORT_SYMBOL(seq_list_next);
 
+struct list_head *seq_list_start_rcu(struct list_head *head, loff_t pos)
+{
+	struct list_head *lh;
+
+	list_for_each_rcu(lh, head)
+		if (pos-- == 0)
+			return lh;
+
+	return NULL;
+}
+EXPORT_SYMBOL(seq_list_start_rcu);
+
+struct list_head *seq_list_start_head_rcu(struct list_head *head, loff_t pos)
+{
+	if (!pos)
+		return head;
+
+	return seq_list_start_rcu(head, pos - 1);
+}
+EXPORT_SYMBOL(seq_list_start_head_rcu);
+
+struct list_head *seq_list_next_rcu(void *v, struct list_head *head,
+				    loff_t *ppos)
+{
+	struct list_head *lh;
+
+	lh = list_next_rcu((struct list_head *)v);
+	++*ppos;
+	return lh == head ? NULL : lh;
+}
+EXPORT_SYMBOL(seq_list_next_rcu);
+
 /**
  * seq_hlist_start - start an iteration of a hlist
  * @head: the head of the hlist

@@ -605,6 +605,16 @@ static inline void list_splice_tail_init(struct list_head *list,
 #define list_for_each(pos, head) \
 	for (pos = (head)->next; !list_is_head(pos, (head)); pos = pos->next)
 
+/**
+ * list_for_each_rcu - Iterate over a list in an RCU-safe fashion
+ * @pos:	the &struct list_head to use as a loop cursor.
+ * @head:	the head for your list.
+ */
+#define list_for_each_rcu(pos, head)		  \
+	for (pos = rcu_dereference((head)->next); \
+	     !list_is_head(pos, (head)); \
+	     pos = rcu_dereference(pos->next))
+
 /**
  * list_for_each_continue - continue iteration over a list
  * @pos:	the &struct list_head to use as a loop cursor.

@@ -277,6 +277,10 @@ extern struct list_head *seq_list_start_head(struct list_head *head,
 extern struct list_head *seq_list_next(void *v, struct list_head *head,
 				       loff_t *ppos);
 
+extern struct list_head *seq_list_start_rcu(struct list_head *head, loff_t pos);
+extern struct list_head *seq_list_start_head_rcu(struct list_head *head, loff_t pos);
+extern struct list_head *seq_list_next_rcu(void *v, struct list_head *head, loff_t *ppos);
+
 /*
  * Helpers for iteration over hlist_head-s in seq_files
  */

@@ -13,215 +13,6 @@
 #include <linux/tracepoint.h>
 #include <linux/errqueue.h>
 
-/*
- * Define enums for tracing information.
- *
- * These should all be kept sorted, making it easier to match the string
- * mapping tables further on.
- */
-#ifndef __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY
-#define __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY
-
-enum rxrpc_skb_trace {
-	rxrpc_skb_cleaned,
-	rxrpc_skb_freed,
-	rxrpc_skb_got,
-	rxrpc_skb_lost,
-	rxrpc_skb_new,
-	rxrpc_skb_purged,
-	rxrpc_skb_received,
-	rxrpc_skb_rotated,
-	rxrpc_skb_seen,
-	rxrpc_skb_unshared,
-	rxrpc_skb_unshared_nomem,
-};
-
-enum rxrpc_local_trace {
-	rxrpc_local_got,
-	rxrpc_local_new,
-	rxrpc_local_processing,
-	rxrpc_local_put,
-	rxrpc_local_queued,
-};
-
-enum rxrpc_peer_trace {
-	rxrpc_peer_got,
-	rxrpc_peer_new,
-	rxrpc_peer_processing,
-	rxrpc_peer_put,
-};
-
-enum rxrpc_conn_trace {
-	rxrpc_conn_got,
-	rxrpc_conn_new_client,
-	rxrpc_conn_new_service,
-	rxrpc_conn_put_client,
-	rxrpc_conn_put_service,
-	rxrpc_conn_queued,
-	rxrpc_conn_reap_service,
-	rxrpc_conn_seen,
-};
-
-enum rxrpc_client_trace {
-	rxrpc_client_activate_chans,
-	rxrpc_client_alloc,
-	rxrpc_client_chan_activate,
-	rxrpc_client_chan_disconnect,
-	rxrpc_client_chan_pass,
-	rxrpc_client_chan_wait_failed,
-	rxrpc_client_cleanup,
-	rxrpc_client_discard,
-	rxrpc_client_duplicate,
-	rxrpc_client_exposed,
-	rxrpc_client_replace,
-	rxrpc_client_to_active,
-	rxrpc_client_to_idle,
-};
-
-enum rxrpc_call_trace {
-	rxrpc_call_connected,
-	rxrpc_call_error,
-	rxrpc_call_got,
-	rxrpc_call_got_kernel,
-	rxrpc_call_got_timer,
-	rxrpc_call_got_userid,
-	rxrpc_call_new_client,
-	rxrpc_call_new_service,
-	rxrpc_call_put,
-	rxrpc_call_put_kernel,
-	rxrpc_call_put_noqueue,
-	rxrpc_call_put_notimer,
-	rxrpc_call_put_timer,
-	rxrpc_call_put_userid,
-	rxrpc_call_queued,
-	rxrpc_call_queued_ref,
-	rxrpc_call_release,
-	rxrpc_call_seen,
-};
-
-enum rxrpc_transmit_trace {
-	rxrpc_transmit_await_reply,
-	rxrpc_transmit_end,
-	rxrpc_transmit_queue,
-	rxrpc_transmit_queue_last,
-	rxrpc_transmit_rotate,
-	rxrpc_transmit_rotate_last,
-	rxrpc_transmit_wait,
-};
-
-enum rxrpc_receive_trace {
-	rxrpc_receive_end,
-	rxrpc_receive_front,
-	rxrpc_receive_incoming,
-	rxrpc_receive_queue,
-	rxrpc_receive_queue_last,
-	rxrpc_receive_rotate,
-};
-
-enum rxrpc_recvmsg_trace {
-	rxrpc_recvmsg_cont,
-	rxrpc_recvmsg_data_return,
-	rxrpc_recvmsg_dequeue,
-	rxrpc_recvmsg_enter,
-	rxrpc_recvmsg_full,
-	rxrpc_recvmsg_hole,
-	rxrpc_recvmsg_next,
-	rxrpc_recvmsg_requeue,
-	rxrpc_recvmsg_return,
-	rxrpc_recvmsg_terminal,
-	rxrpc_recvmsg_to_be_accepted,
-	rxrpc_recvmsg_wait,
-};
-
-enum rxrpc_rtt_tx_trace {
-	rxrpc_rtt_tx_cancel,
-	rxrpc_rtt_tx_data,
-	rxrpc_rtt_tx_no_slot,
-	rxrpc_rtt_tx_ping,
-};
-
-enum rxrpc_rtt_rx_trace {
-	rxrpc_rtt_rx_cancel,
-	rxrpc_rtt_rx_lost,
-	rxrpc_rtt_rx_obsolete,
-	rxrpc_rtt_rx_ping_response,
-	rxrpc_rtt_rx_requested_ack,
-};
-
-enum rxrpc_timer_trace {
-	rxrpc_timer_begin,
-	rxrpc_timer_exp_ack,
-	rxrpc_timer_exp_hard,
-	rxrpc_timer_exp_idle,
-	rxrpc_timer_exp_keepalive,
-	rxrpc_timer_exp_lost_ack,
-	rxrpc_timer_exp_normal,
-	rxrpc_timer_exp_ping,
-	rxrpc_timer_exp_resend,
-	rxrpc_timer_expired,
-	rxrpc_timer_init_for_reply,
-	rxrpc_timer_init_for_send_reply,
-	rxrpc_timer_restart,
-	rxrpc_timer_set_for_ack,
-	rxrpc_timer_set_for_hard,
-	rxrpc_timer_set_for_idle,
-	rxrpc_timer_set_for_keepalive,
-	rxrpc_timer_set_for_lost_ack,
-	rxrpc_timer_set_for_normal,
-	rxrpc_timer_set_for_ping,
-	rxrpc_timer_set_for_resend,
-	rxrpc_timer_set_for_send,
-};
-
-enum rxrpc_propose_ack_trace {
-	rxrpc_propose_ack_client_tx_end,
-	rxrpc_propose_ack_input_data,
-	rxrpc_propose_ack_ping_for_check_life,
-	rxrpc_propose_ack_ping_for_keepalive,
-	rxrpc_propose_ack_ping_for_lost_ack,
-	rxrpc_propose_ack_ping_for_lost_reply,
-	rxrpc_propose_ack_ping_for_params,
-	rxrpc_propose_ack_processing_op,
-	rxrpc_propose_ack_respond_to_ack,
-	rxrpc_propose_ack_respond_to_ping,
-	rxrpc_propose_ack_retry_tx,
-	rxrpc_propose_ack_rotate_rx,
-	rxrpc_propose_ack_terminal_ack,
-};
-
-enum rxrpc_propose_ack_outcome {
-	rxrpc_propose_ack_subsume,
-	rxrpc_propose_ack_update,
-	rxrpc_propose_ack_use,
-};
-
-enum rxrpc_congest_change {
-	rxrpc_cong_begin_retransmission,
-	rxrpc_cong_cleared_nacks,
-	rxrpc_cong_new_low_nack,
-	rxrpc_cong_no_change,
-	rxrpc_cong_progress,
-	rxrpc_cong_retransmit_again,
-	rxrpc_cong_rtt_window_end,
-	rxrpc_cong_saw_nack,
-};
-
-enum rxrpc_tx_point {
-	rxrpc_tx_point_call_abort,
-	rxrpc_tx_point_call_ack,
-	rxrpc_tx_point_call_data_frag,
-	rxrpc_tx_point_call_data_nofrag,
-	rxrpc_tx_point_call_final_resend,
-	rxrpc_tx_point_conn_abort,
-	rxrpc_tx_point_rxkad_challenge,
-	rxrpc_tx_point_rxkad_response,
-	rxrpc_tx_point_reject,
-	rxrpc_tx_point_version_keepalive,
-	rxrpc_tx_point_version_reply,
-};
-
-#endif /* end __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY */
-
 /*
  * Declare tracing information enums and their string mappings for display.
  */
@@ -451,6 +242,36 @@ enum rxrpc_tx_point {
 	EM(rxrpc_tx_point_version_keepalive,	"VerKeepalive") \
 	E_(rxrpc_tx_point_version_reply,	"VerReply")
 
+/*
+ * Generate enums for tracing information.
+ */
+#ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
+#define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
+
+#undef EM
+#undef E_
+#define EM(a, b) a,
+#define E_(a, b) a
+
+enum rxrpc_call_trace		{ rxrpc_call_traces } __mode(byte);
+enum rxrpc_client_trace		{ rxrpc_client_traces } __mode(byte);
+enum rxrpc_congest_change	{ rxrpc_congest_changes } __mode(byte);
+enum rxrpc_conn_trace		{ rxrpc_conn_traces } __mode(byte);
+enum rxrpc_local_trace		{ rxrpc_local_traces } __mode(byte);
+enum rxrpc_peer_trace		{ rxrpc_peer_traces } __mode(byte);
+enum rxrpc_propose_ack_outcome	{ rxrpc_propose_ack_outcomes } __mode(byte);
+enum rxrpc_propose_ack_trace	{ rxrpc_propose_ack_traces } __mode(byte);
+enum rxrpc_receive_trace	{ rxrpc_receive_traces } __mode(byte);
+enum rxrpc_recvmsg_trace	{ rxrpc_recvmsg_traces } __mode(byte);
+enum rxrpc_rtt_rx_trace		{ rxrpc_rtt_rx_traces } __mode(byte);
+enum rxrpc_rtt_tx_trace		{ rxrpc_rtt_tx_traces } __mode(byte);
+enum rxrpc_skb_trace		{ rxrpc_skb_traces } __mode(byte);
+enum rxrpc_timer_trace		{ rxrpc_timer_traces } __mode(byte);
+enum rxrpc_transmit_trace	{ rxrpc_transmit_traces } __mode(byte);
+enum rxrpc_tx_point		{ rxrpc_tx_points } __mode(byte);
+
+#endif /* end __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY */
+
 /*
  * Export enum symbols via userspace.
  */
@@ -459,21 +280,21 @@ enum rxrpc_tx_point {
 #define EM(a, b) TRACE_DEFINE_ENUM(a);
 #define E_(a, b) TRACE_DEFINE_ENUM(a);
 
-rxrpc_skb_traces;
-rxrpc_local_traces;
-rxrpc_conn_traces;
-rxrpc_client_traces;
 rxrpc_call_traces;
-rxrpc_transmit_traces;
+rxrpc_client_traces;
+rxrpc_congest_changes;
+rxrpc_congest_modes;
+rxrpc_conn_traces;
+rxrpc_local_traces;
+rxrpc_propose_ack_outcomes;
+rxrpc_propose_ack_traces;
 rxrpc_receive_traces;
 rxrpc_recvmsg_traces;
-rxrpc_rtt_tx_traces;
 rxrpc_rtt_rx_traces;
+rxrpc_rtt_tx_traces;
+rxrpc_skb_traces;
 rxrpc_timer_traces;
-rxrpc_propose_ack_traces;
-rxrpc_propose_ack_outcomes;
-rxrpc_congest_modes;
-rxrpc_congest_changes;
+rxrpc_transmit_traces;
 rxrpc_tx_points;
 
 /*
@@ -583,7 +404,7 @@ TRACE_EVENT(rxrpc_client,
 	    TP_fast_assign(
 		    __entry->conn = conn ? conn->debug_id : 0;
 		    __entry->channel = channel;
-		    __entry->usage = conn ? atomic_read(&conn->usage) : -2;
+		    __entry->usage = conn ? refcount_read(&conn->ref) : -2;
 		    __entry->op = op;
 		    __entry->cid = conn ? conn->proto.cid : 0;
 			   ),
@@ -1574,6 +1395,8 @@ TRACE_EVENT(rxrpc_rx_discard_ack,
 			      __entry->call_ackr_prev)
 	    );
 
+#undef EM
+#undef E_
 #endif /* _TRACE_RXRPC_H */
 
 /* This part must be outside protection */

@@ -351,7 +351,7 @@ static void rxrpc_dummy_notify_rx(struct sock *sk, struct rxrpc_call *rxcall,
  */
 void rxrpc_kernel_end_call(struct socket *sock, struct rxrpc_call *call)
 {
-	_enter("%d{%d}", call->debug_id, atomic_read(&call->usage));
+	_enter("%d{%d}", call->debug_id, refcount_read(&call->ref));
 
 	mutex_lock(&call->user_mutex);
 	rxrpc_release_call(rxrpc_sk(sock->sk), call);

@@ -15,14 +15,6 @@
 #include <keys/rxrpc-type.h>
 #include "protocol.h"
 
-#if 0
-#define CHECK_SLAB_OKAY(X)				     \
-	BUG_ON(atomic_read((X)) >> (sizeof(atomic_t) - 2) == \
-	       (POISON_FREE << 8 | POISON_FREE))
-#else
-#define CHECK_SLAB_OKAY(X) do {} while (0)
-#endif
-
 #define FCRYPT_BSIZE 8
 struct rxrpc_crypt {
 	union {
@@ -68,7 +60,7 @@ struct rxrpc_net {
 	struct proc_dir_entry	*proc_net;	/* Subdir in /proc/net */
 	u32			epoch;		/* Local epoch for detecting local-end reset */
 	struct list_head	calls;		/* List of calls active in this namespace */
-	rwlock_t		call_lock;	/* Lock for ->calls */
+	spinlock_t		call_lock;	/* Lock for ->calls */
 	atomic_t		nr_calls;	/* Count of allocated calls */
 
 	atomic_t		nr_conns;
@@ -88,7 +80,7 @@ struct rxrpc_net {
 	struct work_struct	client_conn_reaper;
 	struct timer_list	client_conn_reap_timer;
 
-	struct list_head	local_endpoints;
+	struct hlist_head	local_endpoints;
 	struct mutex		local_mutex;	/* Lock for ->local_endpoints */
 
 	DECLARE_HASHTABLE	(peer_hash, 10);
@@ -279,9 +271,9 @@ struct rxrpc_security {
 struct rxrpc_local {
 	struct rcu_head		rcu;
 	atomic_t		active_users;	/* Number of users of the local endpoint */
-	atomic_t		usage;		/* Number of references to the structure */
+	refcount_t		ref;		/* Number of references to the structure */
 	struct rxrpc_net	*rxnet;		/* The network ns in which this resides */
-	struct list_head	link;
+	struct hlist_node	link;
 	struct socket		*socket;	/* my UDP socket */
 	struct work_struct	processor;
 	struct rxrpc_sock __rcu	*service;	/* Service(s) listening on this endpoint */
@@ -304,7 +296,7 @@ struct rxrpc_local {
  */
 struct rxrpc_peer {
 	struct rcu_head		rcu;		/* This must be first */
-	atomic_t		usage;
+	refcount_t		ref;
 	unsigned long		hash_key;
 	struct hlist_node	hash_link;
 	struct rxrpc_local	*local;
@@ -406,7 +398,7 @@ enum rxrpc_conn_proto_state {
  */
 struct rxrpc_bundle {
 	struct rxrpc_conn_parameters params;
-	atomic_t		usage;
+	refcount_t		ref;
 	unsigned int		debug_id;
 	bool			try_upgrade;	/* True if the bundle is attempting upgrade */
 	bool			alloc_conn;	/* True if someone's getting a conn */
@@ -427,7 +419,7 @@ struct rxrpc_connection {
 	struct rxrpc_conn_proto	proto;
 	struct rxrpc_conn_parameters params;
 
-	atomic_t		usage;
+	refcount_t		ref;
 	struct rcu_head		rcu;
 	struct list_head	cache_link;
 
@@ -609,7 +601,7 @@ struct rxrpc_call {
 	int			error;		/* Local error incurred */
 	enum rxrpc_call_state	state;		/* current state of call */
 	enum rxrpc_call_completion completion;	/* Call completion condition */
-	atomic_t		usage;
+	refcount_t		ref;
 	u16			service_id;	/* service ID */
 	u8			security_ix;	/* Security type */
 	enum rxrpc_interruptibility interruptibility; /* At what point call may be interrupted */
@@ -1014,6 +1006,7 @@ void rxrpc_put_peer_locked(struct rxrpc_peer *);
 extern const struct seq_operations rxrpc_call_seq_ops;
 extern const struct seq_operations rxrpc_connection_seq_ops;
 extern const struct seq_operations rxrpc_peer_seq_ops;
+extern const struct seq_operations rxrpc_local_seq_ops;
 
 /*
  * recvmsg.c

@@ -91,7 +91,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
 				 (head + 1) & (size - 1));
 
 		trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
-				 atomic_read(&conn->usage), here);
+				 refcount_read(&conn->ref), here);
 	}
 
 	/* Now it gets complicated, because calls get registered with the
@@ -104,7 +104,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
 	call->state = RXRPC_CALL_SERVER_PREALLOC;
 
 	trace_rxrpc_call(call->debug_id, rxrpc_call_new_service,
-			 atomic_read(&call->usage),
+			 refcount_read(&call->ref),
 			 here, (const void *)user_call_ID);
 
 	write_lock(&rx->call_lock);
@@ -140,9 +140,9 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
 	write_unlock(&rx->call_lock);
 
 	rxnet = call->rxnet;
-	write_lock(&rxnet->call_lock);
-	list_add_tail(&call->link, &rxnet->calls);
-	write_unlock(&rxnet->call_lock);
+	spin_lock_bh(&rxnet->call_lock);
+	list_add_tail_rcu(&call->link, &rxnet->calls);
+	spin_unlock_bh(&rxnet->call_lock);
 
 	b->call_backlog[call_head] = call;
 	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));

@@ -377,9 +377,9 @@ recheck_state:
 	if (test_bit(RXRPC_CALL_RX_HEARD, &call->flags) &&
 	    (int)call->conn->hi_serial - (int)call->rx_serial > 0) {
 		trace_rxrpc_call_reset(call);
-		rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ECONNRESET);
+		rxrpc_abort_call("EXP", call, 0, RX_CALL_DEAD, -ECONNRESET);
 	} else {
-		rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ETIME);
+		rxrpc_abort_call("EXP", call, 0, RX_CALL_TIMEOUT, -ETIME);
 	}
 	set_bit(RXRPC_CALL_EV_ABORT, &call->events);
 	goto recheck_state;

@@ -112,7 +112,7 @@ struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
 found_extant_call:
 	rxrpc_get_call(call, rxrpc_call_got);
 	read_unlock(&rx->call_lock);
-	_leave(" = %p [%d]", call, atomic_read(&call->usage));
+	_leave(" = %p [%d]", call, refcount_read(&call->ref));
 	return call;
 }
 
@@ -160,7 +160,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
 	spin_lock_init(&call->notify_lock);
 	spin_lock_init(&call->input_lock);
 	rwlock_init(&call->state_lock);
-	atomic_set(&call->usage, 1);
+	refcount_set(&call->ref, 1);
 	call->debug_id = debug_id;
 	call->tx_total_len = -1;
 	call->next_rx_timo = 20 * HZ;
@@ -299,7 +299,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
 	call->interruptibility = p->interruptibility;
 	call->tx_total_len = p->tx_total_len;
 	trace_rxrpc_call(call->debug_id, rxrpc_call_new_client,
-			 atomic_read(&call->usage),
+			 refcount_read(&call->ref),
 			 here, (const void *)p->user_call_ID);
 	if (p->kernel)
 		__set_bit(RXRPC_CALL_KERNEL, &call->flags);
@@ -337,9 +337,9 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
 	write_unlock(&rx->call_lock);
 
 	rxnet = call->rxnet;
-	write_lock(&rxnet->call_lock);
-	list_add_tail(&call->link, &rxnet->calls);
-	write_unlock(&rxnet->call_lock);
+	spin_lock_bh(&rxnet->call_lock);
+	list_add_tail_rcu(&call->link, &rxnet->calls);
+	spin_unlock_bh(&rxnet->call_lock);
 
 	/* From this point on, the call is protected by its own lock. */
 	release_sock(&rx->sk);
@@ -352,7 +352,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
 		goto error_attached_to_socket;
 
 	trace_rxrpc_call(call->debug_id, rxrpc_call_connected,
-			 atomic_read(&call->usage), here, NULL);
+			 refcount_read(&call->ref), here, NULL);
 
 	rxrpc_start_call_timer(call);
 
@@ -372,7 +372,7 @@ error_dup_user_ID:
 	__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
 				    RX_CALL_DEAD, -EEXIST);
 	trace_rxrpc_call(call->debug_id, rxrpc_call_error,
-			 atomic_read(&call->usage), here, ERR_PTR(-EEXIST));
+			 refcount_read(&call->ref), here, ERR_PTR(-EEXIST));
 	rxrpc_release_call(rx, call);
 	mutex_unlock(&call->user_mutex);
 	rxrpc_put_call(call, rxrpc_call_put);
@@ -386,7 +386,7 @@ error_dup_user_ID:
  */
 error_attached_to_socket:
 	trace_rxrpc_call(call->debug_id, rxrpc_call_error,
-			 atomic_read(&call->usage), here, ERR_PTR(ret));
+			 refcount_read(&call->ref), here, ERR_PTR(ret));
 	set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
 	__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
 				    RX_CALL_DEAD, ret);
@@ -442,8 +442,9 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
 bool rxrpc_queue_call(struct rxrpc_call *call)
 {
 	const void *here = __builtin_return_address(0);
-	int n = atomic_fetch_add_unless(&call->usage, 1, 0);
-	if (n == 0)
+	int n;
+
+	if (!__refcount_inc_not_zero(&call->ref, &n))
 		return false;
 	if (rxrpc_queue_work(&call->processor))
 		trace_rxrpc_call(call->debug_id, rxrpc_call_queued, n + 1,
@@ -459,7 +460,7 @@ bool rxrpc_queue_call(struct rxrpc_call *call)
 bool __rxrpc_queue_call(struct rxrpc_call *call)
 {
 	const void *here = __builtin_return_address(0);
-	int n = atomic_read(&call->usage);
+	int n = refcount_read(&call->ref);
 	ASSERTCMP(n, >=, 1);
 	if (rxrpc_queue_work(&call->processor))
 		trace_rxrpc_call(call->debug_id, rxrpc_call_queued_ref, n,
@@ -476,7 +477,7 @@ void rxrpc_see_call(struct rxrpc_call *call)
 {
 	const void *here = __builtin_return_address(0);
 	if (call) {
-		int n = atomic_read(&call->usage);
+		int n = refcount_read(&call->ref);
 
 		trace_rxrpc_call(call->debug_id, rxrpc_call_seen, n,
 				 here, NULL);
@@ -486,11 +487,11 @@ void rxrpc_see_call(struct rxrpc_call *call)
 bool rxrpc_try_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
 {
 	const void *here = __builtin_return_address(0);
-	int n = atomic_fetch_add_unless(&call->usage, 1, 0);
+	int n;
 
-	if (n == 0)
+	if (!__refcount_inc_not_zero(&call->ref, &n))
 		return false;
-	trace_rxrpc_call(call->debug_id, op, n, here, NULL);
+	trace_rxrpc_call(call->debug_id, op, n + 1, here, NULL);
 	return true;
 }
 
@@ -500,9 +501,10 @@ bool rxrpc_try_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
 void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
 {
 	const void *here = __builtin_return_address(0);
-	int n = atomic_inc_return(&call->usage);
+	int n;
 
-	trace_rxrpc_call(call->debug_id, op, n, here, NULL);
+	__refcount_inc(&call->ref, &n);
+	trace_rxrpc_call(call->debug_id, op, n + 1, here, NULL);
 }
 
 /*
@@ -527,10 +529,10 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
 	struct rxrpc_connection *conn = call->conn;
 	bool put = false;
 
-	_enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));
+	_enter("{%d,%d}", call->debug_id, refcount_read(&call->ref));
 
 	trace_rxrpc_call(call->debug_id, rxrpc_call_release,
-			 atomic_read(&call->usage),
+			 refcount_read(&call->ref),
 			 here, (const void *)call->flags);
 
 	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
@@ -619,21 +621,21 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
 	struct rxrpc_net *rxnet = call->rxnet;
 	const void *here = __builtin_return_address(0);
 	unsigned int debug_id = call->debug_id;
+	bool dead;
 	int n;
 
 	ASSERT(call != NULL);
 
-	n = atomic_dec_return(&call->usage);
+	dead = __refcount_dec_and_test(&call->ref, &n);
 	trace_rxrpc_call(debug_id, op, n, here, NULL);
-	ASSERTCMP(n, >=, 0);
-	if (n == 0) {
+	if (dead) {
 		_debug("call %d dead", call->debug_id);
 		ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
 
 		if (!list_empty(&call->link)) {
-			write_lock(&rxnet->call_lock);
+			spin_lock_bh(&rxnet->call_lock);
 			list_del_init(&call->link);
-			write_unlock(&rxnet->call_lock);
+			spin_unlock_bh(&rxnet->call_lock);
 		}
 
 		rxrpc_cleanup_call(call);
@@ -705,7 +707,7 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
 	_enter("");
 
 	if (!list_empty(&rxnet->calls)) {
-		write_lock(&rxnet->call_lock);
+		spin_lock_bh(&rxnet->call_lock);
 
 		while (!list_empty(&rxnet->calls)) {
 			call = list_entry(rxnet->calls.next,
@@ -716,16 +718,16 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
 			list_del_init(&call->link);
 
 			pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
-			       call, atomic_read(&call->usage),
+			       call, refcount_read(&call->ref),
 			       rxrpc_call_states[call->state],
 			       call->flags, call->events);
 
-			write_unlock(&rxnet->call_lock);
+			spin_unlock_bh(&rxnet->call_lock);
 			cond_resched();
-			write_lock(&rxnet->call_lock);
+			spin_lock_bh(&rxnet->call_lock);
 		}
 
-		write_unlock(&rxnet->call_lock);
+		spin_unlock_bh(&rxnet->call_lock);
 	}
 
 	atomic_dec(&rxnet->nr_calls);

@@ -102,7 +102,7 @@ void rxrpc_destroy_client_conn_ids(void)
 	if (!idr_is_empty(&rxrpc_client_conn_ids)) {
 		idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) {
 			pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
-			       conn, atomic_read(&conn->usage));
+			       conn, refcount_read(&conn->ref));
 		}
 		BUG();
 	}
@@ -122,7 +122,7 @@ static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_conn_parameters *cp,
 	if (bundle) {
 		bundle->params = *cp;
 		rxrpc_get_peer(bundle->params.peer);
-		atomic_set(&bundle->usage, 1);
+		refcount_set(&bundle->ref, 1);
 		spin_lock_init(&bundle->channel_lock);
 		INIT_LIST_HEAD(&bundle->waiting_calls);
 	}
@@ -131,7 +131,7 @@ static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_conn_parameters *cp,
 
 struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle)
 {
-	atomic_inc(&bundle->usage);
+	refcount_inc(&bundle->ref);
 	return bundle;
 }
 
@@ -144,10 +144,13 @@ static void rxrpc_free_bundle(struct rxrpc_bundle *bundle)
 void rxrpc_put_bundle(struct rxrpc_bundle *bundle)
 {
 	unsigned int d = bundle->debug_id;
-	unsigned int u = atomic_dec_return(&bundle->usage);
+	bool dead;
+	int r;
 
-	_debug("PUT B=%x %u", d, u);
-	if (u == 0)
+	dead = __refcount_dec_and_test(&bundle->ref, &r);
+
+	_debug("PUT B=%x %d", d, r);
+	if (dead)
 		rxrpc_free_bundle(bundle);
 }
 
@@ -169,7 +172,7 @@ rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle, gfp_t gfp)
 		return ERR_PTR(-ENOMEM);
 	}
 
-	atomic_set(&conn->usage, 1);
+	refcount_set(&conn->ref, 1);
 	conn->bundle = bundle;
 	conn->params = bundle->params;
 	conn->out_clientflag = RXRPC_CLIENT_INITIATED;
@@ -195,7 +198,7 @@ rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle, gfp_t gfp)
 	key_get(conn->params.key);
 
 	trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_client,
-			 atomic_read(&conn->usage),
+			 refcount_read(&conn->ref),
 			 __builtin_return_address(0));
 
 	atomic_inc(&rxnet->nr_client_conns);
@@ -966,14 +969,13 @@ void rxrpc_put_client_conn(struct rxrpc_connection *conn)
 {
 	const void *here = __builtin_return_address(0);
 	unsigned int debug_id = conn->debug_id;
-	int n;
+	bool dead;
+	int r;
 
-	n = atomic_dec_return(&conn->usage);
-	trace_rxrpc_conn(debug_id, rxrpc_conn_put_client, n, here);
-	if (n <= 0) {
-		ASSERTCMP(n, >=, 0);
+	dead = __refcount_dec_and_test(&conn->ref, &r);
+	trace_rxrpc_conn(debug_id, rxrpc_conn_put_client, r - 1, here);
+	if (dead)
 		rxrpc_kill_client_conn(conn);
-	}
 }
 
 /*

@@ -104,7 +104,7 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
 			goto not_found;
 		*_peer = peer;
 		conn = rxrpc_find_service_conn_rcu(peer, skb);
-		if (!conn || atomic_read(&conn->usage) == 0)
+		if (!conn || refcount_read(&conn->ref) == 0)
 			goto not_found;
 		_leave(" = %p", conn);
 		return conn;
@@ -114,7 +114,7 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
 		 */
 		conn = idr_find(&rxrpc_client_conn_ids,
 				sp->hdr.cid >> RXRPC_CIDSHIFT);
-		if (!conn || atomic_read(&conn->usage) == 0) {
+		if (!conn || refcount_read(&conn->ref) == 0) {
 			_debug("no conn");
 			goto not_found;
 		}
@@ -183,7 +183,7 @@ void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
 			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
 			break;
 		default:
-			chan->last_abort = RX_USER_ABORT;
+			chan->last_abort = RX_CALL_DEAD;
 			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
 			break;
 		}
@@ -263,11 +263,12 @@ void rxrpc_kill_connection(struct rxrpc_connection *conn)
 bool rxrpc_queue_conn(struct rxrpc_connection *conn)
 {
 	const void *here = __builtin_return_address(0);
-	int n = atomic_fetch_add_unless(&conn->usage, 1, 0);
-	if (n == 0)
+	int r;
+
+	if (!__refcount_inc_not_zero(&conn->ref, &r))
 		return false;
 	if (rxrpc_queue_work(&conn->processor))
-		trace_rxrpc_conn(conn->debug_id, rxrpc_conn_queued, n + 1, here);
+		trace_rxrpc_conn(conn->debug_id, rxrpc_conn_queued, r + 1, here);
 	else
 		rxrpc_put_connection(conn);
 	return true;
@@ -280,7 +281,7 @@ void rxrpc_see_connection(struct rxrpc_connection *conn)
 {
 	const void *here = __builtin_return_address(0);
 	if (conn) {
-		int n = atomic_read(&conn->usage);
+		int n = refcount_read(&conn->ref);
 
 		trace_rxrpc_conn(conn->debug_id, rxrpc_conn_seen, n, here);
 	}
@@ -292,9 +293,10 @@ void rxrpc_see_connection(struct rxrpc_connection *conn)
 struct rxrpc_connection *rxrpc_get_connection(struct rxrpc_connection *conn)
 {
 	const void *here = __builtin_return_address(0);
-	int n = atomic_inc_return(&conn->usage);
+	int r;
 
-	trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, n, here);
+	__refcount_inc(&conn->ref, &r);
+	trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, r, here);
 	return conn;
 }
 
@@ -305,11 +307,11 @@ struct rxrpc_connection *
 rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
 {
 	const void *here = __builtin_return_address(0);
+	int r;
 
 	if (conn) {
-		int n = atomic_fetch_add_unless(&conn->usage, 1, 0);
-		if (n > 0)
-			trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, n + 1, here);
+		if (__refcount_inc_not_zero(&conn->ref, &r))
+			trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, r + 1, here);
 		else
 			conn = NULL;
 	}
@@ -333,12 +335,11 @@ void rxrpc_put_service_conn(struct rxrpc_connection *conn)
 {
 	const void *here = __builtin_return_address(0);
 	unsigned int debug_id = conn->debug_id;
-	int n;
+	int r;
 
-	n = atomic_dec_return(&conn->usage);
-	trace_rxrpc_conn(debug_id, rxrpc_conn_put_service, n, here);
-	ASSERTCMP(n, >=, 0);
-	if (n == 1)
+	__refcount_dec(&conn->ref, &r);
+	trace_rxrpc_conn(debug_id, rxrpc_conn_put_service, r - 1, here);
+	if (r - 1 == 1)
 		rxrpc_set_service_reap_timer(conn->params.local->rxnet,
 					     jiffies + rxrpc_connection_expiry);
 }
@@ -351,9 +352,9 @@ static void rxrpc_destroy_connection(struct rcu_head *rcu)
 	struct rxrpc_connection *conn =
 		container_of(rcu, struct rxrpc_connection, rcu);
 
-	_enter("{%d,u=%d}", conn->debug_id, atomic_read(&conn->usage));
+	_enter("{%d,u=%d}", conn->debug_id, refcount_read(&conn->ref));
 
-	ASSERTCMP(atomic_read(&conn->usage), ==, 0);
+	ASSERTCMP(refcount_read(&conn->ref), ==, 0);
 
 	_net("DESTROY CONN %d", conn->debug_id);
 
@@ -392,8 +393,8 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
 
 	write_lock(&rxnet->conn_lock);
 	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
-		ASSERTCMP(atomic_read(&conn->usage), >, 0);
-		if (likely(atomic_read(&conn->usage) > 1))
+		ASSERTCMP(refcount_read(&conn->ref), >, 0);
+		if (likely(refcount_read(&conn->ref) > 1))
 			continue;
 		if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
 			continue;
@@ -405,7 +406,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
 		expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ;
 
 		_debug("reap CONN %d { u=%d,t=%ld }",
-		       conn->debug_id, atomic_read(&conn->usage),
+		       conn->debug_id, refcount_read(&conn->ref),
 		       (long)expire_at - (long)now);
 
 		if (time_before(now, expire_at)) {
@@ -418,7 +419,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
 		/* The usage count sits at 1 whilst the object is unused on the
 		 * list; we reduce that to 0 to make the object unavailable.
 		 */
-		if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
+		if (!refcount_dec_if_one(&conn->ref))
 			continue;
 		trace_rxrpc_conn(conn->debug_id, rxrpc_conn_reap_service, 0, NULL);
 
@@ -442,7 +443,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
 				  link);
 		list_del_init(&conn->link);
 
		ASSERTCMP(refcount_read(&conn->ref), ==, 0);
 		rxrpc_kill_connection(conn);
 	}
 
@@ -470,7 +471,7 @@ void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
 	write_lock(&rxnet->conn_lock);
 	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
 		pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
-		       conn, atomic_read(&conn->usage));
+		       conn, refcount_read(&conn->ref));
 		leak = true;
 	}
 	write_unlock(&rxnet->conn_lock);

@@ -9,7 +9,7 @@
 #include "ar-internal.h"
 
 static struct rxrpc_bundle rxrpc_service_dummy_bundle = {
-	.usage		= ATOMIC_INIT(1),
+	.ref		= REFCOUNT_INIT(1),
 	.debug_id	= UINT_MAX,
 	.channel_lock	= __SPIN_LOCK_UNLOCKED(&rxrpc_service_dummy_bundle.channel_lock),
 };
@@ -99,7 +99,7 @@ conn_published:
 	return;
 
 found_extant_conn:
-	if (atomic_read(&cursor->usage) == 0)
+	if (refcount_read(&cursor->ref) == 0)
 		goto replace_old_connection;
 	write_sequnlock_bh(&peer->service_conn_lock);
 	/* We should not be able to get here.  rxrpc_incoming_connection() is
@@ -132,7 +132,7 @@ struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxn
 	 * the rxrpc_connections list.
 	 */
 	conn->state = RXRPC_CONN_SERVICE_PREALLOC;
-	atomic_set(&conn->usage, 2);
+	refcount_set(&conn->ref, 2);
 	conn->bundle = rxrpc_get_bundle(&rxrpc_service_dummy_bundle);
 
 	atomic_inc(&rxnet->nr_conns);
@@ -142,7 +142,7 @@ struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxn
 	write_unlock(&rxnet->conn_lock);
 
 	trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
-			 atomic_read(&conn->usage),
+			 refcount_read(&conn->ref),
 			 __builtin_return_address(0));
 }

@@ -903,6 +903,33 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
 				  rxrpc_propose_ack_respond_to_ack);
 	}
 
+	/* If we get an EXCEEDS_WINDOW ACK from the server, it probably
+	 * indicates that the client address changed due to NAT.  The server
+	 * lost the call because it switched to a different peer.
+	 */
+	if (unlikely(buf.ack.reason == RXRPC_ACK_EXCEEDS_WINDOW) &&
+	    first_soft_ack == 1 &&
+	    prev_pkt == 0 &&
+	    rxrpc_is_client_call(call)) {
+		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
+					  0, -ENETRESET);
+		return;
+	}
+
+	/* If we get an OUT_OF_SEQUENCE ACK from the server, that can also
+	 * indicate a change of address.  However, we can retransmit the call
+	 * if we still have it buffered to the beginning.
+	 */
+	if (unlikely(buf.ack.reason == RXRPC_ACK_OUT_OF_SEQUENCE) &&
+	    first_soft_ack == 1 &&
+	    prev_pkt == 0 &&
+	    call->tx_hard_ack == 0 &&
+	    rxrpc_is_client_call(call)) {
+		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
+					  0, -ENETRESET);
+		return;
+	}
+
 	/* Discard any out-of-order or duplicate ACKs (outside lock). */
 	if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
 		trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial,
@@ -1154,8 +1181,6 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
  */
 static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
 {
-	CHECK_SLAB_OKAY(&local->usage);
-
 	if (rxrpc_get_local_maybe(local)) {
 		skb_queue_tail(&local->reject_queue, skb);
 		rxrpc_queue_local(local);
@@ -1413,7 +1438,7 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
 		}
 	}
 
-	if (!call || atomic_read(&call->usage) == 0) {
+	if (!call || refcount_read(&call->ref) == 0) {
 		if (rxrpc_to_client(sp) ||
 		    sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
 			goto bad_message;

@@ -79,10 +79,10 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
 
 	local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
 	if (local) {
-		atomic_set(&local->usage, 1);
+		refcount_set(&local->ref, 1);
 		atomic_set(&local->active_users, 1);
 		local->rxnet = rxnet;
-		INIT_LIST_HEAD(&local->link);
+		INIT_HLIST_NODE(&local->link);
 		INIT_WORK(&local->processor, rxrpc_local_processor);
 		init_rwsem(&local->defrag_sem);
 		skb_queue_head_init(&local->reject_queue);
@@ -180,7 +180,7 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
 {
 	struct rxrpc_local *local;
 	struct rxrpc_net *rxnet = rxrpc_net(net);
-	struct list_head *cursor;
+	struct hlist_node *cursor;
 	const char *age;
 	long diff;
 	int ret;
@@ -190,16 +190,12 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
 
 	mutex_lock(&rxnet->local_mutex);
 
-	for (cursor = rxnet->local_endpoints.next;
-	     cursor != &rxnet->local_endpoints;
-	     cursor = cursor->next) {
-		local = list_entry(cursor, struct rxrpc_local, link);
+	hlist_for_each(cursor, &rxnet->local_endpoints) {
+		local = hlist_entry(cursor, struct rxrpc_local, link);
 
 		diff = rxrpc_local_cmp_key(local, srx);
-		if (diff < 0)
+		if (diff != 0)
 			continue;
-		if (diff > 0)
-			break;
 
 		/* Services aren't allowed to share transport sockets, so
 		 * reject that here.  It is possible that the object is dying -
@@ -211,9 +207,10 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
 			goto addr_in_use;
 		}
 
-		/* Found a match.  We replace a dying object.  Attempting to
-		 * bind the transport socket may still fail if we're attempting
-		 * to use a local address that the dying object is still using.
+		/* Found a match.  We want to replace a dying object.
+		 * Attempting to bind the transport socket may still fail if
+		 * we're attempting to use a local address that the dying
+		 * object is still using.
 		 */
 		if (!rxrpc_use_local(local))
 			break;
@@ -230,10 +227,12 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
 	if (ret < 0)
 		goto sock_error;
 
-	if (cursor != &rxnet->local_endpoints)
-		list_replace_init(cursor, &local->link);
-	else
-		list_add_tail(&local->link, cursor);
+	if (cursor) {
+		hlist_replace_rcu(cursor, &local->link);
+		cursor->pprev = NULL;
+	} else {
+		hlist_add_head_rcu(&local->link, &rxnet->local_endpoints);
+	}
 	age = "new";
 
 found:
@@ -266,10 +265,10 @@ addr_in_use:
 struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
 {
 	const void *here = __builtin_return_address(0);
-	int n;
+	int r;
 
-	n = atomic_inc_return(&local->usage);
-	trace_rxrpc_local(local->debug_id, rxrpc_local_got, n, here);
+	__refcount_inc(&local->ref, &r);
+	trace_rxrpc_local(local->debug_id, rxrpc_local_got, r + 1, here);
 	return local;
 }
 
@@ -279,12 +278,12 @@ struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
 struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
 {
 	const void *here = __builtin_return_address(0);
+	int r;
 
 	if (local) {
-		int n = atomic_fetch_add_unless(&local->usage, 1, 0);
-		if (n > 0)
+		if (__refcount_inc_not_zero(&local->ref, &r))
 			trace_rxrpc_local(local->debug_id, rxrpc_local_got,
-					  n + 1, here);
+					  r + 1, here);
 		else
 			local = NULL;
 	}
@@ -298,10 +297,10 @@ void rxrpc_queue_local(struct rxrpc_local *local)
 {
 	const void *here = __builtin_return_address(0);
 	unsigned int debug_id = local->debug_id;
-	int n = atomic_read(&local->usage);
+	int r = refcount_read(&local->ref);
 
 	if (rxrpc_queue_work(&local->processor))
-		trace_rxrpc_local(debug_id, rxrpc_local_queued, n, here);
+		trace_rxrpc_local(debug_id, rxrpc_local_queued, r + 1, here);
 	else
 		rxrpc_put_local(local);
 }
@@ -313,15 +312,16 @@ void rxrpc_put_local(struct rxrpc_local *local)
 {
 	const void *here = __builtin_return_address(0);
 	unsigned int debug_id;
-	int n;
+	bool dead;
+	int r;
 
 	if (local) {
 		debug_id = local->debug_id;
 
-		n = atomic_dec_return(&local->usage);
-		trace_rxrpc_local(debug_id, rxrpc_local_put, n, here);
+		dead = __refcount_dec_and_test(&local->ref, &r);
+		trace_rxrpc_local(debug_id, rxrpc_local_put, r, here);
 
-		if (n == 0)
+		if (dead)
 			call_rcu(&local->rcu, rxrpc_local_rcu);
 	}
 }
@@ -374,7 +374,7 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
 	local->dead = true;
 
 	mutex_lock(&rxnet->local_mutex);
-	list_del_init(&local->link);
+	hlist_del_init_rcu(&local->link);
 	mutex_unlock(&rxnet->local_mutex);
 
 	rxrpc_clean_up_local_conns(local);
@@ -406,7 +406,7 @@ static void rxrpc_local_processor(struct work_struct *work)
 	bool again;
 
 	trace_rxrpc_local(local->debug_id, rxrpc_local_processing,
-			  atomic_read(&local->usage), NULL);
+			  refcount_read(&local->ref), NULL);
 
 	do {
 		again = false;
@@ -458,11 +458,11 @@ void rxrpc_destroy_all_locals(struct rxrpc_net *rxnet)
 
 	flush_workqueue(rxrpc_workqueue);
 
-	if (!list_empty(&rxnet->local_endpoints)) {
+	if (!hlist_empty(&rxnet->local_endpoints)) {
 		mutex_lock(&rxnet->local_mutex);
-		list_for_each_entry(local, &rxnet->local_endpoints, link) {
+		hlist_for_each_entry(local, &rxnet->local_endpoints, link) {
 			pr_err("AF_RXRPC: Leaked local %p {%d}\n",
-			       local, atomic_read(&local->usage));
+			       local, refcount_read(&local->ref));
 		}
 		mutex_unlock(&rxnet->local_mutex);
 		BUG();

@@ -50,7 +50,7 @@ static __net_init int rxrpc_init_net(struct net *net)
 	rxnet->epoch |= RXRPC_RANDOM_EPOCH;
 
 	INIT_LIST_HEAD(&rxnet->calls);
-	rwlock_init(&rxnet->call_lock);
+	spin_lock_init(&rxnet->call_lock);
 	atomic_set(&rxnet->nr_calls, 1);
 
 	atomic_set(&rxnet->nr_conns, 1);
@@ -72,7 +72,7 @@ static __net_init int rxrpc_init_net(struct net *net)
 	timer_setup(&rxnet->client_conn_reap_timer,
 		    rxrpc_client_conn_reap_timeout, 0);
 
-	INIT_LIST_HEAD(&rxnet->local_endpoints);
+	INIT_HLIST_HEAD(&rxnet->local_endpoints);
 	mutex_init(&rxnet->local_mutex);
 
 	hash_init(rxnet->peer_hash);
@@ -98,6 +98,9 @@ static __net_init int rxrpc_init_net(struct net *net)
 	proc_create_net("peers", 0444, rxnet->proc_net,
 			&rxrpc_peer_seq_ops,
 			sizeof(struct seq_net_private));
+	proc_create_net("locals", 0444, rxnet->proc_net,
+			&rxrpc_local_seq_ops,
+			sizeof(struct seq_net_private));
 	return 0;
 
 err_proc:

@@ -121,7 +121,7 @@ static struct rxrpc_peer *__rxrpc_lookup_peer_rcu(
 
 	hash_for_each_possible_rcu(rxnet->peer_hash, peer, hash_link, hash_key) {
 		if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0 &&
-		    atomic_read(&peer->usage) > 0)
+		    refcount_read(&peer->ref) > 0)
 			return peer;
 	}
 
@@ -140,7 +140,7 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local,
 	peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
 	if (peer) {
 		_net("PEER %d {%pISp}", peer->debug_id, &peer->srx.transport);
-		_leave(" = %p {u=%d}", peer, atomic_read(&peer->usage));
+		_leave(" = %p {u=%d}", peer, refcount_read(&peer->ref));
 	}
 	return peer;
 }
@@ -216,7 +216,7 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
 
 	peer = kzalloc(sizeof(struct rxrpc_peer), gfp);
 	if (peer) {
-		atomic_set(&peer->usage, 1);
+		refcount_set(&peer->ref, 1);
 		peer->local = rxrpc_get_local(local);
 		INIT_HLIST_HEAD(&peer->error_targets);
 		peer->service_conns = RB_ROOT;
@@ -378,7 +378,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
 
 	_net("PEER %d {%pISp}", peer->debug_id, &peer->srx.transport);
 
-	_leave(" = %p {u=%d}", peer, atomic_read(&peer->usage));
+	_leave(" = %p {u=%d}", peer, refcount_read(&peer->ref));
 	return peer;
 }
 
@@ -388,10 +388,10 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
 struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer)
 {
 	const void *here = __builtin_return_address(0);
-	int n;
+	int r;
 
-	n = atomic_inc_return(&peer->usage);
-	trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, n, here);
+	__refcount_inc(&peer->ref, &r);
+	trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, r + 1, here);
 	return peer;
 }
 
@@ -401,11 +401,11 @@ struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer)
 struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
 {
 	const void *here = __builtin_return_address(0);
+	int r;
 
 	if (peer) {
-		int n = atomic_fetch_add_unless(&peer->usage, 1, 0);
-		if (n > 0)
-			trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, n + 1, here);
+		if (__refcount_inc_not_zero(&peer->ref, &r))
+			trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, r + 1, here);
 		else
 			peer = NULL;
 	}
@@ -436,13 +436,14 @@ void rxrpc_put_peer(struct rxrpc_peer *peer)
 {
 	const void *here = __builtin_return_address(0);
 	unsigned int debug_id;
-	int n;
+	bool dead;
+	int r;
 
 	if (peer) {
 		debug_id = peer->debug_id;
-		n = atomic_dec_return(&peer->usage);
-		trace_rxrpc_peer(debug_id, rxrpc_peer_put, n, here);
-		if (n == 0)
+		dead = __refcount_dec_and_test(&peer->ref, &r);
+		trace_rxrpc_peer(debug_id, rxrpc_peer_put, r - 1, here);
+		if (dead)
 			__rxrpc_put_peer(peer);
 	}
 }
@@ -455,11 +456,12 @@ void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
 {
 	const void *here = __builtin_return_address(0);
 	unsigned int debug_id = peer->debug_id;
-	int n;
+	bool dead;
+	int r;
 
-	n = atomic_dec_return(&peer->usage);
-	trace_rxrpc_peer(debug_id, rxrpc_peer_put, n, here);
-	if (n == 0) {
+	dead = __refcount_dec_and_test(&peer->ref, &r);
+	trace_rxrpc_peer(debug_id, rxrpc_peer_put, r - 1, here);
+	if (dead) {
 		hash_del_rcu(&peer->hash_link);
 		list_del_init(&peer->keepalive_link);
 		rxrpc_free_peer(peer);
@@ -481,7 +483,7 @@ void rxrpc_destroy_all_peers(struct rxrpc_net *rxnet)
 		hlist_for_each_entry(peer, &rxnet->peer_hash[i], hash_link) {
 			pr_err("Leaked peer %u {%u} %pISp\n",
 			       peer->debug_id,
-			       atomic_read(&peer->usage),
+			       refcount_read(&peer->ref),
 			       &peer->srx.transport);
 		}
 	}

@@ -26,29 +26,23 @@ static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = {
  */
 static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos)
 	__acquires(rcu)
-	__acquires(rxnet->call_lock)
 {
 	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
 
 	rcu_read_lock();
-	read_lock(&rxnet->call_lock);
-	return seq_list_start_head(&rxnet->calls, *_pos);
+	return seq_list_start_head_rcu(&rxnet->calls, *_pos);
 }
 
 static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
 
-	return seq_list_next(v, &rxnet->calls, pos);
+	return seq_list_next_rcu(v, &rxnet->calls, pos);
 }
 
 static void rxrpc_call_seq_stop(struct seq_file *seq, void *v)
-	__releases(rxnet->call_lock)
 	__releases(rcu)
 {
-	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
-
-	read_unlock(&rxnet->call_lock);
 	rcu_read_unlock();
 }
 
@@ -107,7 +101,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
 		   call->cid,
 		   call->call_id,
 		   rxrpc_is_service_call(call) ? "Svc" : "Clt",
-		   atomic_read(&call->usage),
+		   refcount_read(&call->ref),
 		   rxrpc_call_states[call->state],
 		   call->abort_code,
 		   call->debug_id,
@@ -189,7 +183,7 @@ print:
 		   conn->service_id,
 		   conn->proto.cid,
 		   rxrpc_conn_is_service(conn) ? "Svc" : "Clt",
-		   atomic_read(&conn->usage),
+		   refcount_read(&conn->ref),
 		   rxrpc_conn_states[conn->state],
 		   key_serial(conn->params.key),
 		   atomic_read(&conn->serial),
@@ -239,7 +233,7 @@ static int rxrpc_peer_seq_show(struct seq_file *seq, void *v)
 		   " %3u %5u %6llus %8u %8u\n",
 		   lbuff,
 		   rbuff,
-		   atomic_read(&peer->usage),
+		   refcount_read(&peer->ref),
 		   peer->cong_cwnd,
 		   peer->mtu,
 		   now - peer->last_tx_at,
@@ -334,3 +328,72 @@ const struct seq_operations rxrpc_peer_seq_ops = {
 	.stop   = rxrpc_peer_seq_stop,
 	.show   = rxrpc_peer_seq_show,
 };
+
+/*
+ * Generate a list of extant virtual local endpoints in /proc/net/rxrpc/locals
+ */
+static int rxrpc_local_seq_show(struct seq_file *seq, void *v)
+{
+	struct rxrpc_local *local;
+	char lbuff[50];
+
+	if (v == SEQ_START_TOKEN) {
+		seq_puts(seq,
+			 "Proto Local                                          "
+			 " Use Act\n");
+		return 0;
+	}
+
+	local = hlist_entry(v, struct rxrpc_local, link);
+
+	sprintf(lbuff, "%pISpc", &local->srx.transport);
+
+	seq_printf(seq,
+		   "UDP   %-47.47s %3u %3u\n",
+		   lbuff,
+		   refcount_read(&local->ref),
+		   atomic_read(&local->active_users));
+
+	return 0;
+}
+
+static void *rxrpc_local_seq_start(struct seq_file *seq, loff_t *_pos)
+	__acquires(rcu)
+{
+	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
+	unsigned int n;
+
+	rcu_read_lock();
+
+	if (*_pos >= UINT_MAX)
+		return NULL;
+
+	n = *_pos;
+	if (n == 0)
+		return SEQ_START_TOKEN;
+
+	return seq_hlist_start_rcu(&rxnet->local_endpoints, n - 1);
+}
+
+static void *rxrpc_local_seq_next(struct seq_file *seq, void *v, loff_t *_pos)
+{
+	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
+
+	if (*_pos >= UINT_MAX)
+		return NULL;
+
+	return seq_hlist_next_rcu(v, &rxnet->local_endpoints, _pos);
+}
+
+static void rxrpc_local_seq_stop(struct seq_file *seq, void *v)
+	__releases(rcu)
+{
+	rcu_read_unlock();
+}
+
+const struct seq_operations rxrpc_local_seq_ops = {
+	.start  = rxrpc_local_seq_start,
+	.next   = rxrpc_local_seq_next,
+	.stop   = rxrpc_local_seq_stop,
+	.show   = rxrpc_local_seq_show,
+};

@@ -444,6 +444,12 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
 
 success:
 	ret = copied;
+	if (READ_ONCE(call->state) == RXRPC_CALL_COMPLETE) {
+		read_lock_bh(&call->state_lock);
+		if (call->error < 0)
+			ret = call->error;
+		read_unlock_bh(&call->state_lock);
+	}
 out:
 	call->tx_pending = skb;
 	_leave(" = %d", ret);

@@ -71,7 +71,6 @@ void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
 	const void *here = __builtin_return_address(0);
 	if (skb) {
 		int n;
-		CHECK_SLAB_OKAY(&skb->users);
 		n = atomic_dec_return(select_skb_count(skb));
 		trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
 				rxrpc_skb(skb)->rx_flags, here);
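The seq_list_*_rcu() helpers added in fs/seq_file.c above are meant to replace lock-based list walking in a seq_file iterator, as the rxrpc_call_seq_ops conversion shows. A minimal sketch of the wiring under stated assumptions — the `demo_*` names and `struct demo_entry` are made up for illustration:

	/* Illustrative sketch only: a seq_file iterator over an RCU-protected
	 * list using the new helpers.  Entries are assumed to be added with
	 * list_add_tail_rcu() and freed only after an RCU grace period.
	 */
	#include <linux/seq_file.h>
	#include <linux/rculist.h>

	static LIST_HEAD(demo_list);

	struct demo_entry {
		struct list_head link;
		int value;
	};

	static void *demo_seq_start(struct seq_file *seq, loff_t *pos)
		__acquires(rcu)
	{
		rcu_read_lock();
		/* The _head variant yields &demo_list itself at pos 0, which
		 * show() can use to print a header row.
		 */
		return seq_list_start_head_rcu(&demo_list, *pos);
	}

	static void *demo_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	{
		return seq_list_next_rcu(v, &demo_list, pos);
	}

	static void demo_seq_stop(struct seq_file *seq, void *v)
		__releases(rcu)
	{
		rcu_read_unlock();
	}

	static int demo_seq_show(struct seq_file *seq, void *v)
	{
		if (v == &demo_list) {
			seq_puts(seq, "Value\n");
			return 0;
		}
		seq_printf(seq, "%d\n",
			   list_entry(v, struct demo_entry, link)->value);
		return 0;
	}

	static const struct seq_operations demo_seq_ops = {
		.start	= demo_seq_start,
		.next	= demo_seq_next,
		.stop	= demo_seq_stop,
		.show	= demo_seq_show,
	};

This is what lets the /proc readers above drop rxnet->call_lock entirely: readers run under rcu_read_lock() while writers serialise on the new spinlock.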