commit c1e15b4944
The rxrpc_input_packet() function and its call tree were built around the
assumption that the data_ready() handler called from UDP to inform a kernel
service that there is data to be had is non-reentrant. This meant that
certain locking could be dispensed with.
This, however, turns out not to be the case with a multi-queue network card
that can deliver packets to multiple CPUs simultaneously. Each of those
CPUs can be in the rxrpc_input_packet() function at the same time.
Fix by adding or changing some structure members:
(1) Add peer->rtt_input_lock to serialise access to the RTT buffer.
(2) Make conn->service_id into a 32-bit variable so that it can be
cmpxchg'd on all arches.
(3) Add call->input_lock to serialise access to the Rx/Tx state. Note
that although the Rx and Tx states are (almost) entirely separate,
there's no point completing the separation and having separate locks
since it's a bi-phasal RPC protocol rather than a bi-directional
streaming protocol. Data transmission and data reception do not take
place simultaneously on any particular call.
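The shape of these member changes is sketched below. This is illustrative
only: the real declarations live in rxrpc's ar-internal.h among many other
members, and the "_sketch" type names are hypothetical, not the actual ones.

	/* Illustrative sketch of points (1)-(3); not the literal declarations. */
	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct rxrpc_peer_sketch {
		spinlock_t	rtt_input_lock;	/* (1) Serialises access to the RTT buffer */
		/* ... RTT sample buffer, peer->rtt, etc. ... */
	};

	struct rxrpc_connection_sketch {
		u32		service_id;	/* (2) 32 bits wide so cmpxchg() works on all arches */
		/* ... */
	};

	struct rxrpc_call_sketch {
		spinlock_t	input_lock;	/* (3) Serialises access to the Rx/Tx state */
		/* ... */
	};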
The fix also makes the following functional changes:
(1) In rxrpc_input_data(), hold call->input_lock around the core to
prevent simultaneous producing of packets into the Rx ring and
updating of tracking state for a particular call.
(2) In rxrpc_input_ping_response(), only read call->ping_serial once, and
check it before checking RXRPC_CALL_PINGING as that's a cheaper test.
The bit test and bit clear can then be combined. No further locking
is needed here.
(3) In rxrpc_input_ack(), take call->input_lock after we've parsed much of
the ACK packet. The superseded ACK check is then done both before and
after the lock is taken.
The handling of ackinfo data is split, parsing before the lock is taken
and processing with it held. This is keyed on rxMTU being non-zero.
Congestion management is also done within the locked section.
(4) In rxrpc_input_ackall(), take call->input_lock around the Tx window
rotation. The ACKALL packet carries no information and is only really
useful after all packets have been transmitted since it's imprecise.
(5) In rxrpc_input_implicit_end_call(), we use rx->incoming_lock to
prevent calls being simultaneously implicitly ended on two CPUs and
also to prevent any races with incoming call setup.
(6) In rxrpc_input_packet(), use cmpxchg() to effect the service upgrade
on a connection. It is only permitted to happen once for a
connection.
(7) In rxrpc_new_incoming_call(), we have to recheck the routing inside
rx->incoming_lock to see if someone else set up the call, connection
or peer whilst we were getting there. We can't trust the values from
the earlier routing check unless we pin refs on them - which we want
to avoid.
Further, we need to allow for an incoming call to have its state
changed on another CPU between us making it live and us adjusting it
because the conn is now in the RXRPC_CONN_SERVICE state.
(8) In rxrpc_peer_add_rtt(), take peer->rtt_input_lock around the access
to the RTT buffer. There's no need to lock around setting peer->rtt
itself. (Points (2), (6) and (8) are sketched in code below.)
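To illustrate points (2), (6) and (8), the patterns look roughly like the
following. This is a minimal sketch with simplified, hypothetical function
names and argument lists, not the literal hunks from the patch; it assumes
rxrpc's internal types from ar-internal.h.

	/* (6) One-shot service upgrade: cmpxchg() lets exactly one CPU
	 * switch the connection's service ID; concurrent callers lose
	 * the race benignly.
	 */
	static void sketch_service_upgrade(struct rxrpc_connection *conn,
					   u32 old_id, u32 new_id)
	{
		cmpxchg(&conn->service_id, old_id, new_id);
	}

	/* (2) Ping response: read call->ping_serial only once, compare
	 * serials first as the cheaper test, then combine the bit test
	 * and bit clear; no further locking is needed.
	 */
	static void sketch_input_ping_response(struct rxrpc_call *call,
					       rxrpc_serial_t orig_serial)
	{
		rxrpc_serial_t ping_serial = READ_ONCE(call->ping_serial);

		if (orig_serial != ping_serial ||
		    !test_and_clear_bit(RXRPC_CALL_PINGING, &call->flags))
			return;
		/* ... feed the matched ping response into RTT tracking ... */
	}

	/* (8) RTT recording: the sample buffer is guarded by the new
	 * lock, but peer->rtt itself is set outside it.
	 */
	static void sketch_peer_add_rtt(struct rxrpc_peer *peer, u64 rtt_us)
	{
		spin_lock(&peer->rtt_input_lock);
		/* ... insert rtt_us into the peer's RTT sample buffer ... */
		spin_unlock(&peer->rtt_input_lock);
		/* ... update peer->rtt (the average) without the lock ... */
	}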
For reference, the inventory of state-accessing or state-altering functions
used by the packet input procedure is:
> rxrpc_input_packet()
  * PACKET CHECKING
  * ROUTING
    > rxrpc_post_packet_to_local()
    > rxrpc_find_connection_rcu() - uses RCU
      > rxrpc_lookup_peer_rcu() - uses RCU
      > rxrpc_find_service_conn_rcu() - uses RCU
      > idr_find() - uses RCU
  * CONNECTION-LEVEL PROCESSING
    - Service upgrade
      - Can only happen once per conn
      ! Changed to use cmpxchg
    > rxrpc_post_packet_to_conn()
    - Setting conn->hi_serial
      - Probably safe not using locks
      - Maybe use cmpxchg
  * CALL-LEVEL PROCESSING
    > Old-call checking
      > rxrpc_input_implicit_end_call()
        > rxrpc_call_completed()
        > rxrpc_queue_call()
        ! Need to take rx->incoming_lock
      > __rxrpc_disconnect_call()
      > rxrpc_notify_socket()
    > rxrpc_new_incoming_call()
      - Uses rx->incoming_lock for the entire process
        - Might be able to drop this earlier in favour of the call lock
      > rxrpc_incoming_call()
        ! Conflicts with rxrpc_input_implicit_end_call()
    > rxrpc_send_ping()
      - Don't need locks to check rtt state
      > rxrpc_propose_ACK
  * PACKET DISTRIBUTION
    > rxrpc_input_call_packet()
      > rxrpc_input_data()
        * QUEUE DATA PACKET ON CALL
        > rxrpc_reduce_call_timer()
          - Uses timer_reduce()
        ! Needs call->input_lock()
        > rxrpc_receiving_reply()
          ! Needs locking around ack state
          > rxrpc_rotate_tx_window()
          > rxrpc_end_tx_phase()
        > rxrpc_proto_abort()
        > rxrpc_input_dup_data()
        - Fills the Rx buffer
        - rxrpc_propose_ACK()
        - rxrpc_notify_socket()
      > rxrpc_input_ack()
        * APPLY ACK PACKET TO CALL AND DISCARD PACKET
        > rxrpc_input_ping_response()
          - Probably doesn't need any extra locking
          ! Need READ_ONCE() on call->ping_serial
          > rxrpc_input_check_for_lost_ack()
            - Takes call->lock to consult Tx buffer
          > rxrpc_peer_add_rtt()
            ! Needs to take a lock (peer->rtt_input_lock)
            ! Could perhaps manage with cmpxchg() and xadd() instead
        > rxrpc_input_requested_ack
          - Consults Tx buffer
          ! Probably needs a lock
          > rxrpc_peer_add_rtt()
        > rxrpc_propose_ack()
        > rxrpc_input_ackinfo()
          - Changes call->tx_winsize
          ! Use cmpxchg to handle change
          ! Should perhaps track serial number
          - Uses peer->lock to record MTU specification changes
        > rxrpc_proto_abort()
        ! Need to take call->input_lock
        > rxrpc_rotate_tx_window()
        > rxrpc_end_tx_phase()
        > rxrpc_input_soft_acks()
          - Consults the Tx buffer
        > rxrpc_congestion_management()
          - Modifies the Tx annotations
          ! Needs call->input_lock()
          > rxrpc_queue_call()
      > rxrpc_input_abort()
        * APPLY ABORT PACKET TO CALL AND DISCARD PACKET
        > rxrpc_set_call_completion()
          > rxrpc_notify_socket()
      > rxrpc_input_ackall()
        * APPLY ACKALL PACKET TO CALL AND DISCARD PACKET
        ! Need to take call->input_lock
        > rxrpc_rotate_tx_window()
        > rxrpc_end_tx_phase()
> rxrpc_reject_packet()
There are some functions used by the above that queue the packet, after
which the procedure is terminated:
- rxrpc_post_packet_to_local()
  - local->event_queue is an sk_buff_head
  - local->processor is a work_struct
- rxrpc_post_packet_to_conn()
  - conn->rx_queue is an sk_buff_head
  - conn->processor is a work_struct
- rxrpc_reject_packet()
  - local->reject_queue is an sk_buff_head
  - local->processor is a work_struct
And some that offload processing to process context:
- rxrpc_notify_socket()
  - Uses RCU lock
  - Uses call->notify_lock to call call->notify_rx
  - Uses call->recvmsg_lock to queue recvmsg side
- rxrpc_queue_call()
  - call->processor is a work_struct
- rxrpc_propose_ACK()
  - Uses call->lock to wrap __rxrpc_propose_ACK()
And a bunch that complete a call, all of which use call->state_lock to
protect the call state:
- rxrpc_call_completed()
- rxrpc_set_call_completion()
- rxrpc_abort_call()
- rxrpc_proto_abort()
  - Also uses rxrpc_queue_call()
Fixes: 17926a7932 ("[AF_RXRPC]: Provide secure RxRPC sockets for use by userspace and kernel both")
Signed-off-by: David Howells <dhowells@redhat.com>
/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp,
				      unsigned int debug_id)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max -= tmp;

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
	max = tmp + 1;

	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(rxnet, gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));

		trace_rxrpc_conn(conn, rxrpc_conn_new_service,
				 atomic_read(&conn->usage), here);
	}

	/* Now it gets complicated, because calls get registered with the
	 * socket here, particularly if a user ID is preassigned by the user.
	 */
	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	call->state = RXRPC_CALL_SERVER_PREALLOC;

	trace_rxrpc_call(call, rxrpc_call_new_service,
			 atomic_read(&call->usage),
			 here, (const void *)user_call_ID);

	write_lock(&rx->call_lock);
	if (user_attach_call) {
		struct rxrpc_call *xcall;
		struct rb_node *parent, **pp;

		/* Check the user ID isn't already in use */
		pp = &rx->calls.rb_node;
		parent = NULL;
		while (*pp) {
			parent = *pp;
			xcall = rb_entry(parent, struct rxrpc_call, sock_node);
			if (user_call_ID < xcall->user_call_ID)
				pp = &(*pp)->rb_left;
			else if (user_call_ID > xcall->user_call_ID)
				pp = &(*pp)->rb_right;
			else
				goto id_in_use;
		}

		call->user_call_ID = user_call_ID;
		call->notify_rx = notify_rx;
		rxrpc_get_call(call, rxrpc_call_got_kernel);
		user_attach_call(call, user_call_ID);
		rxrpc_get_call(call, rxrpc_call_got_userid);
		rb_link_node(&call->sock_node, parent, pp);
		rb_insert_color(&call->sock_node, &rx->calls);
		set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}

	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	rxnet = call->rxnet;
	write_lock(&rxnet->call_lock);
	list_add_tail(&call->link, &rxnet->calls);
	write_unlock(&rxnet->call_lock);

	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}

/*
 * Preallocate sufficient service connections, calls and peers to cover the
 * entire backlog of a socket.  When a new call comes in, if we don't have
 * sufficient of each available, the call gets rejected as busy or ignored.
 *
 * The backlog is replenished when a connection is accepted or rejected.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (!b) {
		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
		if (!b)
			return -ENOMEM;
		rx->backlog = b;
	}

	if (rx->discard_new_call)
		return 0;

	while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp,
					  atomic_inc_return(&rxrpc_debug_id)) == 0)
		;

	return 0;
}

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	/* Make sure that there aren't any incoming calls in progress before we
	 * clear the preallocation buffers.
	 */
	spin_lock_bh(&rx->incoming_lock);
	spin_unlock_bh(&rx->incoming_lock);

	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];
		write_lock(&rxnet->conn_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxnet->conn_lock);
		kfree(conn);
		if (atomic_dec_and_test(&rxnet->nr_conns))
			wake_up_var(&rxnet->nr_conns);
		tail = (tail + 1) & (size - 1);
	}

	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];
		rcu_assign_pointer(call->socket, rx);
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
			rxrpc_put_call(call, rxrpc_call_put_kernel);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}

/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_peer *peer,
						    struct rxrpc_connection *conn,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;

	/* #calls >= #conns >= #peers must hold true. */
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);

	if (call_count == 0)
		return NULL;

	if (!conn) {
		if (peer && !rxrpc_get_peer_maybe(peer))
			peer = NULL;
		if (!peer) {
			peer = b->peer_backlog[peer_tail];
			if (rxrpc_extract_addr_from_skb(local, &peer->srx, skb) < 0)
				return NULL;
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));

			rxrpc_new_incoming_peer(rx, local, peer);
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		conn->params.local = rxrpc_get_local(local);
		conn->params.peer = peer;
		rxrpc_see_connection(conn);
		rxrpc_new_incoming_connection(rx, conn, skb);
	} else {
		rxrpc_get_connection(conn);
	}

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call);
	call->conn = conn;
	call->peer = rxrpc_get_peer(conn->params.peer);
	call->cong_cwnd = call->peer->cong_cwnd;
	return call;
}

/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.  We want to pass this
 * ref to non-BH context to dispose of.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 *
 * The call is returned with the user access mutex held.
 */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
					   struct rxrpc_sock *rx,
					   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_connection *conn;
	struct rxrpc_peer *peer;
	struct rxrpc_call *call;

	_enter("");

	spin_lock(&rx->incoming_lock);
	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
	    rx->sk.sk_state == RXRPC_CLOSE) {
		trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
		skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
		skb->priority = RX_INVALID_OPERATION;
		_leave(" = NULL [close]");
		call = NULL;
		goto out;
	}

	/* The peer, connection and call may all have sprung into existence due
	 * to a duplicate packet being handled on another CPU in parallel, so
	 * we have to recheck the routing.  However, we're now holding
	 * rx->incoming_lock, so the values should remain stable.
	 */
	conn = rxrpc_find_connection_rcu(local, skb, &peer);

	call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
	if (!call) {
		skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
		_leave(" = NULL [busy]");
		call = NULL;
		goto out;
	}

	trace_rxrpc_receive(call, rxrpc_receive_incoming,
			    sp->hdr.serial, sp->hdr.seq);

	/* Lock the call to prevent rxrpc_kernel_send/recv_data() and
	 * sendmsg()/recvmsg() inconveniently stealing the mutex once the
	 * notification is generated.
	 *
	 * The BUG should never happen because the kernel should be well
	 * behaved enough not to access the call before the first notification
	 * event and userspace is prevented from doing so until the state is
	 * appropriate.
	 */
	if (!mutex_trylock(&call->user_mutex))
		BUG();

	/* Make the call live. */
	rxrpc_incoming_call(rx, call, skb);
	conn = call->conn;

	if (rx->notify_new_call)
		rx->notify_new_call(&rx->sk, call, call->user_call_ID);
	else
		sk_acceptq_added(&rx->sk);

	spin_lock(&conn->state_lock);
	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn);
		break;

	case RXRPC_CONN_SERVICE:
		write_lock(&call->state_lock);
		if (call->state < RXRPC_CALL_COMPLETE) {
			if (rx->discard_new_call)
				call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
			else
				call->state = RXRPC_CALL_SERVER_ACCEPTING;
		}
		write_unlock(&call->state_lock);
		break;

	case RXRPC_CONN_REMOTELY_ABORTED:
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  conn->abort_code, conn->error);
		break;
	case RXRPC_CONN_LOCALLY_ABORTED:
		rxrpc_abort_call("CON", call, sp->hdr.seq,
				 conn->abort_code, conn->error);
		break;
	default:
		BUG();
	}
	spin_unlock(&conn->state_lock);

	if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
		rxrpc_notify_socket(call);

	/* We have to discard the prealloc queue's ref here and rely on a
	 * combination of the RCU read lock and refs held either by the socket
	 * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
	 * service to prevent the call from being deallocated too early.
	 */
	rxrpc_put_call(call, rxrpc_call_put);

	_leave(" = %p{%d}", call, call->debug_id);
out:
	spin_unlock(&rx->incoming_lock);
	return call;
}

/*
 * handle acceptance of a call by userspace
 * - assign the user call ID to the call at the front of the queue
 * - called with the socket locked.
 */
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
				     unsigned long user_call_ID,
				     rxrpc_notify_rx_t notify_rx)
	__releases(&rx->sk.sk_lock.slock)
	__acquires(call->user_mutex)
{
	struct rxrpc_call *call;
	struct rb_node *parent, **pp;
	int ret;

	_enter(",%lx", user_call_ID);

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		release_sock(&rx->sk);
		kleave(" = -ENODATA [empty]");
		return ERR_PTR(-ENODATA);
	}

	/* check the user ID isn't already in use */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto id_in_use;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	write_unlock(&rx->call_lock);

	/* We need to gain the mutex from the interrupt handler without
	 * upsetting lockdep, so we have to release it there and take it here.
	 * We are, however, still holding the socket lock, so other accepts
	 * must wait for us and no one can add the user ID behind our backs.
	 */
	if (mutex_lock_interruptible(&call->user_mutex) < 0) {
		release_sock(&rx->sk);
		kleave(" = -ERESTARTSYS");
		return ERR_PTR(-ERESTARTSYS);
	}

	write_lock(&rx->call_lock);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	/* Find the user ID insertion point. */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			BUG();
	}

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		break;
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_release;
	default:
		BUG();
	}

	/* formalise the acceptance */
	call->notify_rx = notify_rx;
	call->user_call_ID = user_call_ID;
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
		BUG();

	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_notify_socket(call);
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %p{%d}", call, call->debug_id);
	return call;

out_release:
	_debug("release %p", call);
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_release_call(rx, call);
	rxrpc_put_call(call, rxrpc_call_put);
	goto out;

id_in_use:
	ret = -EBADSLT;
	write_unlock(&rx->call_lock);
out:
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * Handle rejection of a call by userspace
 * - reject the call at the front of the queue
 */
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	bool abort = false;
	int ret;

	_enter("");

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		return -ENODATA;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		__rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED);
		abort = true;
		/* fall through */
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_discard;
	default:
		BUG();
	}

out_discard:
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	if (abort) {
		rxrpc_send_abort_packet(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
	}
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	_leave(" = %d", ret);
	return ret;
}

/*
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 * @debug_id: The tracing debug ID.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
			       rxrpc_notify_rx_t notify_rx,
			       rxrpc_user_attach_call_t user_attach_call,
			       unsigned long user_call_ID, gfp_t gfp,
			       unsigned int debug_id)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_backlog *b = rx->backlog;

	if (sock->sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, notify_rx,
					  user_attach_call, user_call_ID,
					  gfp, debug_id);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);