af_unix: Remove lock dance in unix_peek_fds().
In the previous GC implementation, the shape of the inflight socket graph was not expected to change while GC was in progress. MSG_PEEK was tricky because it could silently install an inflight fd and transform the graph.

Let's say we peeked an fd that was a listening socket and accept()ed some embryo sockets from it. The garbage collection algorithm would have been confused because the set of sockets visited in scan_inflight() would change within the same GC invocation. That's why we placed the spin_lock(&unix_gc_lock) and spin_unlock() pair in unix_peek_fds() with a fat comment.

In the new GC implementation, we no longer garbage-collect a socket if it exists in another queue, that is, if it has a bridge to another SCC. Also, accept() takes the lock if the socket has edges. Thus, we need not do the complicated lock dance.

Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Link: https://lore.kernel.org/r/20240401173125.92184-3-kuniyu@amazon.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 7c349ed090
commit 118f457da9
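To illustrate the scenario described in the commit message, here is a minimal userspace sketch (not part of the patch; the helper name peek_one_fd is made up) of how recvmsg(MSG_PEEK) on an AF_UNIX socket installs a new fd for an SCM_RIGHTS-passed socket without dequeuing the message, so the same in-flight socket can later be received again or, if it is a listener, accept()ed from:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Peek one SCM_RIGHTS fd from @sock without dequeuing the message.
 * Returns the newly installed fd, or -1 if none was attached.
 */
static int peek_one_fd(int sock)
{
	char data[1];
	char cbuf[CMSG_SPACE(sizeof(int))];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = cbuf,
		.msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	int fd = -1;

	if (recvmsg(sock, &msg, MSG_PEEK) < 0)
		return -1;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_RIGHTS) {
			memcpy(&fd, CMSG_DATA(cmsg), sizeof(fd));
			break;
		}
	}

	return fd;
}

Because the peeked message stays on the receive queue, the passed socket remains in flight even though a new descriptor for it now exists; that is exactly the kind of graph change the old GC had to guard against in unix_peek_fds().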
@@ -17,7 +17,6 @@ static inline struct unix_sock *unix_get_socket(struct file *filp)
 }
 #endif
 
-extern spinlock_t unix_gc_lock;
 extern unsigned int unix_tot_inflight;
 void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver);
 void unix_del_edges(struct scm_fp_list *fpl);
@@ -1814,48 +1814,6 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
 static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
 {
 	scm->fp = scm_fp_dup(UNIXCB(skb).fp);
-
-	/*
-	 * Garbage collection of unix sockets starts by selecting a set of
-	 * candidate sockets which have reference only from being in flight
-	 * (total_refs == inflight_refs). This condition is checked once during
-	 * the candidate collection phase, and candidates are marked as such, so
-	 * that non-candidates can later be ignored. While inflight_refs is
-	 * protected by unix_gc_lock, total_refs (file count) is not, hence this
-	 * is an instantaneous decision.
-	 *
-	 * Once a candidate, however, the socket must not be reinstalled into a
-	 * file descriptor while the garbage collection is in progress.
-	 *
-	 * If the above conditions are met, then the directed graph of
-	 * candidates (*) does not change while unix_gc_lock is held.
-	 *
-	 * Any operations that changes the file count through file descriptors
-	 * (dup, close, sendmsg) does not change the graph since candidates are
-	 * not installed in fds.
-	 *
-	 * Dequeing a candidate via recvmsg would install it into an fd, but
-	 * that takes unix_gc_lock to decrement the inflight count, so it's
-	 * serialized with garbage collection.
-	 *
-	 * MSG_PEEK is special in that it does not change the inflight count,
-	 * yet does install the socket into an fd. The following lock/unlock
-	 * pair is to ensure serialization with garbage collection. It must be
-	 * done between incrementing the file count and installing the file into
-	 * an fd.
-	 *
-	 * If garbage collection starts after the barrier provided by the
-	 * lock/unlock, then it will see the elevated refcount and not mark this
-	 * as a candidate. If a garbage collection is already in progress
-	 * before the file count was incremented, then the lock/unlock pair will
-	 * ensure that garbage collection is finished before progressing to
-	 * installing the fd.
-	 *
-	 * (*) A -> B where B is on the queue of A or B is on the queue of C
-	 * which is on the queue of listening socket A.
-	 */
-	spin_lock(&unix_gc_lock);
-	spin_unlock(&unix_gc_lock);
 }
 
 static void unix_destruct_scm(struct sk_buff *skb)
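The comment deleted above hinges on the "empty lock/unlock pair as a barrier" idiom: taking and immediately releasing unix_gc_lock cannot complete while a GC pass holds the lock, and any GC pass that starts afterwards sees the already elevated file count. Below is a minimal userspace sketch of the same idiom, using a pthread mutex instead of a kernel spinlock; gc_lock and wait_for_in_progress_gc() are illustrative names, not kernel APIs:

#include <pthread.h>

static pthread_mutex_t gc_lock = PTHREAD_MUTEX_INITIALIZER;

static void wait_for_in_progress_gc(void)
{
	/* If a collector already holds gc_lock, block here until it finishes;
	 * if it starts after this point, it observes whatever state was
	 * published before this call (e.g. an elevated file count).
	 */
	pthread_mutex_lock(&gc_lock);
	pthread_mutex_unlock(&gc_lock);
}

As the commit message explains, the new GC no longer needs this barrier in unix_peek_fds(): a socket reachable from another queue (a bridge to another SCC) is never collected, and accept() takes the lock when the socket has edges.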
@@ -183,7 +183,7 @@ static void unix_free_vertices(struct scm_fp_list *fpl)
 	}
 }
 
-DEFINE_SPINLOCK(unix_gc_lock);
+static DEFINE_SPINLOCK(unix_gc_lock);
 unsigned int unix_tot_inflight;
 
 void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver)