nfsd: simplify the delayed disposal list code

When queueing a dispose list to the appropriate "freeme" lists, it
pointlessly queues the objects one at a time to an intermediate list.

Remove a few helpers and just open code a list_move to make it more
clear and efficient. Better document the resulting functions with
kerneldoc comments.

Signed-off-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
This commit is contained in:
Jeff Layton 2023-04-14 17:31:44 -04:00 committed by Chuck Lever
parent 55fcc7d915
commit 92e4a6733f

View File

@@ -402,49 +402,26 @@ nfsd_file_dispose_list(struct list_head *dispose)
} }
} }
static void /**
nfsd_file_list_remove_disposal(struct list_head *dst, * nfsd_file_dispose_list_delayed - move list of dead files to net's freeme list
struct nfsd_fcache_disposal *l) * @dispose: list of nfsd_files to be disposed
{ *
spin_lock(&l->lock); * Transfers each file to the "freeme" list for its nfsd_net, to eventually
list_splice_init(&l->freeme, dst); * be disposed of by the per-net garbage collector.
spin_unlock(&l->lock); */
}
static void
nfsd_file_list_add_disposal(struct list_head *files, struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct nfsd_fcache_disposal *l = nn->fcache_disposal;
spin_lock(&l->lock);
list_splice_tail_init(files, &l->freeme);
spin_unlock(&l->lock);
queue_work(nfsd_filecache_wq, &l->work);
}
static void
nfsd_file_list_add_pernet(struct list_head *dst, struct list_head *src,
struct net *net)
{
struct nfsd_file *nf, *tmp;
list_for_each_entry_safe(nf, tmp, src, nf_lru) {
if (nf->nf_net == net)
list_move_tail(&nf->nf_lru, dst);
}
}
static void static void
nfsd_file_dispose_list_delayed(struct list_head *dispose) nfsd_file_dispose_list_delayed(struct list_head *dispose)
{ {
LIST_HEAD(list);
struct nfsd_file *nf;
while(!list_empty(dispose)) { while(!list_empty(dispose)) {
nf = list_first_entry(dispose, struct nfsd_file, nf_lru); struct nfsd_file *nf = list_first_entry(dispose,
nfsd_file_list_add_pernet(&list, dispose, nf->nf_net); struct nfsd_file, nf_lru);
nfsd_file_list_add_disposal(&list, nf->nf_net); struct nfsd_net *nn = net_generic(nf->nf_net, nfsd_net_id);
struct nfsd_fcache_disposal *l = nn->fcache_disposal;
spin_lock(&l->lock);
list_move_tail(&nf->nf_lru, &l->freeme);
spin_unlock(&l->lock);
queue_work(nfsd_filecache_wq, &l->work);
} }
} }
@ -665,8 +642,8 @@ nfsd_file_close_inode_sync(struct inode *inode)
* nfsd_file_delayed_close - close unused nfsd_files * nfsd_file_delayed_close - close unused nfsd_files
* @work: dummy * @work: dummy
* *
* Walk the LRU list and destroy any entries that have not been used since * Scrape the freeme list for this nfsd_net, and then dispose of them
* the last scan. * all.
*/ */
static void static void
nfsd_file_delayed_close(struct work_struct *work) nfsd_file_delayed_close(struct work_struct *work)
@ -675,7 +652,10 @@ nfsd_file_delayed_close(struct work_struct *work)
struct nfsd_fcache_disposal *l = container_of(work, struct nfsd_fcache_disposal *l = container_of(work,
struct nfsd_fcache_disposal, work); struct nfsd_fcache_disposal, work);
nfsd_file_list_remove_disposal(&head, l); spin_lock(&l->lock);
list_splice_init(&l->freeme, &head);
spin_unlock(&l->lock);
nfsd_file_dispose_list(&head); nfsd_file_dispose_list(&head);
} }