xfs: allow multiple reclaimers per AG
Inode reclaim will still throttle direct reclaim on the per-ag reclaim locks. This is no longer necessary as reclaim can run non-blocking now. Hence we can remove these locks so that we don't arbitrarily block reclaimers just because there are more direct reclaimers than there are AGs. This can result in multiple reclaimers working on the same range of an AG, but this doesn't cause any apparent issues. Optimising the spread of concurrent reclaimers for best efficiency can be done in a future patchset. Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com> Reviewed-by: Brian Foster <bfoster@redhat.com> Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
This commit is contained in:
parent
617825fe34
commit
0e8e2c6343
@@ -1211,12 +1211,9 @@ xfs_reclaim_inodes_ag(
|
||||
int *nr_to_scan)
|
||||
{
|
||||
struct xfs_perag *pag;
|
||||
xfs_agnumber_t ag;
|
||||
int trylock = flags & SYNC_TRYLOCK;
|
||||
int skipped;
|
||||
xfs_agnumber_t ag = 0;
|
||||
int skipped = 0;
|
||||
|
||||
ag = 0;
|
||||
skipped = 0;
|
||||
while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
|
||||
unsigned long first_index = 0;
|
||||
int done = 0;
|
||||
@@ -1224,15 +1221,13 @@ xfs_reclaim_inodes_ag(
|
||||
|
||||
ag = pag->pag_agno + 1;
|
||||
|
||||
if (trylock) {
|
||||
if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
|
||||
skipped++;
|
||||
xfs_perag_put(pag);
|
||||
continue;
|
||||
}
|
||||
first_index = pag->pag_ici_reclaim_cursor;
|
||||
} else
|
||||
mutex_lock(&pag->pag_ici_reclaim_lock);
|
||||
/*
|
||||
* If the cursor is not zero, we haven't scanned the whole AG
|
||||
* so we might have skipped inodes here.
|
||||
*/
|
||||
first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
|
||||
if (first_index)
|
||||
skipped++;
|
||||
|
||||
do {
|
||||
struct xfs_inode *batch[XFS_LOOKUP_BATCH];
|
||||
@@ -1298,11 +1293,9 @@ xfs_reclaim_inodes_ag(
|
||||
|
||||
} while (nr_found && !done && *nr_to_scan > 0);
|
||||
|
||||
if (trylock && !done)
|
||||
pag->pag_ici_reclaim_cursor = first_index;
|
||||
else
|
||||
pag->pag_ici_reclaim_cursor = 0;
|
||||
mutex_unlock(&pag->pag_ici_reclaim_lock);
|
||||
if (done)
|
||||
first_index = 0;
|
||||
WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
|
||||
xfs_perag_put(pag);
|
||||
}
|
||||
return skipped;
|
||||
|
@@ -148,7 +148,6 @@ xfs_free_perag(
|
||||
ASSERT(atomic_read(&pag->pag_ref) == 0);
|
||||
xfs_iunlink_destroy(pag);
|
||||
xfs_buf_hash_destroy(pag);
|
||||
mutex_destroy(&pag->pag_ici_reclaim_lock);
|
||||
call_rcu(&pag->rcu_head, __xfs_free_perag);
|
||||
}
|
||||
}
|
||||
@@ -200,7 +199,6 @@ xfs_initialize_perag(
|
||||
pag->pag_agno = index;
|
||||
pag->pag_mount = mp;
|
||||
spin_lock_init(&pag->pag_ici_lock);
|
||||
mutex_init(&pag->pag_ici_reclaim_lock);
|
||||
INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
|
||||
if (xfs_buf_hash_init(pag))
|
||||
goto out_free_pag;
|
||||
@@ -242,7 +240,6 @@ xfs_initialize_perag(
|
||||
out_hash_destroy:
|
||||
xfs_buf_hash_destroy(pag);
|
||||
out_free_pag:
|
||||
mutex_destroy(&pag->pag_ici_reclaim_lock);
|
||||
kmem_free(pag);
|
||||
out_unwind_new_pags:
|
||||
/* unwind any prior newly initialized pags */
|
||||
@@ -252,7 +249,6 @@ out_unwind_new_pags:
|
||||
break;
|
||||
xfs_buf_hash_destroy(pag);
|
||||
xfs_iunlink_destroy(pag);
|
||||
mutex_destroy(&pag->pag_ici_reclaim_lock);
|
||||
kmem_free(pag);
|
||||
}
|
||||
return error;
|
||||
|
@@ -354,7 +354,6 @@ typedef struct xfs_perag {
|
||||
spinlock_t pag_ici_lock; /* incore inode cache lock */
|
||||
struct radix_tree_root pag_ici_root; /* incore inode cache root */
|
||||
int pag_ici_reclaimable; /* reclaimable inodes */
|
||||
struct mutex pag_ici_reclaim_lock; /* serialisation point */
|
||||
unsigned long pag_ici_reclaim_cursor; /* reclaim restart point */
|
||||
|
||||
/* buffer cache index */
|
||||
|
Loading…
Reference in New Issue
Block a user