xfs: use GFP_KERNEL in pure transaction contexts
When running in a transaction context, memory allocations are scoped to GFP_NOFS. Hence we don't need to use GFP_NOFS contexts in pure transaction context allocations - GFP_KERNEL will automatically get converted to GFP_NOFS as appropriate.

Go through the code and convert all the obvious GFP_NOFS allocations in transaction context to use GFP_KERNEL. This further reduces the explicit use of GFP_NOFS in XFS.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: "Darrick J. Wong" <djwong@kernel.org>
Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>
This commit is contained in:
parent
94a69db236
commit
0b3a76e955
@ -891,7 +891,8 @@ xfs_attr_defer_add(
|
||||
|
||||
struct xfs_attr_intent *new;
|
||||
|
||||
new = kmem_cache_zalloc(xfs_attr_intent_cache, GFP_NOFS | __GFP_NOFAIL);
|
||||
new = kmem_cache_zalloc(xfs_attr_intent_cache,
|
||||
GFP_KERNEL | __GFP_NOFAIL);
|
||||
new->xattri_op_flags = op_flags;
|
||||
new->xattri_da_args = args;
|
||||
|
||||
|
@ -6098,7 +6098,7 @@ __xfs_bmap_add(
|
||||
bmap->br_blockcount,
|
||||
bmap->br_state);
|
||||
|
||||
bi = kmem_cache_alloc(xfs_bmap_intent_cache, GFP_NOFS | __GFP_NOFAIL);
|
||||
bi = kmem_cache_alloc(xfs_bmap_intent_cache, GFP_KERNEL | __GFP_NOFAIL);
|
||||
INIT_LIST_HEAD(&bi->bi_list);
|
||||
bi->bi_type = type;
|
||||
bi->bi_owner = ip;
|
||||
|
@ -825,7 +825,7 @@ xfs_defer_alloc(
|
||||
struct xfs_defer_pending *dfp;
|
||||
|
||||
dfp = kmem_cache_zalloc(xfs_defer_pending_cache,
|
||||
GFP_NOFS | __GFP_NOFAIL);
|
||||
GFP_KERNEL | __GFP_NOFAIL);
|
||||
dfp->dfp_ops = ops;
|
||||
INIT_LIST_HEAD(&dfp->dfp_work);
|
||||
list_add_tail(&dfp->dfp_list, &tp->t_dfops);
|
||||
@ -888,7 +888,7 @@ xfs_defer_start_recovery(
|
||||
struct xfs_defer_pending *dfp;
|
||||
|
||||
dfp = kmem_cache_zalloc(xfs_defer_pending_cache,
|
||||
GFP_NOFS | __GFP_NOFAIL);
|
||||
GFP_KERNEL | __GFP_NOFAIL);
|
||||
dfp->dfp_ops = ops;
|
||||
dfp->dfp_intent = lip;
|
||||
INIT_LIST_HEAD(&dfp->dfp_work);
|
||||
@ -979,7 +979,7 @@ xfs_defer_ops_capture(
|
||||
return ERR_PTR(error);
|
||||
|
||||
/* Create an object to capture the defer ops. */
|
||||
dfc = kzalloc(sizeof(*dfc), GFP_NOFS | __GFP_NOFAIL);
|
||||
dfc = kzalloc(sizeof(*dfc), GFP_KERNEL | __GFP_NOFAIL);
|
||||
INIT_LIST_HEAD(&dfc->dfc_list);
|
||||
INIT_LIST_HEAD(&dfc->dfc_dfops);
|
||||
|
||||
|
@ -236,7 +236,7 @@ xfs_dir_init(
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
args = kzalloc(sizeof(*args), GFP_NOFS | __GFP_NOFAIL);
|
||||
args = kzalloc(sizeof(*args), GFP_KERNEL | __GFP_NOFAIL);
|
||||
if (!args)
|
||||
return -ENOMEM;
|
||||
|
||||
@ -273,7 +273,7 @@ xfs_dir_createname(
|
||||
XFS_STATS_INC(dp->i_mount, xs_dir_create);
|
||||
}
|
||||
|
||||
args = kzalloc(sizeof(*args), GFP_NOFS | __GFP_NOFAIL);
|
||||
args = kzalloc(sizeof(*args), GFP_KERNEL | __GFP_NOFAIL);
|
||||
if (!args)
|
||||
return -ENOMEM;
|
||||
|
||||
@ -435,7 +435,7 @@ xfs_dir_removename(
|
||||
ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
|
||||
XFS_STATS_INC(dp->i_mount, xs_dir_remove);
|
||||
|
||||
args = kzalloc(sizeof(*args), GFP_NOFS | __GFP_NOFAIL);
|
||||
args = kzalloc(sizeof(*args), GFP_KERNEL | __GFP_NOFAIL);
|
||||
if (!args)
|
||||
return -ENOMEM;
|
||||
|
||||
@ -496,7 +496,7 @@ xfs_dir_replace(
|
||||
if (rval)
|
||||
return rval;
|
||||
|
||||
args = kzalloc(sizeof(*args), GFP_NOFS | __GFP_NOFAIL);
|
||||
args = kzalloc(sizeof(*args), GFP_KERNEL | __GFP_NOFAIL);
|
||||
if (!args)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -402,7 +402,7 @@ xfs_iroot_realloc(
|
||||
if (ifp->if_broot_bytes == 0) {
|
||||
new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, rec_diff);
|
||||
ifp->if_broot = kmalloc(new_size,
|
||||
GFP_NOFS | __GFP_NOFAIL);
|
||||
GFP_KERNEL | __GFP_NOFAIL);
|
||||
ifp->if_broot_bytes = (int)new_size;
|
||||
return;
|
||||
}
|
||||
@ -417,7 +417,7 @@ xfs_iroot_realloc(
|
||||
new_max = cur_max + rec_diff;
|
||||
new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, new_max);
|
||||
ifp->if_broot = krealloc(ifp->if_broot, new_size,
|
||||
GFP_NOFS | __GFP_NOFAIL);
|
||||
GFP_KERNEL | __GFP_NOFAIL);
|
||||
op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
|
||||
ifp->if_broot_bytes);
|
||||
np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
|
||||
@ -443,7 +443,7 @@ xfs_iroot_realloc(
|
||||
else
|
||||
new_size = 0;
|
||||
if (new_size > 0) {
|
||||
new_broot = kmalloc(new_size, GFP_NOFS | __GFP_NOFAIL);
|
||||
new_broot = kmalloc(new_size, GFP_KERNEL | __GFP_NOFAIL);
|
||||
/*
|
||||
* First copy over the btree block header.
|
||||
*/
|
||||
@ -512,7 +512,7 @@ xfs_idata_realloc(
|
||||
|
||||
if (byte_diff) {
|
||||
ifp->if_data = krealloc(ifp->if_data, new_size,
|
||||
GFP_NOFS | __GFP_NOFAIL);
|
||||
GFP_KERNEL | __GFP_NOFAIL);
|
||||
if (new_size == 0)
|
||||
ifp->if_data = NULL;
|
||||
ifp->if_bytes = new_size;
|
||||
|
@ -1449,7 +1449,7 @@ __xfs_refcount_add(
|
||||
blockcount);
|
||||
|
||||
ri = kmem_cache_alloc(xfs_refcount_intent_cache,
|
||||
GFP_NOFS | __GFP_NOFAIL);
|
||||
GFP_KERNEL | __GFP_NOFAIL);
|
||||
INIT_LIST_HEAD(&ri->ri_list);
|
||||
ri->ri_type = type;
|
||||
ri->ri_startblock = startblock;
|
||||
|
@ -2559,7 +2559,7 @@ __xfs_rmap_add(
|
||||
bmap->br_blockcount,
|
||||
bmap->br_state);
|
||||
|
||||
ri = kmem_cache_alloc(xfs_rmap_intent_cache, GFP_NOFS | __GFP_NOFAIL);
|
||||
ri = kmem_cache_alloc(xfs_rmap_intent_cache, GFP_KERNEL | __GFP_NOFAIL);
|
||||
INIT_LIST_HEAD(&ri->ri_list);
|
||||
ri->ri_type = type;
|
||||
ri->ri_owner = owner;
|
||||
|
@ -226,7 +226,7 @@ xfs_attri_init(
|
||||
{
|
||||
struct xfs_attri_log_item *attrip;
|
||||
|
||||
attrip = kmem_cache_zalloc(xfs_attri_cache, GFP_NOFS | __GFP_NOFAIL);
|
||||
attrip = kmem_cache_zalloc(xfs_attri_cache, GFP_KERNEL | __GFP_NOFAIL);
|
||||
|
||||
/*
|
||||
* Grab an extra reference to the name/value buffer for this log item.
|
||||
@ -666,7 +666,7 @@ xfs_attr_create_done(
|
||||
|
||||
attrip = ATTRI_ITEM(intent);
|
||||
|
||||
attrdp = kmem_cache_zalloc(xfs_attrd_cache, GFP_NOFS | __GFP_NOFAIL);
|
||||
attrdp = kmem_cache_zalloc(xfs_attrd_cache, GFP_KERNEL | __GFP_NOFAIL);
|
||||
|
||||
xfs_log_item_init(tp->t_mountp, &attrdp->attrd_item, XFS_LI_ATTRD,
|
||||
&xfs_attrd_item_ops);
|
||||
|
@ -66,7 +66,7 @@ xfs_zero_extent(
|
||||
return blkdev_issue_zeroout(target->bt_bdev,
|
||||
block << (mp->m_super->s_blocksize_bits - 9),
|
||||
count_fsb << (mp->m_super->s_blocksize_bits - 9),
|
||||
GFP_NOFS, 0);
|
||||
GFP_KERNEL, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -190,7 +190,7 @@ xfs_buf_get_maps(
|
||||
}
|
||||
|
||||
bp->b_maps = kzalloc(map_count * sizeof(struct xfs_buf_map),
|
||||
GFP_NOFS | __GFP_NOFAIL);
|
||||
GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
|
||||
if (!bp->b_maps)
|
||||
return -ENOMEM;
|
||||
return 0;
|
||||
@ -222,7 +222,8 @@ _xfs_buf_alloc(
|
||||
int i;
|
||||
|
||||
*bpp = NULL;
|
||||
bp = kmem_cache_zalloc(xfs_buf_cache, GFP_NOFS | __GFP_NOFAIL);
|
||||
bp = kmem_cache_zalloc(xfs_buf_cache,
|
||||
GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
|
||||
|
||||
/*
|
||||
* We don't want certain flags to appear in b_flags unless they are
|
||||
@ -325,7 +326,7 @@ xfs_buf_alloc_kmem(
|
||||
struct xfs_buf *bp,
|
||||
xfs_buf_flags_t flags)
|
||||
{
|
||||
gfp_t gfp_mask = GFP_NOFS | __GFP_NOFAIL;
|
||||
gfp_t gfp_mask = GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL;
|
||||
size_t size = BBTOB(bp->b_length);
|
||||
|
||||
/* Assure zeroed buffer for non-read cases. */
|
||||
@ -356,13 +357,11 @@ xfs_buf_alloc_pages(
|
||||
struct xfs_buf *bp,
|
||||
xfs_buf_flags_t flags)
|
||||
{
|
||||
gfp_t gfp_mask = __GFP_NOWARN;
|
||||
gfp_t gfp_mask = GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOWARN;
|
||||
long filled = 0;
|
||||
|
||||
if (flags & XBF_READ_AHEAD)
|
||||
gfp_mask |= __GFP_NORETRY;
|
||||
else
|
||||
gfp_mask |= GFP_NOFS;
|
||||
|
||||
/* Make sure that we have a page list */
|
||||
bp->b_page_count = DIV_ROUND_UP(BBTOB(bp->b_length), PAGE_SIZE);
|
||||
@ -429,11 +428,18 @@ _xfs_buf_map_pages(
|
||||
|
||||
/*
|
||||
* vm_map_ram() will allocate auxiliary structures (e.g.
|
||||
* pagetables) with GFP_KERNEL, yet we are likely to be under
|
||||
* GFP_NOFS context here. Hence we need to tell memory reclaim
|
||||
* that we are in such a context via PF_MEMALLOC_NOFS to prevent
|
||||
* memory reclaim re-entering the filesystem here and
|
||||
* potentially deadlocking.
|
||||
* pagetables) with GFP_KERNEL, yet we often under a scoped nofs
|
||||
* context here. Mixing GFP_KERNEL with GFP_NOFS allocations
|
||||
* from the same call site that can be run from both above and
|
||||
* below memory reclaim causes lockdep false positives. Hence we
|
||||
* always need to force this allocation to nofs context because
|
||||
* we can't pass __GFP_NOLOCKDEP down to auxillary structures to
|
||||
* prevent false positive lockdep reports.
|
||||
*
|
||||
* XXX(dgc): I think dquot reclaim is the only place we can get
|
||||
* to this function from memory reclaim context now. If we fix
|
||||
* that like we've fixed inode reclaim to avoid writeback from
|
||||
* reclaim, this nofs wrapping can go away.
|
||||
*/
|
||||
nofs_flag = memalloc_nofs_save();
|
||||
do {
|
||||
|
@ -3518,7 +3518,8 @@ xlog_ticket_alloc(
|
||||
struct xlog_ticket *tic;
|
||||
int unit_res;
|
||||
|
||||
tic = kmem_cache_zalloc(xfs_log_ticket_cache, GFP_NOFS | __GFP_NOFAIL);
|
||||
tic = kmem_cache_zalloc(xfs_log_ticket_cache,
|
||||
GFP_KERNEL | __GFP_NOFAIL);
|
||||
|
||||
unit_res = xlog_calc_unit_res(log, unit_bytes, &tic->t_iclog_hdrs);
|
||||
|
||||
|
@ -428,7 +428,7 @@ xfs_mru_cache_insert(
|
||||
if (!mru || !mru->lists)
|
||||
return -EINVAL;
|
||||
|
||||
if (radix_tree_preload(GFP_NOFS))
|
||||
if (radix_tree_preload(GFP_KERNEL))
|
||||
return -ENOMEM;
|
||||
|
||||
INIT_LIST_HEAD(&elem->list_node);
|
||||
|
Loading…
x
Reference in New Issue
Block a user