xfs: pass the mapping flags to xfs_bmbt_to_iomap
To prepare for looking at the IOMAP_DAX flag in xfs_bmbt_to_iomap, pass in the input mapping flags to xfs_bmbt_to_iomap.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Link: https://lore.kernel.org/r/20211129102203.2243509-24-hch@lst.de
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
commit 740fd671e0
parent a50f6ab3fd
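Before the diff, a minimal standalone sketch of the pattern this patch applies (illustration only, not kernel code; every demo_*/DEMO_* name below is invented): the conversion helper grows a separate input parameter carrying the caller's mapping flags, distinct from the IOMAP_F_* output flags it stores in the produced mapping, so a later patch can inspect a DAX request bit inside the helper.

#include <stdio.h>

#define DEMO_IOMAP_F_SHARED  0x1u   /* output flag: stored in the mapping  */
#define DEMO_IOMAP_DAX       0x2u   /* input flag: describes the request   */

struct demo_iomap {
        unsigned int flags;         /* filled from the output flags only   */
};

/* Before: only the output flags were passed in. */
static void demo_to_iomap_old(struct demo_iomap *iomap, unsigned short iomap_flags)
{
        iomap->flags = iomap_flags;
}

/* After: the caller's mapping flags ride along and can be inspected. */
static void demo_to_iomap_new(struct demo_iomap *iomap,
                              unsigned int mapping_flags,
                              unsigned short iomap_flags)
{
        if (mapping_flags & DEMO_IOMAP_DAX)
                printf("DAX request visible inside the helper\n");
        iomap->flags = iomap_flags;
}

int main(void)
{
        struct demo_iomap iomap;

        demo_to_iomap_old(&iomap, DEMO_IOMAP_F_SHARED);
        demo_to_iomap_new(&iomap, DEMO_IOMAP_DAX, DEMO_IOMAP_F_SHARED);
        printf("iomap.flags = %#x\n", iomap.flags);
        return 0;
}

The point of the split is that the two flag namespaces never mix: mapping_flags describes the request coming from the iomap layer, while iomap_flags are the IOMAP_F_* bits the filesystem reports back.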
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -4551,7 +4551,7 @@ xfs_bmapi_convert_delalloc(
 	 * the extent.  Just return the real extent at this offset.
 	 */
 	if (!isnullstartblock(bma.got.br_startblock)) {
-		xfs_bmbt_to_iomap(ip, iomap, &bma.got, flags);
+		xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags);
 		*seq = READ_ONCE(ifp->if_seq);
 		goto out_trans_cancel;
 	}
@@ -4598,7 +4598,7 @@ xfs_bmapi_convert_delalloc(
 	XFS_STATS_INC(mp, xs_xstrat_quick);
 
 	ASSERT(!isnullstartblock(bma.got.br_startblock));
-	xfs_bmbt_to_iomap(ip, iomap, &bma.got, flags);
+	xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags);
 	*seq = READ_ONCE(ifp->if_seq);
 
 	if (whichfork == XFS_COW_FORK)
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -359,7 +359,7 @@ retry:
 	    isnullstartblock(imap.br_startblock))
 		goto allocate_blocks;
 
-	xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0);
+	xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0, 0);
 	trace_xfs_map_blocks_found(ip, offset, count, whichfork, &imap);
 	return 0;
 allocate_blocks:
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -53,7 +53,8 @@ xfs_bmbt_to_iomap(
 	struct xfs_inode	*ip,
 	struct iomap		*iomap,
 	struct xfs_bmbt_irec	*imap,
-	u16			flags)
+	unsigned int		mapping_flags,
+	u16			iomap_flags)
 {
 	struct xfs_mount	*mp = ip->i_mount;
 	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);
@@ -79,7 +80,7 @@ xfs_bmbt_to_iomap(
 	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
 	iomap->bdev = target->bt_bdev;
 	iomap->dax_dev = target->bt_daxdev;
-	iomap->flags = flags;
+	iomap->flags = iomap_flags;
 
 	if (xfs_ipincount(ip) &&
 	    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
@@ -799,7 +800,7 @@ xfs_direct_write_iomap_begin(
 
 	xfs_iunlock(ip, lockmode);
 	trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
-	return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags);
+	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, iomap_flags);
 
 allocate_blocks:
 	error = -EAGAIN;
@@ -830,18 +831,19 @@ allocate_blocks:
 		return error;
 
 	trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);
-	return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags | IOMAP_F_NEW);
+	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags,
+				 iomap_flags | IOMAP_F_NEW);
 
 out_found_cow:
 	xfs_iunlock(ip, lockmode);
 	length = XFS_FSB_TO_B(mp, cmap.br_startoff + cmap.br_blockcount);
 	trace_xfs_iomap_found(ip, offset, length - offset, XFS_COW_FORK, &cmap);
 	if (imap.br_startblock != HOLESTARTBLOCK) {
-		error = xfs_bmbt_to_iomap(ip, srcmap, &imap, 0);
+		error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0);
 		if (error)
 			return error;
 	}
-	return xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);
+	return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, IOMAP_F_SHARED);
 
 out_unlock:
 	if (lockmode)
@@ -1051,23 +1053,24 @@ retry:
 	 */
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 	trace_xfs_iomap_alloc(ip, offset, count, allocfork, &imap);
-	return xfs_bmbt_to_iomap(ip, iomap, &imap, IOMAP_F_NEW);
+	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, IOMAP_F_NEW);
 
 found_imap:
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
-	return xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
+	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0);
 
 found_cow:
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 	if (imap.br_startoff <= offset_fsb) {
-		error = xfs_bmbt_to_iomap(ip, srcmap, &imap, 0);
+		error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0);
 		if (error)
 			return error;
-		return xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);
+		return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags,
+					 IOMAP_F_SHARED);
 	}
 
 	xfs_trim_extent(&cmap, offset_fsb, imap.br_startoff - offset_fsb);
-	return xfs_bmbt_to_iomap(ip, iomap, &cmap, 0);
+	return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, 0);
 
 out_unlock:
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -1176,7 +1179,8 @@ xfs_read_iomap_begin(
 	if (error)
 		return error;
 	trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
-	return xfs_bmbt_to_iomap(ip, iomap, &imap, shared ? IOMAP_F_SHARED : 0);
+	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags,
+				 shared ? IOMAP_F_SHARED : 0);
 }
 
 const struct iomap_ops xfs_read_iomap_ops = {
@@ -1235,7 +1239,8 @@ xfs_seek_iomap_begin(
 		if (data_fsb < cow_fsb + cmap.br_blockcount)
 			end_fsb = min(end_fsb, data_fsb);
 		xfs_trim_extent(&cmap, offset_fsb, end_fsb);
-		error = xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);
+		error = xfs_bmbt_to_iomap(ip, iomap, &cmap, flags,
+					  IOMAP_F_SHARED);
 		/*
 		 * This is a COW extent, so we must probe the page cache
 		 * because there could be dirty page cache being backed
@@ -1257,7 +1262,7 @@ xfs_seek_iomap_begin(
 	imap.br_state = XFS_EXT_NORM;
 done:
 	xfs_trim_extent(&imap, offset_fsb, end_fsb);
-	error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
+	error = xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0);
 out_unlock:
 	xfs_iunlock(ip, lockmode);
 	return error;
@@ -1304,7 +1309,7 @@ out_unlock:
 	if (error)
 		return error;
 	ASSERT(nimaps);
-	return xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
+	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0);
 }
 
 const struct iomap_ops xfs_xattr_iomap_ops = {
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -17,8 +17,9 @@ int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t, bool);
 xfs_fileoff_t xfs_iomap_eof_align_last_fsb(struct xfs_inode *ip,
 		xfs_fileoff_t end_fsb);
 
-int xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *,
-		struct xfs_bmbt_irec *, u16);
+int xfs_bmbt_to_iomap(struct xfs_inode *ip, struct iomap *iomap,
+		struct xfs_bmbt_irec *imap, unsigned int mapping_flags,
+		u16 iomap_flags);
 
 int xfs_zero_range(struct xfs_inode *ip, loff_t pos, loff_t len,
 		bool *did_zero);
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -173,7 +173,7 @@ xfs_fs_map_blocks(
 	}
 	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
 
-	error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
+	error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0, 0);
 	*device_generation = mp->m_generation;
 	return error;
 out_unlock:
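After the diff: a hedged, standalone sketch of the kind of decision the new mapping_flags argument makes possible inside xfs_bmbt_to_iomap. This commit only plumbs the flags through; what the follow-up actually checks may differ, and the assumption here is simply that a DAX bit in the request selects the DAX device rather than the block device. All demo_*/DEMO_* names are invented.

#include <stdio.h>

#define DEMO_IOMAP_DAX  0x8000u        /* stand-in for the real IOMAP_DAX bit */

struct demo_buftarg {
        const char *bt_bdev;           /* stand-in for target->bt_bdev   */
        const char *bt_daxdev;         /* stand-in for target->bt_daxdev */
};

/* Choose which device a mapping should reference, based on the request. */
static const char *demo_pick_device(const struct demo_buftarg *target,
                                    unsigned int mapping_flags)
{
        if (mapping_flags & DEMO_IOMAP_DAX)
                return target->bt_daxdev;
        return target->bt_bdev;
}

int main(void)
{
        struct demo_buftarg target = {
                .bt_bdev = "block device",
                .bt_daxdev = "DAX device",
        };

        printf("plain I/O  -> %s\n", demo_pick_device(&target, 0));
        printf("DAX access -> %s\n", demo_pick_device(&target, DEMO_IOMAP_DAX));
        return 0;
}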