New code for 5.9:
- Make sure we call ->iomap_end with a failure code if ->iomap_begin failed in any way; some filesystems need to try to undo things. - Don't invalidate the page cache during direct reads since we already sync'd the cache with disk. - Make direct writes fall back to the page cache if the pre-write cache invalidation fails. This avoids a cache coherency problem. - Fix some idiotic virus scanner warning bs in the previous tag. -----BEGIN PGP SIGNATURE----- iQIzBAABCgAdFiEEUzaAxoMeQq6m2jMV+H93GTRKtOsFAl8q3aMACgkQ+H93GTRK tOvh9BAAkUF11er5pSefKdM1t2WGlSjMgPRmMHELRFwLhJBPK1zyIIs+kCz9+k1/ lGe8mAEI7cA06jiUXYCbHZW1Cgno46VYZxWVnIE3i7c3xYt8pwApqwY+ATqH6X75 7peax9L0Dn8DK7mzw6ihcO6LCIH0iyfHeBpWyKN87APBhKU6nNtVah/I/3NGnbWJ EbH6TSf4FWqzBvYJZKUQRqrGZRJWUinrRAqLnh2fWxVcjUDLVTnbjWxAuL0StgWB H1AY3dof9ROYK3SKFNPqtur8nXcrHNCvnOSgQmB8F++ZkfsubR1MREWpndBJTHnd /a5zNveiQGvA8drM1+2v/QLd30yp3I+LHSlM+BY5Bc/Xl8c2ZanwAhu+x40Ha6qq rjsh31Hdn6E4qzP+ne+eVSWyPPHNZCK0i7gBWlTBodlJyHN70N1RBCfGBnO2VVbt fZCxn6kxLYrfKEoQVQS+9QGu3cRSh7yYsLGjWoK5iynsVJCOvMjmTZ6uPL2EWAEY 9oz/QRxyTaVit1sgk0ypsrfZ4yFafI5QIDCLM9pHpxgj0QNddO2smAyKO2WItZ90 ERz/0UYg1LJoEl4lmBwHoYAI3aU37FyO9UhjgTIJSZeLZbnK1aba9uikwgrSmS/c XLVy0WyPWd/JMBhA0EAAaQFBa1D6gTdTskSG8Djl1saiYNu6kGs= =rjsZ -----END PGP SIGNATURE----- Merge tag 'iomap-5.9-merge-5' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux Pull iomap updates from Darrick Wong: "The most notable changes are: - iomap no longer invalidates the page cache when performing a direct read, since doing so is unnecessary and the old directio code doesn't do that either. - iomap embraced the use of returning ENOTBLK from a direct write to trigger falling back to a buffered write since ext4 already did this and btrfs wants it for their port. - iomap falls back to buffered writes if we're doing a direct write and the page cache invalidation after the flush fails; this was necessary to handle a corner case in the btrfs port. 
- Remove email virus scanner detritus that was accidentally included in yesterday's pull request. Clearly I need(ed) to update my git branch checker scripts. :(" * tag 'iomap-5.9-merge-5' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux: iomap: fall back to buffered writes for invalidation failures xfs: use ENOTBLK for direct I/O to buffered I/O fallback iomap: Only invalidate page cache pages on direct IO writes iomap: Make sure iomap_end is called after iomap_begin
This commit is contained in:
commit
0e4656a299
@ -544,6 +544,8 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
|
||||
iomap_ops = &ext4_iomap_overwrite_ops;
|
||||
ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops,
|
||||
is_sync_kiocb(iocb) || unaligned_io || extend);
|
||||
if (ret == -ENOTBLK)
|
||||
ret = 0;
|
||||
|
||||
if (extend)
|
||||
ret = ext4_handle_inode_extension(inode, offset, ret, count);
|
||||
|
@ -835,7 +835,8 @@ static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
|
||||
|
||||
ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL,
|
||||
is_sync_kiocb(iocb));
|
||||
|
||||
if (ret == -ENOTBLK)
|
||||
ret = 0;
|
||||
out:
|
||||
gfs2_glock_dq(&gh);
|
||||
out_uninit:
|
||||
|
@ -46,10 +46,14 @@ iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
|
||||
ret = ops->iomap_begin(inode, pos, length, flags, &iomap, &srcmap);
|
||||
if (ret)
|
||||
return ret;
|
||||
if (WARN_ON(iomap.offset > pos))
|
||||
return -EIO;
|
||||
if (WARN_ON(iomap.length == 0))
|
||||
return -EIO;
|
||||
if (WARN_ON(iomap.offset > pos)) {
|
||||
written = -EIO;
|
||||
goto out;
|
||||
}
|
||||
if (WARN_ON(iomap.length == 0)) {
|
||||
written = -EIO;
|
||||
goto out;
|
||||
}
|
||||
|
||||
trace_iomap_apply_dstmap(inode, &iomap);
|
||||
if (srcmap.type != IOMAP_HOLE)
|
||||
@ -80,6 +84,7 @@ iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
|
||||
written = actor(inode, pos, length, data, &iomap,
|
||||
srcmap.type != IOMAP_HOLE ? &srcmap : &iomap);
|
||||
|
||||
out:
|
||||
/*
|
||||
* Now the data has been copied, commit the range we've copied. This
|
||||
* should not fail unless the filesystem has had a fatal error.
|
||||
|
@ -10,6 +10,7 @@
|
||||
#include <linux/backing-dev.h>
|
||||
#include <linux/uio.h>
|
||||
#include <linux/task_io_accounting_ops.h>
|
||||
#include "trace.h"
|
||||
|
||||
#include "../internal.h"
|
||||
|
||||
@ -401,6 +402,9 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
|
||||
* can be mapped into multiple disjoint IOs and only a subset of the IOs issued
|
||||
* may be pure data writes. In that case, we still need to do a full data sync
|
||||
* completion.
|
||||
*
|
||||
* Returns -ENOTBLK in case of a page invalidation failure for
|
||||
* writes. The caller needs to fall back to buffered I/O in this case.
|
||||
*/
|
||||
ssize_t
|
||||
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
|
||||
@ -475,23 +479,24 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
|
||||
if (ret)
|
||||
goto out_free_dio;
|
||||
|
||||
/*
|
||||
* Try to invalidate cache pages for the range we're direct
|
||||
* writing. If this invalidation fails, tough, the write will
|
||||
* still work, but racing two incompatible write paths is a
|
||||
* pretty crazy thing to do, so we don't support it 100%.
|
||||
*/
|
||||
ret = invalidate_inode_pages2_range(mapping,
|
||||
pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
|
||||
if (ret)
|
||||
dio_warn_stale_pagecache(iocb->ki_filp);
|
||||
ret = 0;
|
||||
|
||||
if (iov_iter_rw(iter) == WRITE && !wait_for_completion &&
|
||||
!inode->i_sb->s_dio_done_wq) {
|
||||
ret = sb_init_dio_done_wq(inode->i_sb);
|
||||
if (ret < 0)
|
||||
if (iov_iter_rw(iter) == WRITE) {
|
||||
/*
|
||||
* Try to invalidate cache pages for the range we are writing.
|
||||
* If this invalidation fails, let the caller fall back to
|
||||
* buffered I/O.
|
||||
*/
|
||||
if (invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
|
||||
end >> PAGE_SHIFT)) {
|
||||
trace_iomap_dio_invalidate_fail(inode, pos, count);
|
||||
ret = -ENOTBLK;
|
||||
goto out_free_dio;
|
||||
}
|
||||
|
||||
if (!wait_for_completion && !inode->i_sb->s_dio_done_wq) {
|
||||
ret = sb_init_dio_done_wq(inode->i_sb);
|
||||
if (ret < 0)
|
||||
goto out_free_dio;
|
||||
}
|
||||
}
|
||||
|
||||
inode_dio_begin(inode);
|
||||
|
@ -74,6 +74,7 @@ DEFINE_EVENT(iomap_range_class, name, \
|
||||
DEFINE_RANGE_EVENT(iomap_writepage);
|
||||
DEFINE_RANGE_EVENT(iomap_releasepage);
|
||||
DEFINE_RANGE_EVENT(iomap_invalidatepage);
|
||||
DEFINE_RANGE_EVENT(iomap_dio_invalidate_fail);
|
||||
|
||||
#define IOMAP_TYPE_STRINGS \
|
||||
{ IOMAP_HOLE, "HOLE" }, \
|
||||
|
@ -505,7 +505,7 @@ xfs_file_dio_aio_write(
|
||||
*/
|
||||
if (xfs_is_cow_inode(ip)) {
|
||||
trace_xfs_reflink_bounce_dio_write(ip, iocb->ki_pos, count);
|
||||
return -EREMCHG;
|
||||
return -ENOTBLK;
|
||||
}
|
||||
iolock = XFS_IOLOCK_EXCL;
|
||||
} else {
|
||||
@ -553,8 +553,8 @@ out:
|
||||
xfs_iunlock(ip, iolock);
|
||||
|
||||
/*
|
||||
* No fallback to buffered IO on errors for XFS, direct IO will either
|
||||
* complete fully or fail.
|
||||
* No fallback to buffered IO after short writes for XFS, direct I/O
|
||||
* will either complete fully or return an error.
|
||||
*/
|
||||
ASSERT(ret < 0 || ret == count);
|
||||
return ret;
|
||||
@ -714,7 +714,7 @@ xfs_file_write_iter(
|
||||
* allow an operation to fall back to buffered mode.
|
||||
*/
|
||||
ret = xfs_file_dio_aio_write(iocb, from);
|
||||
if (ret != -EREMCHG)
|
||||
if (ret != -ENOTBLK)
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -786,8 +786,11 @@ static ssize_t zonefs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
|
||||
if (iocb->ki_pos >= ZONEFS_I(inode)->i_max_size)
|
||||
return -EFBIG;
|
||||
|
||||
if (iocb->ki_flags & IOCB_DIRECT)
|
||||
return zonefs_file_dio_write(iocb, from);
|
||||
if (iocb->ki_flags & IOCB_DIRECT) {
|
||||
ssize_t ret = zonefs_file_dio_write(iocb, from);
|
||||
if (ret != -ENOTBLK)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return zonefs_file_buffered_write(iocb, from);
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user