overlayfs vfs updates for 6.8


Merge tag 'ovl-vfs-6.8' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/overlayfs/vfs

Pull backing file updates from Amir Goldstein:

These patches essentially just lift some overlayfs code into common code.
The motivation is to reuse the common stacking code for the FUSE
passthrough patches that I am shaping up for upstream. The FUSE
passthrough work itself will come in over the next few cycles.

I have been testing these patches with my fuse-backing-fd development
branch for quite some time, and I think both you and Miklos gave a
conceptual ACK to some version of this work.

* tag 'ovl-vfs-6.8' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/overlayfs/vfs:
  fs: factor out backing_file_mmap() helper
  fs: factor out backing_file_splice_{read,write}() helpers
  fs: factor out backing_file_{read,write}_iter() helpers
  fs: prepare for stackable filesystems backing file helpers

Signed-off-by: Christian Brauner <brauner@kernel.org>
commit 7a18c0fff4
Christian Brauner <brauner@kernel.org>, 2023-12-23 20:17:05 +01:00
11 changed files with 435 additions and 263 deletions
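
To make the shape of the new API concrete, here is a minimal, hypothetical
sketch of what a stacked filesystem's ->read_iter() could look like once it
is built on the common helpers; it mirrors the ovl_read_iter() conversion in
the fs/overlayfs/file.c diff below. All myfs_* names are invented for
illustration; only backing_file_read_iter() and struct backing_file_ctx come
from this series.

#include <linux/backing-file.h>	/* new header added by this series */

static ssize_t myfs_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	/* Backing file previously opened with backing_file_open();
	 * the accessor is a placeholder. */
	struct file *realfile = myfs_real_file(file);
	struct backing_file_ctx ctx = {
		.cred      = myfs_creator_creds(file),	/* creds to override to (placeholder) */
		.user_file = file,			/* the file userspace sees */
		.accessed  = myfs_file_accessed,	/* optional atime hook (placeholder) */
	};

	if (!iov_iter_count(iter))
		return 0;

	/* Credential override, O_DIRECT checks and the sync/AIO split now
	 * live in fs/backing-file.c. */
	return backing_file_read_iter(realfile, iter, iocb, iocb->ki_flags, &ctx);
}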

MAINTAINERS

@@ -8186,6 +8186,15 @@ S: Supported
F: fs/iomap/
F: include/linux/iomap.h
FILESYSTEMS [STACKABLE]
M: Miklos Szeredi <miklos@szeredi.hu>
M: Amir Goldstein <amir73il@gmail.com>
L: linux-fsdevel@vger.kernel.org
L: linux-unionfs@vger.kernel.org
S: Maintained
F: fs/backing-file.c
F: include/linux/backing-file.h
FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER
M: Riku Voipio <riku.voipio@iki.fi>
L: linux-hwmon@vger.kernel.org

fs/Kconfig

@@ -18,6 +18,10 @@ config VALIDATE_FS_PARSER
config FS_IOMAP
bool
# Stackable filesystems
config FS_STACK
bool
config BUFFER_HEAD
bool
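
A filesystem that wants these helpers is expected to select FS_STACK, which
is what pulls fs/backing-file.o into the build (see the fs/Makefile hunk
below and the fs/overlayfs/Kconfig change later in this merge). A
hypothetical consumer's Kconfig entry might look like this; MYFS_FS is
invented for illustration:

config MYFS_FS
	tristate "Hypothetical stacking filesystem (example only)"
	select FS_STACK
	help
	  Illustrative entry, not part of this merge: selecting FS_STACK
	  builds the common backing-file helpers this module relies on.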

fs/Makefile

@@ -39,6 +39,7 @@ obj-$(CONFIG_COMPAT_BINFMT_ELF) += compat_binfmt_elf.o
obj-$(CONFIG_BINFMT_ELF_FDPIC) += binfmt_elf_fdpic.o
obj-$(CONFIG_BINFMT_FLAT) += binfmt_flat.o
obj-$(CONFIG_FS_STACK) += backing-file.o
obj-$(CONFIG_FS_MBCACHE) += mbcache.o
obj-$(CONFIG_FS_POSIX_ACL) += posix_acl.o
obj-$(CONFIG_NFS_COMMON) += nfs_common/

fs/backing-file.c (new file, 336 lines)

@@ -0,0 +1,336 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Common helpers for stackable filesystems and backing files.
*
* Forked from fs/overlayfs/file.c.
*
* Copyright (C) 2017 Red Hat, Inc.
* Copyright (C) 2023 CTERA Networks.
*/
#include <linux/fs.h>
#include <linux/backing-file.h>
#include <linux/splice.h>
#include <linux/mm.h>
#include "internal.h"
/**
* backing_file_open - open a backing file for kernel internal use
* @user_path: path that the user requested to open
* @flags: open flags
* @real_path: path of the backing file
* @cred: credentials for open
*
* Open a backing file for a stackable filesystem (e.g., overlayfs).
* @user_path may be on the stackable filesystem and @real_path on the
* underlying filesystem. In this case, we want to be able to return the
* @user_path of the stackable filesystem. This is done by embedding the
* returned file into a container structure that also stores the stacked
* file's path, which can be retrieved using backing_file_user_path().
*/
struct file *backing_file_open(const struct path *user_path, int flags,
const struct path *real_path,
const struct cred *cred)
{
struct file *f;
int error;
f = alloc_empty_backing_file(flags, cred);
if (IS_ERR(f))
return f;
path_get(user_path);
*backing_file_user_path(f) = *user_path;
error = vfs_open(real_path, f);
if (error) {
fput(f);
f = ERR_PTR(error);
}
return f;
}
EXPORT_SYMBOL_GPL(backing_file_open);
struct backing_aio {
struct kiocb iocb;
refcount_t ref;
struct kiocb *orig_iocb;
/* used for aio completion */
void (*end_write)(struct file *);
struct work_struct work;
long res;
};
static struct kmem_cache *backing_aio_cachep;
#define BACKING_IOCB_MASK \
(IOCB_NOWAIT | IOCB_HIPRI | IOCB_DSYNC | IOCB_SYNC | IOCB_APPEND)
static rwf_t iocb_to_rw_flags(int flags)
{
return (__force rwf_t)(flags & BACKING_IOCB_MASK);
}
static void backing_aio_put(struct backing_aio *aio)
{
if (refcount_dec_and_test(&aio->ref)) {
fput(aio->iocb.ki_filp);
kmem_cache_free(backing_aio_cachep, aio);
}
}
static void backing_aio_cleanup(struct backing_aio *aio, long res)
{
struct kiocb *iocb = &aio->iocb;
struct kiocb *orig_iocb = aio->orig_iocb;
if (aio->end_write)
aio->end_write(orig_iocb->ki_filp);
orig_iocb->ki_pos = iocb->ki_pos;
backing_aio_put(aio);
}
static void backing_aio_rw_complete(struct kiocb *iocb, long res)
{
struct backing_aio *aio = container_of(iocb, struct backing_aio, iocb);
struct kiocb *orig_iocb = aio->orig_iocb;
if (iocb->ki_flags & IOCB_WRITE)
kiocb_end_write(iocb);
backing_aio_cleanup(aio, res);
orig_iocb->ki_complete(orig_iocb, res);
}
static void backing_aio_complete_work(struct work_struct *work)
{
struct backing_aio *aio = container_of(work, struct backing_aio, work);
backing_aio_rw_complete(&aio->iocb, aio->res);
}
static void backing_aio_queue_completion(struct kiocb *iocb, long res)
{
struct backing_aio *aio = container_of(iocb, struct backing_aio, iocb);
/*
* Punt to a work queue to serialize updates of mtime/size.
*/
aio->res = res;
INIT_WORK(&aio->work, backing_aio_complete_work);
queue_work(file_inode(aio->orig_iocb->ki_filp)->i_sb->s_dio_done_wq,
&aio->work);
}
static int backing_aio_init_wq(struct kiocb *iocb)
{
struct super_block *sb = file_inode(iocb->ki_filp)->i_sb;
if (sb->s_dio_done_wq)
return 0;
return sb_init_dio_done_wq(sb);
}
ssize_t backing_file_read_iter(struct file *file, struct iov_iter *iter,
struct kiocb *iocb, int flags,
struct backing_file_ctx *ctx)
{
struct backing_aio *aio = NULL;
const struct cred *old_cred;
ssize_t ret;
if (WARN_ON_ONCE(!(file->f_mode & FMODE_BACKING)))
return -EIO;
if (!iov_iter_count(iter))
return 0;
if (iocb->ki_flags & IOCB_DIRECT &&
!(file->f_mode & FMODE_CAN_ODIRECT))
return -EINVAL;
old_cred = override_creds(ctx->cred);
if (is_sync_kiocb(iocb)) {
rwf_t rwf = iocb_to_rw_flags(flags);
ret = vfs_iter_read(file, iter, &iocb->ki_pos, rwf);
} else {
ret = -ENOMEM;
aio = kmem_cache_zalloc(backing_aio_cachep, GFP_KERNEL);
if (!aio)
goto out;
aio->orig_iocb = iocb;
kiocb_clone(&aio->iocb, iocb, get_file(file));
aio->iocb.ki_complete = backing_aio_rw_complete;
refcount_set(&aio->ref, 2);
ret = vfs_iocb_iter_read(file, &aio->iocb, iter);
backing_aio_put(aio);
if (ret != -EIOCBQUEUED)
backing_aio_cleanup(aio, ret);
}
out:
revert_creds(old_cred);
if (ctx->accessed)
ctx->accessed(ctx->user_file);
return ret;
}
EXPORT_SYMBOL_GPL(backing_file_read_iter);
ssize_t backing_file_write_iter(struct file *file, struct iov_iter *iter,
struct kiocb *iocb, int flags,
struct backing_file_ctx *ctx)
{
const struct cred *old_cred;
ssize_t ret;
if (WARN_ON_ONCE(!(file->f_mode & FMODE_BACKING)))
return -EIO;
if (!iov_iter_count(iter))
return 0;
ret = file_remove_privs(ctx->user_file);
if (ret)
return ret;
if (iocb->ki_flags & IOCB_DIRECT &&
!(file->f_mode & FMODE_CAN_ODIRECT))
return -EINVAL;
/*
* Stacked filesystems don't support deferred completions, don't copy
* this property in case it is set by the issuer.
*/
flags &= ~IOCB_DIO_CALLER_COMP;
old_cred = override_creds(ctx->cred);
if (is_sync_kiocb(iocb)) {
rwf_t rwf = iocb_to_rw_flags(flags);
ret = vfs_iter_write(file, iter, &iocb->ki_pos, rwf);
if (ctx->end_write)
ctx->end_write(ctx->user_file);
} else {
struct backing_aio *aio;
ret = backing_aio_init_wq(iocb);
if (ret)
goto out;
ret = -ENOMEM;
aio = kmem_cache_zalloc(backing_aio_cachep, GFP_KERNEL);
if (!aio)
goto out;
aio->orig_iocb = iocb;
aio->end_write = ctx->end_write;
kiocb_clone(&aio->iocb, iocb, get_file(file));
aio->iocb.ki_flags = flags;
aio->iocb.ki_complete = backing_aio_queue_completion;
refcount_set(&aio->ref, 2);
ret = vfs_iocb_iter_write(file, &aio->iocb, iter);
backing_aio_put(aio);
if (ret != -EIOCBQUEUED)
backing_aio_cleanup(aio, ret);
}
out:
revert_creds(old_cred);
return ret;
}
EXPORT_SYMBOL_GPL(backing_file_write_iter);
ssize_t backing_file_splice_read(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags,
struct backing_file_ctx *ctx)
{
const struct cred *old_cred;
ssize_t ret;
if (WARN_ON_ONCE(!(in->f_mode & FMODE_BACKING)))
return -EIO;
old_cred = override_creds(ctx->cred);
ret = vfs_splice_read(in, ppos, pipe, len, flags);
revert_creds(old_cred);
if (ctx->accessed)
ctx->accessed(ctx->user_file);
return ret;
}
EXPORT_SYMBOL_GPL(backing_file_splice_read);
ssize_t backing_file_splice_write(struct pipe_inode_info *pipe,
struct file *out, loff_t *ppos, size_t len,
unsigned int flags,
struct backing_file_ctx *ctx)
{
const struct cred *old_cred;
ssize_t ret;
if (WARN_ON_ONCE(!(out->f_mode & FMODE_BACKING)))
return -EIO;
ret = file_remove_privs(ctx->user_file);
if (ret)
return ret;
old_cred = override_creds(ctx->cred);
file_start_write(out);
ret = iter_file_splice_write(pipe, out, ppos, len, flags);
file_end_write(out);
revert_creds(old_cred);
if (ctx->end_write)
ctx->end_write(ctx->user_file);
return ret;
}
EXPORT_SYMBOL_GPL(backing_file_splice_write);
int backing_file_mmap(struct file *file, struct vm_area_struct *vma,
struct backing_file_ctx *ctx)
{
const struct cred *old_cred;
int ret;
if (WARN_ON_ONCE(!(file->f_mode & FMODE_BACKING)) ||
WARN_ON_ONCE(ctx->user_file != vma->vm_file))
return -EIO;
if (!file->f_op->mmap)
return -ENODEV;
vma_set_file(vma, file);
old_cred = override_creds(ctx->cred);
ret = call_mmap(vma->vm_file, vma);
revert_creds(old_cred);
if (ctx->accessed)
ctx->accessed(ctx->user_file);
return ret;
}
EXPORT_SYMBOL_GPL(backing_file_mmap);
static int __init backing_aio_init(void)
{
backing_aio_cachep = kmem_cache_create("backing_aio",
sizeof(struct backing_aio),
0, SLAB_HWCACHE_ALIGN, NULL);
if (!backing_aio_cachep)
return -ENOMEM;
return 0;
}
fs_initcall(backing_aio_init);

fs/open.c

@@ -1184,44 +1184,6 @@ struct file *kernel_file_open(const struct path *path, int flags,
}
EXPORT_SYMBOL_GPL(kernel_file_open);
/**
* backing_file_open - open a backing file for kernel internal use
* @user_path: path that the user requested to open
* @flags: open flags
* @real_path: path of the backing file
* @cred: credentials for open
*
* Open a backing file for a stackable filesystem (e.g., overlayfs).
* @user_path may be on the stackable filesystem and @real_path on the
* underlying filesystem. In this case, we want to be able to return the
* @user_path of the stackable filesystem. This is done by embedding the
* returned file into a container structure that also stores the stacked
* file's path, which can be retrieved using backing_file_user_path().
*/
struct file *backing_file_open(const struct path *user_path, int flags,
const struct path *real_path,
const struct cred *cred)
{
struct file *f;
int error;
f = alloc_empty_backing_file(flags, cred);
if (IS_ERR(f))
return f;
path_get(user_path);
*backing_file_user_path(f) = *user_path;
f->f_path = *real_path;
error = do_dentry_open(f, d_inode(real_path->dentry), NULL);
if (error) {
fput(f);
f = ERR_PTR(error);
}
return f;
}
EXPORT_SYMBOL_GPL(backing_file_open);
#define WILL_CREATE(flags) (flags & (O_CREAT | __O_TMPFILE))
#define O_PATH_FLAGS (O_DIRECTORY | O_NOFOLLOW | O_PATH | O_CLOEXEC)

fs/overlayfs/Kconfig

@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config OVERLAY_FS
tristate "Overlay filesystem support"
select FS_STACK
select EXPORTFS
help
An overlay filesystem combines two filesystems - an 'upper' filesystem

fs/overlayfs/file.c

@@ -9,25 +9,11 @@
#include <linux/xattr.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/backing-file.h>
#include "overlayfs.h"
#include "../internal.h" /* for sb_init_dio_done_wq */
struct ovl_aio_req {
struct kiocb iocb;
refcount_t ref;
struct kiocb *orig_iocb;
/* used for aio completion */
struct work_struct work;
long res;
};
static struct kmem_cache *ovl_aio_request_cachep;
static char ovl_whatisit(struct inode *inode, struct inode *realinode)
{
if (realinode != ovl_inode_upper(inode))
@@ -274,84 +260,16 @@ static void ovl_file_accessed(struct file *file)
touch_atime(&file->f_path);
}
#define OVL_IOCB_MASK \
(IOCB_NOWAIT | IOCB_HIPRI | IOCB_DSYNC | IOCB_SYNC | IOCB_APPEND)
static rwf_t iocb_to_rw_flags(int flags)
{
return (__force rwf_t)(flags & OVL_IOCB_MASK);
}
static inline void ovl_aio_put(struct ovl_aio_req *aio_req)
{
if (refcount_dec_and_test(&aio_req->ref)) {
fput(aio_req->iocb.ki_filp);
kmem_cache_free(ovl_aio_request_cachep, aio_req);
}
}
static void ovl_aio_cleanup_handler(struct ovl_aio_req *aio_req)
{
struct kiocb *iocb = &aio_req->iocb;
struct kiocb *orig_iocb = aio_req->orig_iocb;
if (iocb->ki_flags & IOCB_WRITE)
ovl_file_modified(orig_iocb->ki_filp);
orig_iocb->ki_pos = iocb->ki_pos;
ovl_aio_put(aio_req);
}
static void ovl_aio_rw_complete(struct kiocb *iocb, long res)
{
struct ovl_aio_req *aio_req = container_of(iocb,
struct ovl_aio_req, iocb);
struct kiocb *orig_iocb = aio_req->orig_iocb;
if (iocb->ki_flags & IOCB_WRITE)
kiocb_end_write(iocb);
ovl_aio_cleanup_handler(aio_req);
orig_iocb->ki_complete(orig_iocb, res);
}
static void ovl_aio_complete_work(struct work_struct *work)
{
struct ovl_aio_req *aio_req = container_of(work,
struct ovl_aio_req, work);
ovl_aio_rw_complete(&aio_req->iocb, aio_req->res);
}
static void ovl_aio_queue_completion(struct kiocb *iocb, long res)
{
struct ovl_aio_req *aio_req = container_of(iocb,
struct ovl_aio_req, iocb);
struct kiocb *orig_iocb = aio_req->orig_iocb;
/*
* Punt to a work queue to serialize updates of mtime/size.
*/
aio_req->res = res;
INIT_WORK(&aio_req->work, ovl_aio_complete_work);
queue_work(file_inode(orig_iocb->ki_filp)->i_sb->s_dio_done_wq,
&aio_req->work);
}
static int ovl_init_aio_done_wq(struct super_block *sb)
{
if (sb->s_dio_done_wq)
return 0;
return sb_init_dio_done_wq(sb);
}
static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *file = iocb->ki_filp;
struct fd real;
const struct cred *old_cred;
ssize_t ret;
struct backing_file_ctx ctx = {
.cred = ovl_creds(file_inode(file)->i_sb),
.user_file = file,
.accessed = ovl_file_accessed,
};
if (!iov_iter_count(iter))
return 0;
@@ -360,37 +278,8 @@ static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
if (ret)
return ret;
ret = -EINVAL;
if (iocb->ki_flags & IOCB_DIRECT &&
!(real.file->f_mode & FMODE_CAN_ODIRECT))
goto out_fdput;
old_cred = ovl_override_creds(file_inode(file)->i_sb);
if (is_sync_kiocb(iocb)) {
rwf_t rwf = iocb_to_rw_flags(iocb->ki_flags);
ret = vfs_iter_read(real.file, iter, &iocb->ki_pos, rwf);
} else {
struct ovl_aio_req *aio_req;
ret = -ENOMEM;
aio_req = kmem_cache_zalloc(ovl_aio_request_cachep, GFP_KERNEL);
if (!aio_req)
goto out;
aio_req->orig_iocb = iocb;
kiocb_clone(&aio_req->iocb, iocb, get_file(real.file));
aio_req->iocb.ki_complete = ovl_aio_rw_complete;
refcount_set(&aio_req->ref, 2);
ret = vfs_iocb_iter_read(real.file, &aio_req->iocb, iter);
ovl_aio_put(aio_req);
if (ret != -EIOCBQUEUED)
ovl_aio_cleanup_handler(aio_req);
}
out:
revert_creds(old_cred);
ovl_file_accessed(file);
out_fdput:
ret = backing_file_read_iter(real.file, iter, iocb, iocb->ki_flags,
&ctx);
fdput(real);
return ret;
@@ -401,9 +290,13 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
struct fd real;
const struct cred *old_cred;
ssize_t ret;
int ifl = iocb->ki_flags;
struct backing_file_ctx ctx = {
.cred = ovl_creds(inode->i_sb),
.user_file = file,
.end_write = ovl_file_modified,
};
if (!iov_iter_count(iter))
return 0;
@@ -411,19 +304,11 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
inode_lock(inode);
/* Update mode */
ovl_copyattr(inode);
ret = file_remove_privs(file);
if (ret)
goto out_unlock;
ret = ovl_real_fdget(file, &real);
if (ret)
goto out_unlock;
ret = -EINVAL;
if (iocb->ki_flags & IOCB_DIRECT &&
!(real.file->f_mode & FMODE_CAN_ODIRECT))
goto out_fdput;
if (!ovl_should_sync(OVL_FS(inode->i_sb)))
ifl &= ~(IOCB_DSYNC | IOCB_SYNC);
@@ -432,39 +317,7 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
* this property in case it is set by the issuer.
*/
ifl &= ~IOCB_DIO_CALLER_COMP;
old_cred = ovl_override_creds(file_inode(file)->i_sb);
if (is_sync_kiocb(iocb)) {
rwf_t rwf = iocb_to_rw_flags(ifl);
ret = vfs_iter_write(real.file, iter, &iocb->ki_pos, rwf);
/* Update size */
ovl_file_modified(file);
} else {
struct ovl_aio_req *aio_req;
ret = ovl_init_aio_done_wq(inode->i_sb);
if (ret)
goto out;
ret = -ENOMEM;
aio_req = kmem_cache_zalloc(ovl_aio_request_cachep, GFP_KERNEL);
if (!aio_req)
goto out;
aio_req->orig_iocb = iocb;
kiocb_clone(&aio_req->iocb, iocb, get_file(real.file));
aio_req->iocb.ki_flags = ifl;
aio_req->iocb.ki_complete = ovl_aio_queue_completion;
refcount_set(&aio_req->ref, 2);
ret = vfs_iocb_iter_write(real.file, &aio_req->iocb, iter);
ovl_aio_put(aio_req);
if (ret != -EIOCBQUEUED)
ovl_aio_cleanup_handler(aio_req);
}
out:
revert_creds(old_cred);
out_fdput:
ret = backing_file_write_iter(real.file, iter, iocb, ifl, &ctx);
fdput(real);
out_unlock:
@@ -477,20 +330,21 @@ static ssize_t ovl_splice_read(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags)
{
const struct cred *old_cred;
struct fd real;
ssize_t ret;
struct backing_file_ctx ctx = {
.cred = ovl_creds(file_inode(in)->i_sb),
.user_file = in,
.accessed = ovl_file_accessed,
};
ret = ovl_real_fdget(in, &real);
if (ret)
return ret;
old_cred = ovl_override_creds(file_inode(in)->i_sb);
ret = vfs_splice_read(real.file, ppos, pipe, len, flags);
revert_creds(old_cred);
ovl_file_accessed(in);
ret = backing_file_splice_read(real.file, ppos, pipe, len, flags, &ctx);
fdput(real);
return ret;
}
@@ -506,30 +360,23 @@ static ssize_t ovl_splice_write(struct pipe_inode_info *pipe, struct file *out,
loff_t *ppos, size_t len, unsigned int flags)
{
struct fd real;
const struct cred *old_cred;
struct inode *inode = file_inode(out);
ssize_t ret;
struct backing_file_ctx ctx = {
.cred = ovl_creds(inode->i_sb),
.user_file = out,
.end_write = ovl_file_modified,
};
inode_lock(inode);
/* Update mode */
ovl_copyattr(inode);
ret = file_remove_privs(out);
if (ret)
goto out_unlock;
ret = ovl_real_fdget(out, &real);
if (ret)
goto out_unlock;
old_cred = ovl_override_creds(inode->i_sb);
file_start_write(real.file);
ret = iter_file_splice_write(pipe, real.file, ppos, len, flags);
file_end_write(real.file);
/* Update size */
ovl_file_modified(out);
revert_creds(old_cred);
ret = backing_file_splice_write(pipe, real.file, ppos, len, flags, &ctx);
fdput(real);
out_unlock:
@@ -567,23 +414,13 @@ static int ovl_fsync(struct file *file, loff_t start, loff_t end, int datasync)
static int ovl_mmap(struct file *file, struct vm_area_struct *vma)
{
struct file *realfile = file->private_data;
const struct cred *old_cred;
int ret;
struct backing_file_ctx ctx = {
.cred = ovl_creds(file_inode(file)->i_sb),
.user_file = file,
.accessed = ovl_file_accessed,
};
if (!realfile->f_op->mmap)
return -ENODEV;
if (WARN_ON(file != vma->vm_file))
return -EIO;
vma_set_file(vma, realfile);
old_cred = ovl_override_creds(file_inode(file)->i_sb);
ret = call_mmap(vma->vm_file, vma);
revert_creds(old_cred);
ovl_file_accessed(file);
return ret;
return backing_file_mmap(realfile, vma, &ctx);
}
static long ovl_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
@@ -776,19 +613,3 @@ const struct file_operations ovl_file_operations = {
.copy_file_range = ovl_copy_file_range,
.remap_file_range = ovl_remap_file_range,
};
int __init ovl_aio_request_cache_init(void)
{
ovl_aio_request_cachep = kmem_cache_create("ovl_aio_req",
sizeof(struct ovl_aio_req),
0, SLAB_HWCACHE_ALIGN, NULL);
if (!ovl_aio_request_cachep)
return -ENOMEM;
return 0;
}
void ovl_aio_request_cache_destroy(void)
{
kmem_cache_destroy(ovl_aio_request_cachep);
}

fs/overlayfs/overlayfs.h

@@ -417,6 +417,12 @@ int ovl_want_write(struct dentry *dentry);
void ovl_drop_write(struct dentry *dentry);
struct dentry *ovl_workdir(struct dentry *dentry);
const struct cred *ovl_override_creds(struct super_block *sb);
static inline const struct cred *ovl_creds(struct super_block *sb)
{
return OVL_FS(sb)->creator_cred;
}
int ovl_can_decode_fh(struct super_block *sb);
struct dentry *ovl_indexdir(struct super_block *sb);
bool ovl_index_all(struct super_block *sb);
@@ -829,8 +835,6 @@ struct dentry *ovl_create_temp(struct ovl_fs *ofs, struct dentry *workdir,
/* file.c */
extern const struct file_operations ovl_file_operations;
int __init ovl_aio_request_cache_init(void);
void ovl_aio_request_cache_destroy(void);
int ovl_real_fileattr_get(const struct path *realpath, struct fileattr *fa);
int ovl_real_fileattr_set(const struct path *realpath, struct fileattr *fa);
int ovl_fileattr_get(struct dentry *dentry, struct fileattr *fa);

fs/overlayfs/super.c

@@ -1501,14 +1501,10 @@ static int __init ovl_init(void)
if (ovl_inode_cachep == NULL)
return -ENOMEM;
err = ovl_aio_request_cache_init();
if (!err) {
err = register_filesystem(&ovl_fs_type);
if (!err)
return 0;
err = register_filesystem(&ovl_fs_type);
if (!err)
return 0;
ovl_aio_request_cache_destroy();
}
kmem_cache_destroy(ovl_inode_cachep);
return err;
@@ -1524,7 +1520,6 @@ static void __exit ovl_exit(void)
*/
rcu_barrier();
kmem_cache_destroy(ovl_inode_cachep);
ovl_aio_request_cache_destroy();
}
module_init(ovl_init);

include/linux/backing-file.h (new file, 42 lines)

@@ -0,0 +1,42 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Common helpers for stackable filesystems and backing files.
*
* Copyright (C) 2023 CTERA Networks.
*/
#ifndef _LINUX_BACKING_FILE_H
#define _LINUX_BACKING_FILE_H
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/fs.h>
struct backing_file_ctx {
const struct cred *cred;
struct file *user_file;
void (*accessed)(struct file *);
void (*end_write)(struct file *);
};
struct file *backing_file_open(const struct path *user_path, int flags,
const struct path *real_path,
const struct cred *cred);
ssize_t backing_file_read_iter(struct file *file, struct iov_iter *iter,
struct kiocb *iocb, int flags,
struct backing_file_ctx *ctx);
ssize_t backing_file_write_iter(struct file *file, struct iov_iter *iter,
struct kiocb *iocb, int flags,
struct backing_file_ctx *ctx);
ssize_t backing_file_splice_read(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags,
struct backing_file_ctx *ctx);
ssize_t backing_file_splice_write(struct pipe_inode_info *pipe,
struct file *out, loff_t *ppos, size_t len,
unsigned int flags,
struct backing_file_ctx *ctx);
int backing_file_mmap(struct file *file, struct vm_area_struct *vma,
struct backing_file_ctx *ctx);
#endif /* _LINUX_BACKING_FILE_H */
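
As a complement to the read example near the top of this page, here is a
hedged sketch of the open and write sides of the same hypothetical consumer,
showing where backing_file_open() and the ->end_write() callback fit. Again,
every myfs_* identifier is invented; only the backing_file_*() calls and
struct backing_file_ctx are interfaces added by this merge.

static struct file *myfs_open_backing_file(struct file *file)
{
	struct path real_path;

	myfs_lookup_real(file->f_path.dentry, &real_path);	/* placeholder lookup */
	/*
	 * The returned file reports file->f_path to userspace but performs
	 * I/O on real_path, as described in backing_file_open()'s kerneldoc.
	 */
	return backing_file_open(&file->f_path, file->f_flags, &real_path,
				 current_cred());
}

static ssize_t myfs_write_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct backing_file_ctx ctx = {
		.cred      = myfs_creator_creds(file),	/* placeholder */
		.user_file = file,
		.end_write = myfs_file_modified,	/* e.g. copy size/mtime up (placeholder) */
	};

	/* The helper filters IOCB flags, overrides creds and handles AIO. */
	return backing_file_write_iter(myfs_real_file(file), iter, iocb,
				       iocb->ki_flags, &ctx);
}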

include/linux/fs.h

@@ -2575,9 +2575,6 @@ struct file *dentry_open(const struct path *path, int flags,
const struct cred *creds);
struct file *dentry_create(const struct path *path, int flags, umode_t mode,
const struct cred *cred);
struct file *backing_file_open(const struct path *user_path, int flags,
const struct path *real_path,
const struct cred *cred);
struct path *backing_file_user_path(struct file *f);
/*