// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Red Hat, Inc.
 */

#include <linux/cred.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/xattr.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/backing-file.h>
#include "overlayfs.h"

#include "../internal.h"	/* for sb_init_dio_done_wq */

struct ovl_aio_req {
	struct kiocb iocb;
	refcount_t ref;
	struct kiocb *orig_iocb;
	/* used for aio completion */
	struct work_struct work;
	long res;
};

static struct kmem_cache *ovl_aio_request_cachep;
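
/*
 * Debug helper: classify which real inode backs the overlay inode, for the
 * pr_debug() in ovl_open_realfile().  'l' - lower inode, 'u' - upper inode
 * with data, 'm' - metacopy upper inode (metadata copied up, data still on
 * the lower layer).
 */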
static char ovl_whatisit(struct inode *inode, struct inode *realinode)
{
	if (realinode != ovl_inode_upper(inode))
		return 'l';
	if (ovl_has_upperdata(inode))
		return 'u';
	else
		return 'm';
}

/* No atime modification on underlying */
#define OVL_OPEN_FLAGS (O_NOATIME)
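
/*
 * Open the real (upper or lower) file backing an overlay file.  The open
 * permission is rechecked against the real inode and the file is then
 * opened with backing_file_open(), both under the mounter's credentials.
 * O_NOATIME is dropped unless the mounter owns (or is capable over) the
 * real inode.
 */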
static struct file *ovl_open_realfile(const struct file *file,
				      const struct path *realpath)
{
	struct inode *realinode = d_inode(realpath->dentry);
	struct inode *inode = file_inode(file);
	struct mnt_idmap *real_idmap;
	struct file *realfile;
	const struct cred *old_cred;
	int flags = file->f_flags | OVL_OPEN_FLAGS;
	int acc_mode = ACC_MODE(flags);
	int err;

	if (flags & O_APPEND)
		acc_mode |= MAY_APPEND;

	old_cred = ovl_override_creds(inode->i_sb);
	real_idmap = mnt_idmap(realpath->mnt);
	err = inode_permission(real_idmap, realinode, MAY_OPEN | acc_mode);
	if (err) {
		realfile = ERR_PTR(err);
	} else {
		if (!inode_owner_or_capable(real_idmap, realinode))
			flags &= ~O_NOATIME;

		realfile = backing_file_open(&file->f_path, flags, realpath,
					     current_cred());
	}
	revert_creds(old_cred);

	pr_debug("open(%p[%pD2/%c], 0%o) -> (%p, 0%o)\n",
		 file, file, ovl_whatisit(inode, realinode), file->f_flags,
		 realfile, IS_ERR(realfile) ? 0 : realfile->f_flags);

	return realfile;
}

#define OVL_SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT)

static int ovl_change_flags(struct file *file, unsigned int flags)
{
	struct inode *inode = file_inode(file);
	int err;

	flags &= OVL_SETFL_MASK;

	if (((flags ^ file->f_flags) & O_APPEND) && IS_APPEND(inode))
		return -EPERM;

	if ((flags & O_DIRECT) && !(file->f_mode & FMODE_CAN_ODIRECT))
		return -EINVAL;

	if (file->f_op->check_flags) {
		err = file->f_op->check_flags(flags);
		if (err)
			return err;
	}

	spin_lock(&file->f_lock);
	file->f_flags = (file->f_flags & ~OVL_SETFL_MASK) | flags;
	file->f_iocb_flags = iocb_flags(file);
	spin_unlock(&file->f_lock);

	return 0;
}
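
/*
 * Get the real file to operate on.  Normally this is the file opened in
 * ovl_open() and stashed in ->private_data, but if the dentry was copied
 * up since the open, a fresh real file is opened (and must be released by
 * the caller via fdput()).  Setfl-style flag changes made on the overlay
 * file are propagated to the real file here as well.
 */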
static int ovl_real_fdget_meta(const struct file *file, struct fd *real,
			       bool allow_meta)
{
	struct dentry *dentry = file_dentry(file);
	struct path realpath;
	int err;

	real->flags = 0;
	real->file = file->private_data;

	if (allow_meta) {
		ovl_path_real(dentry, &realpath);
	} else {
		/* lazy lookup and verify of lowerdata */
		err = ovl_verify_lowerdata(dentry);
		if (err)
			return err;

		ovl_path_realdata(dentry, &realpath);
	}
	if (!realpath.dentry)
		return -EIO;

	/* Has it been copied up since we'd opened it? */
	if (unlikely(file_inode(real->file) != d_inode(realpath.dentry))) {
		real->flags = FDPUT_FPUT;
		real->file = ovl_open_realfile(file, &realpath);

		return PTR_ERR_OR_ZERO(real->file);
	}

	/* Did the flags change since open? */
	if (unlikely((file->f_flags ^ real->file->f_flags) & ~OVL_OPEN_FLAGS))
		return ovl_change_flags(real->file, file->f_flags);

	return 0;
}
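
/*
 * Directories use the real dir file cached by ovl_dir_real_file(); regular
 * files go through ovl_real_fdget_meta() with allow_meta=false, i.e. the
 * returned real file must be the one that carries the data.
 */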
static int ovl_real_fdget(const struct file *file, struct fd *real)
{
	if (d_is_dir(file_dentry(file))) {
		real->flags = 0;
		real->file = ovl_dir_real_file(file, false);

		return PTR_ERR_OR_ZERO(real->file);
	}

	return ovl_real_fdget_meta(file, real, false);
}

static int ovl_open(struct inode *inode, struct file *file)
{
	struct dentry *dentry = file_dentry(file);
	struct file *realfile;
	struct path realpath;
	int err;

	/* lazy lookup and verify lowerdata */
	err = ovl_verify_lowerdata(dentry);
	if (err)
		return err;

	err = ovl_maybe_copy_up(dentry, file->f_flags);
	if (err)
		return err;

	/* No longer need these flags, so don't pass them on to underlying fs */
	file->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);

	ovl_path_realdata(dentry, &realpath);
	if (!realpath.dentry)
		return -EIO;

	realfile = ovl_open_realfile(file, &realpath);
	if (IS_ERR(realfile))
		return PTR_ERR(realfile);

	file->private_data = realfile;

	return 0;
}

static int ovl_release(struct inode *inode, struct file *file)
{
	fput(file->private_data);

	return 0;
}

static loff_t ovl_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file_inode(file);
	struct fd real;
	const struct cred *old_cred;
	loff_t ret;

	/*
	 * The two special cases below do not need to involve real fs,
	 * so we can optimize concurrent callers.
	 */
	if (offset == 0) {
		if (whence == SEEK_CUR)
			return file->f_pos;

		if (whence == SEEK_SET)
			return vfs_setpos(file, 0, 0);
	}

	ret = ovl_real_fdget(file, &real);
	if (ret)
		return ret;

	/*
	 * Overlay file f_pos is the master copy that is preserved
	 * through copy up and modified on read/write, but only real
	 * fs knows how to SEEK_HOLE/SEEK_DATA and real fs may impose
	 * limitations that are more strict than ->s_maxbytes for specific
	 * files, so we use the real file to perform seeks.
	 */
	ovl_inode_lock(inode);
	real.file->f_pos = file->f_pos;

	old_cred = ovl_override_creds(inode->i_sb);
	ret = vfs_llseek(real.file, offset, whence);
	revert_creds(old_cred);

	file->f_pos = real.file->f_pos;
	ovl_inode_unlock(inode);

	fdput(real);

	return ret;
}

static void ovl_file_modified(struct file *file)
{
	/* Update size/mtime */
	ovl_copyattr(file_inode(file));
}
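
/*
 * Propagate a read access: if the upper inode's mtime/ctime have moved on
 * (e.g. because writes go to the real upper file), copy them to the overlay
 * inode first, then update the overlay atime.  Nothing is done for O_NOATIME
 * files or for files without an upper inode.
 */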
static void ovl_file_accessed(struct file *file)
{
	struct inode *inode, *upperinode;
	struct timespec64 ctime, uctime;
	struct timespec64 mtime, umtime;

	if (file->f_flags & O_NOATIME)
		return;

	inode = file_inode(file);
	upperinode = ovl_inode_upper(inode);

	if (!upperinode)
		return;

	ctime = inode_get_ctime(inode);
	uctime = inode_get_ctime(upperinode);
	mtime = inode_get_mtime(inode);
	umtime = inode_get_mtime(upperinode);
	if ((!timespec64_equal(&mtime, &umtime)) ||
	     !timespec64_equal(&ctime, &uctime)) {
		inode_set_mtime_to_ts(inode, inode_get_mtime(upperinode));
		inode_set_ctime_to_ts(inode, uctime);
	}

	touch_atime(&file->f_path);
}

#define OVL_IOCB_MASK \
	(IOCB_NOWAIT | IOCB_HIPRI | IOCB_DSYNC | IOCB_SYNC | IOCB_APPEND)

static rwf_t iocb_to_rw_flags(int flags)
{
	return (__force rwf_t)(flags & OVL_IOCB_MASK);
}
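
/*
 * Async (kiocb based) requests clone the overlay kiocb onto the real file
 * and hold two references on the ovl_aio_req: one dropped by the submitter,
 * one by the completion path, so the request (and the pinned real file)
 * stays alive until both are done.  On completion, ki_pos and, for writes,
 * size/mtime are propagated back to the overlay file.
 */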
static inline void ovl_aio_put(struct ovl_aio_req *aio_req)
{
	if (refcount_dec_and_test(&aio_req->ref)) {
		fput(aio_req->iocb.ki_filp);
		kmem_cache_free(ovl_aio_request_cachep, aio_req);
	}
}

static void ovl_aio_cleanup_handler(struct ovl_aio_req *aio_req)
{
	struct kiocb *iocb = &aio_req->iocb;
	struct kiocb *orig_iocb = aio_req->orig_iocb;

	if (iocb->ki_flags & IOCB_WRITE)
		ovl_file_modified(orig_iocb->ki_filp);

	orig_iocb->ki_pos = iocb->ki_pos;
	ovl_aio_put(aio_req);
}

static void ovl_aio_rw_complete(struct kiocb *iocb, long res)
{
	struct ovl_aio_req *aio_req = container_of(iocb,
						   struct ovl_aio_req, iocb);
	struct kiocb *orig_iocb = aio_req->orig_iocb;

	if (iocb->ki_flags & IOCB_WRITE)
		kiocb_end_write(iocb);

	ovl_aio_cleanup_handler(aio_req);
	orig_iocb->ki_complete(orig_iocb, res);
}

static void ovl_aio_complete_work(struct work_struct *work)
{
	struct ovl_aio_req *aio_req = container_of(work,
						   struct ovl_aio_req, work);

	ovl_aio_rw_complete(&aio_req->iocb, aio_req->res);
}

static void ovl_aio_queue_completion(struct kiocb *iocb, long res)
{
	struct ovl_aio_req *aio_req = container_of(iocb,
						   struct ovl_aio_req, iocb);
	struct kiocb *orig_iocb = aio_req->orig_iocb;

	/*
	 * Punt to a work queue to serialize updates of mtime/size.
	 */
	aio_req->res = res;
	INIT_WORK(&aio_req->work, ovl_aio_complete_work);
	queue_work(file_inode(orig_iocb->ki_filp)->i_sb->s_dio_done_wq,
		   &aio_req->work);
}

static int ovl_init_aio_done_wq(struct super_block *sb)
{
	if (sb->s_dio_done_wq)
		return 0;

	return sb_init_dio_done_wq(sb);
}
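
/*
 * Read path: synchronous kiocbs are translated to vfs_iter_read() on the
 * real file; async kiocbs are cloned onto the real file via an ovl_aio_req
 * and completed through ovl_aio_rw_complete().  Either way the call runs
 * with the mounter's credentials, and atime is updated afterwards.
 */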
static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct fd real;
	const struct cred *old_cred;
	ssize_t ret;

	if (!iov_iter_count(iter))
		return 0;

	ret = ovl_real_fdget(file, &real);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (iocb->ki_flags & IOCB_DIRECT &&
	    !(real.file->f_mode & FMODE_CAN_ODIRECT))
		goto out_fdput;

	old_cred = ovl_override_creds(file_inode(file)->i_sb);
	if (is_sync_kiocb(iocb)) {
		rwf_t rwf = iocb_to_rw_flags(iocb->ki_flags);

		ret = vfs_iter_read(real.file, iter, &iocb->ki_pos, rwf);
	} else {
		struct ovl_aio_req *aio_req;

		ret = -ENOMEM;
		aio_req = kmem_cache_zalloc(ovl_aio_request_cachep, GFP_KERNEL);
		if (!aio_req)
			goto out;

		aio_req->orig_iocb = iocb;
		kiocb_clone(&aio_req->iocb, iocb, get_file(real.file));
		aio_req->iocb.ki_complete = ovl_aio_rw_complete;
		refcount_set(&aio_req->ref, 2);
		ret = vfs_iocb_iter_read(real.file, &aio_req->iocb, iter);
		ovl_aio_put(aio_req);
		if (ret != -EIOCBQUEUED)
			ovl_aio_cleanup_handler(aio_req);
	}
out:
	revert_creds(old_cred);
	ovl_file_accessed(file);
out_fdput:
	fdput(real);

	return ret;
}
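
/*
 * Write path: as in ovl_read_iter(), but under the overlay inode lock and
 * with extra bookkeeping: attributes are copied up front, privileges
 * (suid/capabilities) are stripped, sync flags are dropped when the
 * overlayfs instance should not sync (e.g. a "volatile" mount), and async
 * completions are punted to the s_dio_done_wq workqueue so that size/mtime
 * updates are serialized.
 */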
static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct fd real;
	const struct cred *old_cred;
	ssize_t ret;
	int ifl = iocb->ki_flags;

	if (!iov_iter_count(iter))
		return 0;

	inode_lock(inode);
	/* Update mode */
	ovl_copyattr(inode);
	ret = file_remove_privs(file);
	if (ret)
		goto out_unlock;

	ret = ovl_real_fdget(file, &real);
	if (ret)
		goto out_unlock;

	ret = -EINVAL;
	if (iocb->ki_flags & IOCB_DIRECT &&
	    !(real.file->f_mode & FMODE_CAN_ODIRECT))
		goto out_fdput;

	if (!ovl_should_sync(OVL_FS(inode->i_sb)))
		ifl &= ~(IOCB_DSYNC | IOCB_SYNC);

	/*
	 * Overlayfs doesn't support deferred completions, don't copy
	 * this property in case it is set by the issuer.
	 */
	ifl &= ~IOCB_DIO_CALLER_COMP;

	old_cred = ovl_override_creds(file_inode(file)->i_sb);
	if (is_sync_kiocb(iocb)) {
		rwf_t rwf = iocb_to_rw_flags(ifl);

		ret = vfs_iter_write(real.file, iter, &iocb->ki_pos, rwf);
		/* Update size */
		ovl_file_modified(file);
	} else {
		struct ovl_aio_req *aio_req;

		ret = ovl_init_aio_done_wq(inode->i_sb);
		if (ret)
			goto out;

		ret = -ENOMEM;
		aio_req = kmem_cache_zalloc(ovl_aio_request_cachep, GFP_KERNEL);
		if (!aio_req)
			goto out;

		aio_req->orig_iocb = iocb;
		kiocb_clone(&aio_req->iocb, iocb, get_file(real.file));
		aio_req->iocb.ki_flags = ifl;
		aio_req->iocb.ki_complete = ovl_aio_queue_completion;
		refcount_set(&aio_req->ref, 2);
		ret = vfs_iocb_iter_write(real.file, &aio_req->iocb, iter);
		ovl_aio_put(aio_req);
		if (ret != -EIOCBQUEUED)
			ovl_aio_cleanup_handler(aio_req);
	}
out:
	revert_creds(old_cred);
out_fdput:
	fdput(real);

out_unlock:
	inode_unlock(inode);

	return ret;
}

static ssize_t ovl_splice_read(struct file *in, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags)
{
	const struct cred *old_cred;
	struct fd real;
	ssize_t ret;

	ret = ovl_real_fdget(in, &real);
	if (ret)
		return ret;

	old_cred = ovl_override_creds(file_inode(in)->i_sb);
	ret = vfs_splice_read(real.file, ppos, pipe, len, flags);
	revert_creds(old_cred);
	ovl_file_accessed(in);

	fdput(real);

	return ret;
}

/*
 * Calling iter_file_splice_write() directly from overlay's f_op may deadlock
 * due to lock order inversion between pipe->mutex in iter_file_splice_write()
 * and file_start_write(real.file) in ovl_write_iter().
 *
 * So do everything ovl_write_iter() does and call iter_file_splice_write() on
 * the real file.
 */
static ssize_t ovl_splice_write(struct pipe_inode_info *pipe, struct file *out,
				loff_t *ppos, size_t len, unsigned int flags)
{
	struct fd real;
	const struct cred *old_cred;
	struct inode *inode = file_inode(out);
	ssize_t ret;

	inode_lock(inode);
	/* Update mode */
	ovl_copyattr(inode);
	ret = file_remove_privs(out);
	if (ret)
		goto out_unlock;

	ret = ovl_real_fdget(out, &real);
	if (ret)
		goto out_unlock;

	old_cred = ovl_override_creds(inode->i_sb);
	file_start_write(real.file);
	ret = iter_file_splice_write(pipe, real.file, ppos, len, flags);
	file_end_write(real.file);
	/* Update size */
	ovl_file_modified(out);
	revert_creds(old_cred);
	fdput(real);

out_unlock:
	inode_unlock(inode);

	return ret;
}

static int ovl_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct fd real;
	const struct cred *old_cred;
	int ret;

	/*
	 * For "volatile" overlay mounts, syncing the upper fs is skipped, but
	 * a writeback error seen on the upper fs since the overlay was
	 * mounted is still reported here (and on syncfs) rather than being
	 * silently dropped; ovl_sync_status() returns <= 0 in those cases.
	 */
	ret = ovl_sync_status(OVL_FS(file_inode(file)->i_sb));
	if (ret <= 0)
		return ret;

	ret = ovl_real_fdget_meta(file, &real, !datasync);
	if (ret)
		return ret;

	/* Don't sync lower file for fear of receiving EROFS error */
	if (file_inode(real.file) == ovl_inode_upper(file_inode(file))) {
		old_cred = ovl_override_creds(file_inode(file)->i_sb);
		ret = vfs_fsync_range(real.file, start, end, datasync);
		revert_creds(old_cred);
	}

	fdput(real);

	return ret;
}

static int ovl_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct file *realfile = file->private_data;
	const struct cred *old_cred;
	int ret;

	if (!realfile->f_op->mmap)
		return -ENODEV;

	if (WARN_ON(file != vma->vm_file))
		return -EIO;

	vma_set_file(vma, realfile);

	old_cred = ovl_override_creds(file_inode(file)->i_sb);
	ret = call_mmap(vma->vm_file, vma);
	revert_creds(old_cred);
	ovl_file_accessed(file);

	return ret;
}

static long ovl_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct fd real;
	const struct cred *old_cred;
	int ret;

	inode_lock(inode);
	/* Update mode */
	ovl_copyattr(inode);
	ret = file_remove_privs(file);
	if (ret)
		goto out_unlock;

	ret = ovl_real_fdget(file, &real);
	if (ret)
		goto out_unlock;

	old_cred = ovl_override_creds(file_inode(file)->i_sb);
	ret = vfs_fallocate(real.file, mode, offset, len);
	revert_creds(old_cred);

	/* Update size */
	ovl_file_modified(file);

	fdput(real);

out_unlock:
	inode_unlock(inode);

	return ret;
}

static int ovl_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
{
	struct fd real;
	const struct cred *old_cred;
	int ret;

	ret = ovl_real_fdget(file, &real);
	if (ret)
		return ret;

	old_cred = ovl_override_creds(file_inode(file)->i_sb);
	ret = vfs_fadvise(real.file, offset, len, advice);
	revert_creds(old_cred);

	fdput(real);

	return ret;
}

enum ovl_copyop {
	OVL_COPY,
	OVL_CLONE,
	OVL_DEDUPE,
};
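
/*
 * Common helper for copy_file_range, clone and dedupe: both files are
 * resolved to their real files, the operation is performed on those with
 * the mounter's credentials, and size/mtime of the destination overlay
 * inode are refreshed afterwards.  Dedupe skips the privilege stripping
 * and attribute copy-up step.
 */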
static loff_t ovl_copyfile(struct file *file_in, loff_t pos_in,
			   struct file *file_out, loff_t pos_out,
			   loff_t len, unsigned int flags, enum ovl_copyop op)
{
	struct inode *inode_out = file_inode(file_out);
	struct fd real_in, real_out;
	const struct cred *old_cred;
	loff_t ret;

	inode_lock(inode_out);
	if (op != OVL_DEDUPE) {
		/* Update mode */
		ovl_copyattr(inode_out);
		ret = file_remove_privs(file_out);
		if (ret)
			goto out_unlock;
	}

	ret = ovl_real_fdget(file_out, &real_out);
	if (ret)
		goto out_unlock;

	ret = ovl_real_fdget(file_in, &real_in);
	if (ret) {
		fdput(real_out);
		goto out_unlock;
	}

	old_cred = ovl_override_creds(file_inode(file_out)->i_sb);
	switch (op) {
	case OVL_COPY:
		ret = vfs_copy_file_range(real_in.file, pos_in,
					  real_out.file, pos_out, len, flags);
		break;

	case OVL_CLONE:
		ret = vfs_clone_file_range(real_in.file, pos_in,
					   real_out.file, pos_out, len, flags);
		break;

	case OVL_DEDUPE:
		ret = vfs_dedupe_file_range_one(real_in.file, pos_in,
						real_out.file, pos_out, len,
						flags);
		break;
	}
	revert_creds(old_cred);

	/* Update size */
	ovl_file_modified(file_out);

	fdput(real_in);
	fdput(real_out);

out_unlock:
	inode_unlock(inode_out);

	return ret;
}

static ssize_t ovl_copy_file_range(struct file *file_in, loff_t pos_in,
				   struct file *file_out, loff_t pos_out,
				   size_t len, unsigned int flags)
{
	return ovl_copyfile(file_in, pos_in, file_out, pos_out, len, flags,
			    OVL_COPY);
}

static loff_t ovl_remap_file_range(struct file *file_in, loff_t pos_in,
				   struct file *file_out, loff_t pos_out,
				   loff_t len, unsigned int remap_flags)
{
	enum ovl_copyop op;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (remap_flags & REMAP_FILE_DEDUP)
		op = OVL_DEDUPE;
	else
		op = OVL_CLONE;

	/*
	 * Don't copy up because of a dedupe request, this wouldn't make sense
	 * most of the time (data would be duplicated instead of deduplicated).
	 */
	if (op == OVL_DEDUPE &&
	    (!ovl_inode_upper(file_inode(file_in)) ||
	     !ovl_inode_upper(file_inode(file_out))))
		return -EPERM;

	return ovl_copyfile(file_in, pos_in, file_out, pos_out, len,
			    remap_flags, op);
}
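
/*
 * Forward flush (close-time) requests to the real file, but only if the
 * real file's f_op actually implements ->flush, again under the mounter's
 * credentials.
 */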
static int ovl_flush(struct file *file, fl_owner_t id)
{
	struct fd real;
	const struct cred *old_cred;
	int err;

	err = ovl_real_fdget(file, &real);
	if (err)
		return err;

	if (real.file->f_op->flush) {
		old_cred = ovl_override_creds(file_inode(file)->i_sb);
		err = real.file->f_op->flush(real.file, id);
		revert_creds(old_cred);
	}
	fdput(real);

	return err;
}

const struct file_operations ovl_file_operations = {
	.open		= ovl_open,
	.release	= ovl_release,
	.llseek		= ovl_llseek,
	.read_iter	= ovl_read_iter,
	.write_iter	= ovl_write_iter,
	.fsync		= ovl_fsync,
	.mmap		= ovl_mmap,
	.fallocate	= ovl_fallocate,
	.fadvise	= ovl_fadvise,
	.flush		= ovl_flush,
	.splice_read	= ovl_splice_read,
	.splice_write	= ovl_splice_write,

	.copy_file_range	= ovl_copy_file_range,
	.remap_file_range	= ovl_remap_file_range,
};

int __init ovl_aio_request_cache_init(void)
{
	ovl_aio_request_cachep = kmem_cache_create("ovl_aio_req",
						   sizeof(struct ovl_aio_req),
						   0, SLAB_HWCACHE_ALIGN, NULL);
	if (!ovl_aio_request_cachep)
		return -ENOMEM;

	return 0;
}

void ovl_aio_request_cache_destroy(void)
{
	kmem_cache_destroy(ovl_aio_request_cachep);
}