// SPDX-License-Identifier: GPL-2.0
#ifndef NO_BCACHEFS_FS

#include "bcachefs.h"
#include "acl.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "chardev.h"
#include "dirent.h"
#include "extents.h"
#include "fs.h"
#include "fs-common.h"
#include "fs-io.h"
#include "fs-ioctl.h"
#include "fsck.h"
#include "inode.h"
#include "io.h"
#include "journal.h"
#include "keylist.h"
#include "quota.h"
#include "super.h"
#include "xattr.h"

#include <linux/aio.h>
#include <linux/backing-dev.h>
#include <linux/exportfs.h>
#include <linux/fiemap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/posix_acl.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/statfs.h>
#include <linux/string.h>
#include <linux/xattr.h>
static struct kmem_cache *bch2_inode_cache;

static void bch2_vfs_inode_init(struct btree_trans *, subvol_inum,
				struct bch_inode_info *,
				struct bch_inode_unpacked *);
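
/*
 * Two-mode pagecache lock: positive values of lock->v mean it is held for
 * adding pages to the page cache, negative values mean it is held to block
 * page cache additions. Either mode may be held by any number of threads,
 * but never both at once.
 */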
static void __pagecache_lock_put(struct pagecache_lock *lock, long i)
{
	BUG_ON(atomic_long_read(&lock->v) == 0);

	if (atomic_long_sub_return_release(i, &lock->v) == 0)
		wake_up_all(&lock->wait);
}

static bool __pagecache_lock_tryget(struct pagecache_lock *lock, long i)
{
	long v = atomic_long_read(&lock->v), old;

	do {
		old = v;

		if (i > 0 ? v < 0 : v > 0)
			return false;
	} while ((v = atomic_long_cmpxchg_acquire(&lock->v,
					old, old + i)) != old);
	return true;
}

static void __pagecache_lock_get(struct pagecache_lock *lock, long i)
{
	wait_event(lock->wait, __pagecache_lock_tryget(lock, i));
}

void bch2_pagecache_add_put(struct pagecache_lock *lock)
{
	__pagecache_lock_put(lock, 1);
}

bool bch2_pagecache_add_tryget(struct pagecache_lock *lock)
{
	return __pagecache_lock_tryget(lock, 1);
}

void bch2_pagecache_add_get(struct pagecache_lock *lock)
{
	__pagecache_lock_get(lock, 1);
}

void bch2_pagecache_block_put(struct pagecache_lock *lock)
{
	__pagecache_lock_put(lock, -1);
}

void bch2_pagecache_block_get(struct pagecache_lock *lock)
{
	__pagecache_lock_get(lock, -1);
}
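
/*
 * Sync the in-memory VFS inode with an updated bch_inode_unpacked after a
 * successful btree update; @fields selects which timestamps to copy back.
 */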
void bch2_inode_update_after_write(struct btree_trans *trans,
				   struct bch_inode_info *inode,
				   struct bch_inode_unpacked *bi,
				   unsigned fields)
{
	struct bch_fs *c = trans->c;

	BUG_ON(bi->bi_inum != inode->v.i_ino);

	bch2_assert_pos_locked(trans, BTREE_ID_inodes,
			       POS(0, bi->bi_inum),
			       0 && c->opts.inodes_use_key_cache);

	set_nlink(&inode->v, bch2_inode_nlink_get(bi));
	i_uid_write(&inode->v, bi->bi_uid);
	i_gid_write(&inode->v, bi->bi_gid);
	inode->v.i_mode	= bi->bi_mode;

	if (fields & ATTR_ATIME)
		inode->v.i_atime = bch2_time_to_timespec(c, bi->bi_atime);
	if (fields & ATTR_MTIME)
		inode->v.i_mtime = bch2_time_to_timespec(c, bi->bi_mtime);
	if (fields & ATTR_CTIME)
		inode_set_ctime_to_ts(&inode->v, bch2_time_to_timespec(c, bi->bi_ctime));

	inode->ei_inode		= *bi;

	bch2_inode_flags_to_vfs(inode);
}
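
/*
 * Update an inode in its own btree transaction, retrying on -EINTR
 * (transaction restart); @set is an optional callback that modifies the
 * unpacked inode while the btree iterator holds the node lock.
 */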
int __must_check bch2_write_inode(struct bch_fs *c,
				  struct bch_inode_info *inode,
				  inode_set_fn set,
				  void *p, unsigned fields)
{
	struct btree_trans trans;
	struct btree_iter iter = { NULL };
	struct bch_inode_unpacked inode_u;
	int ret;

	bch2_trans_init(&trans, c, 0, 512);
	trans.ip = _RET_IP_;
retry:
	bch2_trans_begin(&trans);

	ret   = bch2_inode_peek(&trans, &iter, &inode_u, inode_inum(inode),
				BTREE_ITER_INTENT) ?:
		(set ? set(inode, &inode_u, p) : 0) ?:
		bch2_inode_write(&trans, &iter, &inode_u) ?:
		bch2_trans_commit(&trans, NULL, NULL, BTREE_INSERT_NOFAIL);

	/*
	 * the btree node lock protects inode->ei_inode, not ei_update_lock;
	 * this is important for inode updates via bchfs_write_index_update
	 */
	if (!ret)
		bch2_inode_update_after_write(&trans, inode, &inode_u, fields);

	bch2_trans_iter_exit(&trans, &iter);

	if (ret == -EINTR)
		goto retry;

	bch2_trans_exit(&trans);
	return ret < 0 ? ret : 0;
}
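
/*
 * Transfer this inode's quota usage (blocks plus reservations) from its
 * current qids to @new_qid for the quota types in @qtypes.
 */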
int bch2_fs_quota_transfer(struct bch_fs *c,
			   struct bch_inode_info *inode,
			   struct bch_qid new_qid,
			   unsigned qtypes,
			   enum quota_acct_mode mode)
{
	unsigned i;
	int ret;

	qtypes &= enabled_qtypes(c);

	for (i = 0; i < QTYP_NR; i++)
		if (new_qid.q[i] == inode->ei_qid.q[i])
			qtypes &= ~(1U << i);

	if (!qtypes)
		return 0;

	mutex_lock(&inode->ei_quota_lock);

	ret = bch2_quota_transfer(c, qtypes, new_qid,
				  inode->ei_qid,
				  inode->v.i_blocks +
				  inode->ei_quota_reserved,
				  mode);
	if (!ret)
		for (i = 0; i < QTYP_NR; i++)
			if (qtypes & (1 << i))
				inode->ei_qid.q[i] = new_qid.q[i];

	mutex_unlock(&inode->ei_quota_lock);

	return ret;
}
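
/*
 * VFS inodes are keyed by (subvolume, inode number): both must match in
 * iget5 lookups, since the same inode number can exist in many snapshots.
 */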
static int bch2_iget5_test(struct inode *vinode, void *p)
{
	struct bch_inode_info *inode = to_bch_ei(vinode);
	subvol_inum *inum = p;

	return inode->ei_subvol == inum->subvol &&
		inode->ei_inode.bi_inum == inum->inum;
}

static int bch2_iget5_set(struct inode *vinode, void *p)
{
	struct bch_inode_info *inode = to_bch_ei(vinode);
	subvol_inum *inum = p;

	inode->v.i_ino		= inum->inum;
	inode->ei_subvol	= inum->subvol;
	inode->ei_inode.bi_inum	= inum->inum;
	return 0;
}

static unsigned bch2_inode_hash(subvol_inum inum)
{
	return jhash_3words(inum.subvol, inum.inum >> 32, inum.inum, JHASH_INITVAL);
}

struct inode *bch2_vfs_inode_get(struct bch_fs *c, subvol_inum inum)
{
	struct bch_inode_unpacked inode_u;
	struct bch_inode_info *inode;
	struct btree_trans trans;
	int ret;

	inode = to_bch_ei(iget5_locked(c->vfs_sb,
				       bch2_inode_hash(inum),
				       bch2_iget5_test,
				       bch2_iget5_set,
				       &inum));
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->v.i_state & I_NEW))
		return &inode->v;

	bch2_trans_init(&trans, c, 8, 0);
	ret = lockrestart_do(&trans,
		bch2_inode_find_by_inum_trans(&trans, inum, &inode_u));

	if (!ret)
		bch2_vfs_inode_init(&trans, inum, inode, &inode_u);
	bch2_trans_exit(&trans);

	if (ret) {
		iget_failed(&inode->v);
		return ERR_PTR(ret);
	}

	unlock_new_inode(&inode->v);

	return &inode->v;
}
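
/*
 * Common create path for mknod/create/mkdir/symlink/tmpfile and subvolume
 * creation; with BCH_CREATE_TMPFILE no dirent is created and the directory
 * is left untouched.
 */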
struct bch_inode_info *
__bch2_create(struct mnt_idmap *idmap,
	      struct bch_inode_info *dir, struct dentry *dentry,
	      umode_t mode, dev_t rdev, subvol_inum snapshot_src,
	      unsigned flags)
{
	struct bch_fs *c = dir->v.i_sb->s_fs_info;
	struct btree_trans trans;
	struct bch_inode_unpacked dir_u;
	struct bch_inode_info *inode, *old;
	struct bch_inode_unpacked inode_u;
	struct posix_acl *default_acl = NULL, *acl = NULL;
	subvol_inum inum;
	u64 journal_seq = 0;
	int ret;

	/*
	 * preallocate acls + vfs inode before btree transaction, so that
	 * nothing can fail after the transaction succeeds:
	 */
#ifdef CONFIG_BCACHEFS_POSIX_ACL
	ret = posix_acl_create(&dir->v, &mode, &default_acl, &acl);
	if (ret)
		return ERR_PTR(ret);
#endif
	inode = to_bch_ei(new_inode(c->vfs_sb));
	if (unlikely(!inode)) {
		inode = ERR_PTR(-ENOMEM);
		goto err;
	}

	bch2_inode_init_early(c, &inode_u);

	if (!(flags & BCH_CREATE_TMPFILE))
		mutex_lock(&dir->ei_update_lock);

	bch2_trans_init(&trans, c, 8,
			2048 + (!(flags & BCH_CREATE_TMPFILE)
				? dentry->d_name.len : 0));
retry:
	bch2_trans_begin(&trans);

	ret   = bch2_create_trans(&trans,
				  inode_inum(dir), &dir_u, &inode_u,
				  !(flags & BCH_CREATE_TMPFILE)
				  ? &dentry->d_name : NULL,
				  from_kuid(i_user_ns(&dir->v), current_fsuid()),
				  from_kgid(i_user_ns(&dir->v), current_fsgid()),
				  mode, rdev,
				  default_acl, acl, snapshot_src, flags) ?:
		bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, 1,
				KEY_TYPE_QUOTA_PREALLOC);
	if (unlikely(ret))
		goto err_before_quota;

	ret   = bch2_trans_commit(&trans, NULL, &journal_seq, 0);
	if (unlikely(ret)) {
		bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, -1,
				KEY_TYPE_QUOTA_WARN);
err_before_quota:
		if (ret == -EINTR)
			goto retry;
		goto err_trans;
	}

	if (!(flags & BCH_CREATE_TMPFILE)) {
		bch2_inode_update_after_write(&trans, dir, &dir_u,
					      ATTR_MTIME|ATTR_CTIME);
		mutex_unlock(&dir->ei_update_lock);
	}

	inum.subvol = inode_u.bi_subvol ?: dir->ei_subvol;
	inum.inum = inode_u.bi_inum;

	bch2_iget5_set(&inode->v, &inum);
	bch2_vfs_inode_init(&trans, inum, inode, &inode_u);

	set_cached_acl(&inode->v, ACL_TYPE_ACCESS, acl);
	set_cached_acl(&inode->v, ACL_TYPE_DEFAULT, default_acl);

	/*
	 * we must insert the new inode into the inode cache before calling
	 * bch2_trans_exit() and dropping locks, else we could race with another
	 * thread pulling the inode in and modifying it:
	 */
	inode->v.i_state |= I_CREATING;

	old = to_bch_ei(inode_insert5(&inode->v,
				      bch2_inode_hash(inum),
				      bch2_iget5_test,
				      bch2_iget5_set,
				      &inum));
	BUG_ON(!old);

	if (unlikely(old != inode)) {
		/*
		 * We raced, another process pulled the new inode into cache
		 * before us:
		 */
		make_bad_inode(&inode->v);
		iput(&inode->v);

		inode = old;
	} else {
		/*
		 * we really don't want insert_inode_locked2() to be setting
		 * I_NEW...
		 */
		unlock_new_inode(&inode->v);
	}

	bch2_trans_exit(&trans);
err:
	posix_acl_release(default_acl);
	posix_acl_release(acl);
	return inode;
err_trans:
	if (!(flags & BCH_CREATE_TMPFILE))
		mutex_unlock(&dir->ei_update_lock);

	bch2_trans_exit(&trans);
	make_bad_inode(&inode->v);
	iput(&inode->v);
	inode = ERR_PTR(ret);
	goto err;
}
/* methods */

static struct dentry *bch2_lookup(struct inode *vdir, struct dentry *dentry,
				  unsigned int flags)
{
	struct bch_fs *c = vdir->i_sb->s_fs_info;
	struct bch_inode_info *dir = to_bch_ei(vdir);
	struct bch_hash_info hash = bch2_hash_info_init(c, &dir->ei_inode);
	struct inode *vinode = NULL;
	subvol_inum inum = { .subvol = 1 };
	int ret;

	ret = bch2_dirent_lookup(c, inode_inum(dir), &hash,
				 &dentry->d_name, &inum);

	if (!ret)
		vinode = bch2_vfs_inode_get(c, inum);

	return d_splice_alias(vinode, dentry);
}

static int bch2_mknod(struct mnt_idmap *idmap,
		      struct inode *vdir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	struct bch_inode_info *inode =
		__bch2_create(idmap, to_bch_ei(vdir), dentry, mode, rdev,
			      (subvol_inum) { 0 }, 0);

	if (IS_ERR(inode))
		return PTR_ERR(inode);

	d_instantiate(dentry, &inode->v);
	return 0;
}

static int bch2_create(struct mnt_idmap *idmap,
		       struct inode *vdir, struct dentry *dentry,
		       umode_t mode, bool excl)
{
	return bch2_mknod(idmap, vdir, dentry, mode|S_IFREG, 0);
}
static int __bch2_link(struct bch_fs *c,
		       struct bch_inode_info *inode,
		       struct bch_inode_info *dir,
		       struct dentry *dentry)
{
	struct btree_trans trans;
	struct bch_inode_unpacked dir_u, inode_u;
	int ret;

	mutex_lock(&inode->ei_update_lock);
	bch2_trans_init(&trans, c, 4, 1024);

	ret = __bch2_trans_do(&trans, NULL, NULL, 0,
			bch2_link_trans(&trans,
					inode_inum(dir), &dir_u,
					inode_inum(inode), &inode_u,
					&dentry->d_name));

	if (likely(!ret)) {
		bch2_inode_update_after_write(&trans, dir, &dir_u,
					      ATTR_MTIME|ATTR_CTIME);
		bch2_inode_update_after_write(&trans, inode, &inode_u, ATTR_CTIME);
	}

	bch2_trans_exit(&trans);
	mutex_unlock(&inode->ei_update_lock);
	return ret;
}

static int bch2_link(struct dentry *old_dentry, struct inode *vdir,
		     struct dentry *dentry)
{
	struct bch_fs *c = vdir->i_sb->s_fs_info;
	struct bch_inode_info *dir = to_bch_ei(vdir);
	struct bch_inode_info *inode = to_bch_ei(old_dentry->d_inode);
	int ret;

	lockdep_assert_held(&inode->v.i_rwsem);

	ret = __bch2_link(c, inode, dir, dentry);
	if (unlikely(ret))
		return ret;

	ihold(&inode->v);
	d_instantiate(dentry, &inode->v);
	return 0;
}
int __bch2_unlink(struct inode *vdir, struct dentry *dentry,
		  bool deleting_snapshot)
{
	struct bch_fs *c = vdir->i_sb->s_fs_info;
	struct bch_inode_info *dir = to_bch_ei(vdir);
	struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
	struct bch_inode_unpacked dir_u, inode_u;
	struct btree_trans trans;
	int ret;

	bch2_lock_inodes(INODE_UPDATE_LOCK, dir, inode);
	bch2_trans_init(&trans, c, 4, 1024);

	ret = __bch2_trans_do(&trans, NULL, NULL,
			      BTREE_INSERT_NOFAIL,
			bch2_unlink_trans(&trans,
					  inode_inum(dir), &dir_u,
					  &inode_u, &dentry->d_name,
					  deleting_snapshot));

	if (likely(!ret)) {
		bch2_inode_update_after_write(&trans, dir, &dir_u,
					      ATTR_MTIME|ATTR_CTIME);
		bch2_inode_update_after_write(&trans, inode, &inode_u,
					      ATTR_MTIME);
	}

	bch2_trans_exit(&trans);
	bch2_unlock_inodes(INODE_UPDATE_LOCK, dir, inode);

	return ret;
}

static int bch2_unlink(struct inode *vdir, struct dentry *dentry)
{
	return __bch2_unlink(vdir, dentry, false);
}
static int bch2_symlink(struct mnt_idmap *idmap,
			struct inode *vdir, struct dentry *dentry,
			const char *symname)
{
	struct bch_fs *c = vdir->i_sb->s_fs_info;
	struct bch_inode_info *dir = to_bch_ei(vdir), *inode;
	int ret;

	inode = __bch2_create(idmap, dir, dentry, S_IFLNK|S_IRWXUGO, 0,
			      (subvol_inum) { 0 }, BCH_CREATE_TMPFILE);
	if (unlikely(IS_ERR(inode)))
		return PTR_ERR(inode);

	inode_lock(&inode->v);
	ret = page_symlink(&inode->v, symname, strlen(symname) + 1);
	inode_unlock(&inode->v);

	if (unlikely(ret))
		goto err;

	ret = filemap_write_and_wait_range(inode->v.i_mapping, 0, LLONG_MAX);
	if (unlikely(ret))
		goto err;

	ret = __bch2_link(c, inode, dir, dentry);
	if (unlikely(ret))
		goto err;

	d_instantiate(dentry, &inode->v);
	return 0;
err:
	iput(&inode->v);
	return ret;
}

static int bch2_mkdir(struct mnt_idmap *idmap,
		      struct inode *vdir, struct dentry *dentry, umode_t mode)
{
	return bch2_mknod(idmap, vdir, dentry, mode|S_IFDIR, 0);
}
static int bch2_rename2(struct mnt_idmap *idmap,
			struct inode *src_vdir, struct dentry *src_dentry,
			struct inode *dst_vdir, struct dentry *dst_dentry,
			unsigned flags)
{
	struct bch_fs *c = src_vdir->i_sb->s_fs_info;
	struct bch_inode_info *src_dir = to_bch_ei(src_vdir);
	struct bch_inode_info *dst_dir = to_bch_ei(dst_vdir);
	struct bch_inode_info *src_inode = to_bch_ei(src_dentry->d_inode);
	struct bch_inode_info *dst_inode = to_bch_ei(dst_dentry->d_inode);
	struct bch_inode_unpacked dst_dir_u, src_dir_u;
	struct bch_inode_unpacked src_inode_u, dst_inode_u;
	struct btree_trans trans;
	enum bch_rename_mode mode = flags & RENAME_EXCHANGE
		? BCH_RENAME_EXCHANGE
		: dst_dentry->d_inode
		? BCH_RENAME_OVERWRITE : BCH_RENAME;
	int ret;

	if (flags & ~(RENAME_NOREPLACE|RENAME_EXCHANGE))
		return -EINVAL;

	if (mode == BCH_RENAME_OVERWRITE) {
		ret = filemap_write_and_wait_range(src_inode->v.i_mapping,
						   0, LLONG_MAX);
		if (ret)
			return ret;
	}

	bch2_trans_init(&trans, c, 8, 2048);

	bch2_lock_inodes(INODE_UPDATE_LOCK,
			 src_dir,
			 dst_dir,
			 src_inode,
			 dst_inode);

	if (inode_attr_changing(dst_dir, src_inode, Inode_opt_project)) {
		ret = bch2_fs_quota_transfer(c, src_inode,
					     dst_dir->ei_qid,
					     1 << QTYP_PRJ,
					     KEY_TYPE_QUOTA_PREALLOC);
		if (ret)
			goto err;
	}

	if (mode == BCH_RENAME_EXCHANGE &&
	    inode_attr_changing(src_dir, dst_inode, Inode_opt_project)) {
		ret = bch2_fs_quota_transfer(c, dst_inode,
					     src_dir->ei_qid,
					     1 << QTYP_PRJ,
					     KEY_TYPE_QUOTA_PREALLOC);
		if (ret)
			goto err;
	}

	ret = __bch2_trans_do(&trans, NULL, NULL, 0,
			bch2_rename_trans(&trans,
					  inode_inum(src_dir), &src_dir_u,
					  inode_inum(dst_dir), &dst_dir_u,
					  &src_inode_u,
					  &dst_inode_u,
					  &src_dentry->d_name,
					  &dst_dentry->d_name,
					  mode));
	if (unlikely(ret))
		goto err;

	BUG_ON(src_inode->v.i_ino != src_inode_u.bi_inum);
	BUG_ON(dst_inode &&
	       dst_inode->v.i_ino != dst_inode_u.bi_inum);

	bch2_inode_update_after_write(&trans, src_dir, &src_dir_u,
				      ATTR_MTIME|ATTR_CTIME);

	if (src_dir != dst_dir)
		bch2_inode_update_after_write(&trans, dst_dir, &dst_dir_u,
					      ATTR_MTIME|ATTR_CTIME);

	bch2_inode_update_after_write(&trans, src_inode, &src_inode_u,
				      ATTR_CTIME);

	if (dst_inode)
		bch2_inode_update_after_write(&trans, dst_inode, &dst_inode_u,
					      ATTR_CTIME);
err:
	bch2_trans_exit(&trans);

	bch2_fs_quota_transfer(c, src_inode,
			       bch_qid(&src_inode->ei_inode),
			       1 << QTYP_PRJ,
			       KEY_TYPE_QUOTA_NOCHECK);
	if (dst_inode)
		bch2_fs_quota_transfer(c, dst_inode,
				       bch_qid(&dst_inode->ei_inode),
				       1 << QTYP_PRJ,
				       KEY_TYPE_QUOTA_NOCHECK);

	bch2_unlock_inodes(INODE_UPDATE_LOCK,
			   src_dir,
			   dst_dir,
			   src_inode,
			   dst_inode);

	return ret;
}
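
/* Apply the fields of a setattr request to an unpacked inode, in memory. */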
static void bch2_setattr_copy(struct mnt_idmap *idmap,
			      struct bch_inode_info *inode,
			      struct bch_inode_unpacked *bi,
			      struct iattr *attr)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		bi->bi_uid = from_kuid(i_user_ns(&inode->v), attr->ia_uid);
	if (ia_valid & ATTR_GID)
		bi->bi_gid = from_kgid(i_user_ns(&inode->v), attr->ia_gid);

	if (ia_valid & ATTR_SIZE)
		bi->bi_size = attr->ia_size;

	if (ia_valid & ATTR_ATIME)
		bi->bi_atime = timespec_to_bch2_time(c, attr->ia_atime);
	if (ia_valid & ATTR_MTIME)
		bi->bi_mtime = timespec_to_bch2_time(c, attr->ia_mtime);
	if (ia_valid & ATTR_CTIME)
		bi->bi_ctime = timespec_to_bch2_time(c, attr->ia_ctime);

	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;
		kgid_t gid = ia_valid & ATTR_GID
			? attr->ia_gid
			: inode->v.i_gid;

		if (!in_group_p(gid) &&
		    !capable_wrt_inode_uidgid(idmap, &inode->v, CAP_FSETID))
			mode &= ~S_ISGID;
		bi->bi_mode = mode;
	}
}
int bch2_setattr_nonsize(struct mnt_idmap *idmap,
			 struct bch_inode_info *inode,
			 struct iattr *attr)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_qid qid;
	struct btree_trans trans;
	struct btree_iter inode_iter = { NULL };
	struct bch_inode_unpacked inode_u;
	struct posix_acl *acl = NULL;
	int ret;

	mutex_lock(&inode->ei_update_lock);

	qid = inode->ei_qid;

	if (attr->ia_valid & ATTR_UID)
		qid.q[QTYP_USR] = from_kuid(i_user_ns(&inode->v), attr->ia_uid);

	if (attr->ia_valid & ATTR_GID)
		qid.q[QTYP_GRP] = from_kgid(i_user_ns(&inode->v), attr->ia_gid);

	ret = bch2_fs_quota_transfer(c, inode, qid, ~0,
				     KEY_TYPE_QUOTA_PREALLOC);
	if (ret)
		goto err;

	bch2_trans_init(&trans, c, 0, 0);
retry:
	bch2_trans_begin(&trans);
	kfree(acl);
	acl = NULL;

	ret = bch2_inode_peek(&trans, &inode_iter, &inode_u, inode_inum(inode),
			      BTREE_ITER_INTENT);
	if (ret)
		goto btree_err;

	bch2_setattr_copy(idmap, inode, &inode_u, attr);

	if (attr->ia_valid & ATTR_MODE) {
		ret = bch2_acl_chmod(&trans, inode_inum(inode), &inode_u,
				     inode_u.bi_mode, &acl);
		if (ret)
			goto btree_err;
	}

	ret =   bch2_inode_write(&trans, &inode_iter, &inode_u) ?:
		bch2_trans_commit(&trans, NULL, NULL,
				  BTREE_INSERT_NOFAIL);
btree_err:
	bch2_trans_iter_exit(&trans, &inode_iter);

	if (ret == -EINTR)
		goto retry;
	if (unlikely(ret))
		goto err_trans;

	bch2_inode_update_after_write(&trans, inode, &inode_u, attr->ia_valid);

	if (acl)
		set_cached_acl(&inode->v, ACL_TYPE_ACCESS, acl);
err_trans:
	bch2_trans_exit(&trans);
err:
	mutex_unlock(&inode->ei_update_lock);

	return ret;
}
static int bch2_getattr(struct mnt_idmap *idmap,
			const struct path *path, struct kstat *stat,
			u32 request_mask, unsigned query_flags)
{
	struct bch_inode_info *inode = to_bch_ei(d_inode(path->dentry));
	struct bch_fs *c = inode->v.i_sb->s_fs_info;

	stat->dev	= inode->v.i_sb->s_dev;
	stat->ino	= inode->v.i_ino;
	stat->mode	= inode->v.i_mode;
	stat->nlink	= inode->v.i_nlink;
	stat->uid	= inode->v.i_uid;
	stat->gid	= inode->v.i_gid;
	stat->rdev	= inode->v.i_rdev;
	stat->size	= i_size_read(&inode->v);
	stat->atime	= inode->v.i_atime;
	stat->mtime	= inode->v.i_mtime;
	stat->ctime	= inode_get_ctime(&inode->v);
	stat->blksize	= block_bytes(c);
	stat->blocks	= inode->v.i_blocks;

	if (request_mask & STATX_BTIME) {
		stat->result_mask |= STATX_BTIME;
		stat->btime = bch2_time_to_timespec(c, inode->ei_inode.bi_otime);
	}

	if (inode->ei_inode.bi_flags & BCH_INODE_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	stat->attributes_mask	 |= STATX_ATTR_IMMUTABLE;

	if (inode->ei_inode.bi_flags & BCH_INODE_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;
	stat->attributes_mask	 |= STATX_ATTR_APPEND;

	if (inode->ei_inode.bi_flags & BCH_INODE_NODUMP)
		stat->attributes |= STATX_ATTR_NODUMP;
	stat->attributes_mask	 |= STATX_ATTR_NODUMP;

	return 0;
}

static int bch2_setattr(struct mnt_idmap *idmap,
			struct dentry *dentry, struct iattr *iattr)
{
	struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
	int ret;

	lockdep_assert_held(&inode->v.i_rwsem);

	ret = setattr_prepare(idmap, dentry, iattr);
	if (ret)
		return ret;

	return iattr->ia_valid & ATTR_SIZE
		? bch2_truncate(idmap, inode, iattr)
		: bch2_setattr_nonsize(idmap, inode, iattr);
}

static int bch2_tmpfile(struct mnt_idmap *idmap,
			struct inode *vdir, struct file *file, umode_t mode)
{
	struct bch_inode_info *inode =
		__bch2_create(idmap, to_bch_ei(vdir),
			      file->f_path.dentry, mode, 0,
			      (subvol_inum) { 0 }, BCH_CREATE_TMPFILE);

	if (IS_ERR(inode))
		return PTR_ERR(inode);

	d_mark_tmpfile(file, &inode->v);
	d_instantiate(file->f_path.dentry, &inode->v);
	return finish_open_simple(file, 0);
}
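
/*
 * Report a single bcachefs extent key to fiemap: one fiemap extent per
 * pointer for normal extents, or a single inline/delalloc entry for inline
 * data and reservations.
 */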
static int bch2_fill_extent(struct bch_fs *c,
			    struct fiemap_extent_info *info,
			    struct bkey_s_c k, unsigned flags)
{
	if (bkey_extent_is_direct_data(k.k)) {
		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;
		int ret;

		if (k.k->type == KEY_TYPE_reflink_v)
			flags |= FIEMAP_EXTENT_SHARED;

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
			int flags2 = 0;
			u64 offset = p.ptr.offset;

			if (p.crc.compression_type)
				flags2 |= FIEMAP_EXTENT_ENCODED;
			else
				offset += p.crc.offset;

			if ((offset & (c->opts.block_size - 1)) ||
			    (k.k->size & (c->opts.block_size - 1)))
				flags2 |= FIEMAP_EXTENT_NOT_ALIGNED;

			ret = fiemap_fill_next_extent(info,
						bkey_start_offset(k.k) << 9,
						offset << 9,
						k.k->size << 9, flags|flags2);
			if (ret)
				return ret;
		}

		return 0;
	} else if (bkey_extent_is_inline_data(k.k)) {
		return fiemap_fill_next_extent(info,
					       bkey_start_offset(k.k) << 9,
					       0, k.k->size << 9,
					       flags|
					       FIEMAP_EXTENT_DATA_INLINE);
	} else if (k.k->type == KEY_TYPE_reservation) {
		return fiemap_fill_next_extent(info,
					       bkey_start_offset(k.k) << 9,
					       0, k.k->size << 9,
					       flags|
					       FIEMAP_EXTENT_DELALLOC|
					       FIEMAP_EXTENT_UNWRITTEN);
	} else {
		BUG();
	}
}
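
/*
 * Walk the extents btree for the requested range, resolving reflink
 * pointers; extents are reported one behind so that the last one can be
 * flagged FIEMAP_EXTENT_LAST.
 */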
static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
		       u64 start, u64 len)
{
	struct bch_fs *c = vinode->i_sb->s_fs_info;
	struct bch_inode_info *ei = to_bch_ei(vinode);
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_buf cur, prev;
	struct bpos end = POS(ei->v.i_ino, (start + len) >> 9);
	unsigned offset_into_extent, sectors;
	bool have_extent = false;
	u32 snapshot;
	int ret = 0;

	ret = fiemap_prep(&ei->v, info, start, &len, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (start + len < start)
		return -EINVAL;

	start >>= 9;

	bch2_bkey_buf_init(&cur);
	bch2_bkey_buf_init(&prev);
	bch2_trans_init(&trans, c, 0, 0);
retry:
	bch2_trans_begin(&trans);

	ret = bch2_subvolume_get_snapshot(&trans, ei->ei_subvol, &snapshot);
	if (ret)
		goto err;

	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
			     SPOS(ei->v.i_ino, start, snapshot), 0);

	while ((k = bch2_btree_iter_peek(&iter)).k &&
	       !(ret = bkey_err(k)) &&
	       bkey_cmp(iter.pos, end) < 0) {
		enum btree_id data_btree = BTREE_ID_extents;

		if (!bkey_extent_is_data(k.k) &&
		    k.k->type != KEY_TYPE_reservation) {
			bch2_btree_iter_advance(&iter);
			continue;
		}

		offset_into_extent	= iter.pos.offset -
			bkey_start_offset(k.k);
		sectors			= k.k->size - offset_into_extent;

		bch2_bkey_buf_reassemble(&cur, c, k);

		ret = bch2_read_indirect_extent(&trans, &data_btree,
					&offset_into_extent, &cur);
		if (ret)
			break;

		k = bkey_i_to_s_c(cur.k);
		bch2_bkey_buf_realloc(&prev, c, k.k->u64s);

		sectors = min(sectors, k.k->size - offset_into_extent);

		bch2_cut_front(POS(k.k->p.inode,
				   bkey_start_offset(k.k) +
				   offset_into_extent),
			       cur.k);
		bch2_key_resize(&cur.k->k, sectors);
		cur.k->k.p = iter.pos;
		cur.k->k.p.offset += cur.k->k.size;

		if (have_extent) {
			ret = bch2_fill_extent(c, info,
					bkey_i_to_s_c(prev.k), 0);
			if (ret)
				break;
		}

		bkey_copy(prev.k, cur.k);
		have_extent = true;

		bch2_btree_iter_set_pos(&iter,
			POS(iter.pos.inode, iter.pos.offset + sectors));
	}
	start = iter.pos.offset;
	bch2_trans_iter_exit(&trans, &iter);
err:
	if (ret == -EINTR)
		goto retry;

	if (!ret && have_extent)
		ret = bch2_fill_extent(c, info, bkey_i_to_s_c(prev.k),
				       FIEMAP_EXTENT_LAST);

	bch2_trans_exit(&trans);
	bch2_bkey_buf_exit(&cur, c);
	bch2_bkey_buf_exit(&prev, c);
	return ret < 0 ? ret : 0;
}
static const struct vm_operations_struct bch_vm_ops = {
	.fault		= bch2_page_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite   = bch2_page_mkwrite,
};

static int bch2_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);

	vma->vm_ops = &bch_vm_ops;

	return 0;
}

/* Directories: */

static loff_t bch2_dir_llseek(struct file *file, loff_t offset, int whence)
{
	return generic_file_llseek_size(file, offset, whence,
					S64_MAX, S64_MAX);
}

static int bch2_vfs_readdir(struct file *file, struct dir_context *ctx)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;

	if (!dir_emit_dots(file, ctx))
		return 0;

	return bch2_readdir(c, inode_inum(inode), ctx);
}

static const struct file_operations bch_file_operations = {
	.llseek		= bch2_llseek,
	.read_iter	= bch2_read_iter,
	.write_iter	= bch2_write_iter,
	.mmap		= bch2_mmap,
	.open		= generic_file_open,
	.fsync		= bch2_fsync,
	.splice_read	= filemap_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= bch2_fallocate_dispatch,
	.unlocked_ioctl = bch2_fs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= bch2_compat_fs_ioctl,
#endif
	.remap_file_range = bch2_remap_file_range,
};

static const struct inode_operations bch_file_inode_operations = {
	.getattr	= bch2_getattr,
	.setattr	= bch2_setattr,
	.fiemap		= bch2_fiemap,
	.listxattr	= bch2_xattr_list,
#ifdef CONFIG_BCACHEFS_POSIX_ACL
	.get_acl	= bch2_get_acl,
	.set_acl	= bch2_set_acl,
#endif
};

static const struct inode_operations bch_dir_inode_operations = {
	.lookup		= bch2_lookup,
	.create		= bch2_create,
	.link		= bch2_link,
	.unlink		= bch2_unlink,
	.symlink	= bch2_symlink,
	.mkdir		= bch2_mkdir,
	.rmdir		= bch2_unlink,
	.mknod		= bch2_mknod,
	.rename		= bch2_rename2,
	.getattr	= bch2_getattr,
	.setattr	= bch2_setattr,
	.tmpfile	= bch2_tmpfile,
	.listxattr	= bch2_xattr_list,
#ifdef CONFIG_BCACHEFS_POSIX_ACL
	.get_acl	= bch2_get_acl,
	.set_acl	= bch2_set_acl,
#endif
};

static const struct file_operations bch_dir_file_operations = {
	.llseek		= bch2_dir_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= bch2_vfs_readdir,
	.fsync		= bch2_fsync,
	.unlocked_ioctl = bch2_fs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= bch2_compat_fs_ioctl,
#endif
};

static const struct inode_operations bch_symlink_inode_operations = {
	.get_link	= page_get_link,
	.getattr	= bch2_getattr,
	.setattr	= bch2_setattr,
	.listxattr	= bch2_xattr_list,
#ifdef CONFIG_BCACHEFS_POSIX_ACL
	.get_acl	= bch2_get_acl,
	.set_acl	= bch2_set_acl,
#endif
};

static const struct inode_operations bch_special_inode_operations = {
	.getattr	= bch2_getattr,
	.setattr	= bch2_setattr,
	.listxattr	= bch2_xattr_list,
#ifdef CONFIG_BCACHEFS_POSIX_ACL
	.get_acl	= bch2_get_acl,
	.set_acl	= bch2_set_acl,
#endif
};

static const struct address_space_operations bch_address_space_operations = {
	.writepage	= bch2_writepage,
	.read_folio	= bch2_read_folio,
	.writepages	= bch2_writepages,
	.readahead	= bch2_readahead,
	.dirty_folio	= filemap_dirty_folio,
	.write_begin	= bch2_write_begin,
	.write_end	= bch2_write_end,
	.invalidate_folio = bch2_invalidate_folio,
	.release_folio	= bch2_release_folio,
	.direct_IO	= noop_direct_IO,
#ifdef CONFIG_MIGRATION
	.migrate_folio	= filemap_migrate_folio,
#endif
	.error_remove_page = generic_error_remove_page,
};
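
/*
 * NFS file handles: a bcachefs fid identifies an inode by (inum, subvol,
 * generation); the "with parent" variant also embeds the directory's fid so
 * disconnected dentries can be reconnected.
 */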
struct bcachefs_fid {
	u64		inum;
	u32		subvol;
	u32		gen;
} __packed;

struct bcachefs_fid_with_parent {
	struct bcachefs_fid	fid;
	struct bcachefs_fid	dir;
} __packed;

static int bcachefs_fid_valid(int fh_len, int fh_type)
{
	switch (fh_type) {
	case FILEID_BCACHEFS_WITHOUT_PARENT:
		return fh_len == sizeof(struct bcachefs_fid) / sizeof(u32);
	case FILEID_BCACHEFS_WITH_PARENT:
		return fh_len == sizeof(struct bcachefs_fid_with_parent) / sizeof(u32);
	default:
		return false;
	}
}

static struct bcachefs_fid bch2_inode_to_fid(struct bch_inode_info *inode)
{
	return (struct bcachefs_fid) {
		.inum	= inode->ei_inode.bi_inum,
		.subvol	= inode->ei_subvol,
		.gen	= inode->ei_inode.bi_generation,
	};
}

static int bch2_encode_fh(struct inode *vinode, u32 *fh, int *len,
			  struct inode *vdir)
{
	struct bch_inode_info *inode	= to_bch_ei(vinode);
	struct bch_inode_info *dir	= to_bch_ei(vdir);

	if (*len < sizeof(struct bcachefs_fid_with_parent) / sizeof(u32))
		return FILEID_INVALID;

	if (!S_ISDIR(inode->v.i_mode) && dir) {
		struct bcachefs_fid_with_parent *fid = (void *) fh;

		fid->fid = bch2_inode_to_fid(inode);
		fid->dir = bch2_inode_to_fid(dir);

		*len = sizeof(*fid) / sizeof(u32);
		return FILEID_BCACHEFS_WITH_PARENT;
	} else {
		struct bcachefs_fid *fid = (void *) fh;

		*fid = bch2_inode_to_fid(inode);

		*len = sizeof(*fid) / sizeof(u32);
		return FILEID_BCACHEFS_WITHOUT_PARENT;
	}
}

static struct inode *bch2_nfs_get_inode(struct super_block *sb,
					struct bcachefs_fid fid)
{
	struct bch_fs *c = sb->s_fs_info;
	struct inode *vinode = bch2_vfs_inode_get(c, (subvol_inum) {
				    .subvol = fid.subvol,
				    .inum = fid.inum,
	});
	if (!IS_ERR(vinode) && vinode->i_generation != fid.gen) {
		iput(vinode);
		vinode = ERR_PTR(-ESTALE);
	}
	return vinode;
}
static struct dentry *bch2_fh_to_dentry(struct super_block *sb, struct fid *_fid,
		int fh_len, int fh_type)
{
	struct bcachefs_fid *fid = (void *) _fid;

	if (!bcachefs_fid_valid(fh_len, fh_type))
		return NULL;

	return d_obtain_alias(bch2_nfs_get_inode(sb, *fid));
}

static struct dentry *bch2_fh_to_parent(struct super_block *sb, struct fid *_fid,
		int fh_len, int fh_type)
{
	struct bcachefs_fid_with_parent *fid = (void *) _fid;

	if (!bcachefs_fid_valid(fh_len, fh_type) ||
	    fh_type != FILEID_BCACHEFS_WITH_PARENT)
		return NULL;

	return d_obtain_alias(bch2_nfs_get_inode(sb, fid->dir));
}

static struct dentry *bch2_get_parent(struct dentry *child)
{
	struct bch_inode_info *inode = to_bch_ei(child->d_inode);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	subvol_inum parent_inum = {
		.subvol = inode->ei_inode.bi_parent_subvol ?:
			inode->ei_subvol,
		.inum = inode->ei_inode.bi_dir,
	};

	if (!parent_inum.inum)
		return NULL;

	return d_obtain_alias(bch2_vfs_inode_get(c, parent_inum));
}

static int bch2_get_name(struct dentry *parent, char *name, struct dentry *child)
{
	struct bch_inode_info *inode	= to_bch_ei(child->d_inode);
	struct bch_inode_info *dir	= to_bch_ei(parent->d_inode);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_trans trans;
	struct btree_iter iter1;
	struct btree_iter iter2;
	struct bkey_s_c k;
	struct bkey_s_c_dirent d;
	struct bch_inode_unpacked inode_u;
	subvol_inum target;
	u32 snapshot;
	unsigned name_len;
	int ret;

	if (!S_ISDIR(dir->v.i_mode))
		return -EINVAL;

	bch2_trans_init(&trans, c, 0, 0);
	bch2_trans_iter_init(&trans, &iter1, BTREE_ID_dirents,
			     POS(dir->ei_inode.bi_inum, 0), 0);
	bch2_trans_iter_init(&trans, &iter2, BTREE_ID_dirents,
			     POS(dir->ei_inode.bi_inum, 0), 0);
retry:
	bch2_trans_begin(&trans);

	ret = bch2_subvolume_get_snapshot(&trans, dir->ei_subvol, &snapshot);
	if (ret)
		goto err;

	bch2_btree_iter_set_snapshot(&iter1, snapshot);
	bch2_btree_iter_set_snapshot(&iter2, snapshot);

	ret = bch2_inode_find_by_inum_trans(&trans, inode_inum(inode), &inode_u);
	if (ret)
		goto err;

	if (inode_u.bi_dir == dir->ei_inode.bi_inum) {
		bch2_btree_iter_set_pos(&iter1, POS(inode_u.bi_dir, inode_u.bi_dir_offset));

		k = bch2_btree_iter_peek_slot(&iter1);
		ret = bkey_err(k);
		if (ret)
			goto err;

		if (k.k->type != KEY_TYPE_dirent) {
			ret = -ENOENT;
			goto err;
		}

		d = bkey_s_c_to_dirent(k);
		ret = bch2_dirent_read_target(&trans, inode_inum(dir), d, &target);
		if (ret > 0)
			ret = -ENOENT;
		if (ret)
			goto err;

		if (target.subvol	== inode->ei_subvol &&
		    target.inum		== inode->ei_inode.bi_inum)
			goto found;
	} else {
		/*
		 * File with multiple hardlinks and our backref is to the wrong
		 * directory - linear search:
		 */
		for_each_btree_key_continue_norestart(iter2, 0, k, ret) {
			if (k.k->p.inode > dir->ei_inode.bi_inum)
				break;

			if (k.k->type != KEY_TYPE_dirent)
				continue;

			d = bkey_s_c_to_dirent(k);
			ret = bch2_dirent_read_target(&trans, inode_inum(dir), d, &target);
			if (ret < 0)
				break;
			if (ret)
				continue;

			if (target.subvol	== inode->ei_subvol &&
			    target.inum		== inode->ei_inode.bi_inum)
				goto found;
		}
	}

	ret = -ENOENT;
	goto err;
found:
	name_len = min_t(unsigned, bch2_dirent_name_bytes(d), NAME_MAX);

	memcpy(name, d.v->d_name, name_len);
	name[name_len] = '\0';
err:
	if (ret == -EINTR)
		goto retry;

	bch2_trans_iter_exit(&trans, &iter1);
	bch2_trans_iter_exit(&trans, &iter2);
	bch2_trans_exit(&trans);

	return ret;
}
static const struct export_operations bch_export_ops = {
	.encode_fh	= bch2_encode_fh,
	.fh_to_dentry	= bch2_fh_to_dentry,
	.fh_to_parent	= bch2_fh_to_parent,
	.get_parent	= bch2_get_parent,
	.get_name	= bch2_get_name,
};
static void bch2_vfs_inode_init(struct btree_trans *trans, subvol_inum inum,
				struct bch_inode_info *inode,
				struct bch_inode_unpacked *bi)
{
	bch2_inode_update_after_write(trans, inode, bi, ~0);

	inode->v.i_blocks	= bi->bi_sectors;
	inode->v.i_ino		= bi->bi_inum;
	inode->v.i_rdev		= bi->bi_dev;
	inode->v.i_generation	= bi->bi_generation;
	inode->v.i_size		= bi->bi_size;

	inode->ei_flags		= 0;
	inode->ei_quota_reserved = 0;
	inode->ei_qid		= bch_qid(bi);
	inode->ei_subvol	= inum.subvol;

	inode->v.i_mapping->a_ops = &bch_address_space_operations;

	switch (inode->v.i_mode & S_IFMT) {
	case S_IFREG:
		inode->v.i_op	= &bch_file_inode_operations;
		inode->v.i_fop	= &bch_file_operations;
		break;
	case S_IFDIR:
		inode->v.i_op	= &bch_dir_inode_operations;
		inode->v.i_fop	= &bch_dir_file_operations;
		break;
	case S_IFLNK:
		inode_nohighmem(&inode->v);
		inode->v.i_op	= &bch_symlink_inode_operations;
		break;
	default:
		init_special_inode(&inode->v, inode->v.i_mode, inode->v.i_rdev);
		inode->v.i_op	= &bch_special_inode_operations;
		break;
	}
}
static struct inode *bch2_alloc_inode(struct super_block *sb)
{
	struct bch_inode_info *inode;

	inode = kmem_cache_alloc(bch2_inode_cache, GFP_NOFS);
	if (!inode)
		return NULL;

	inode_init_once(&inode->v);
	mutex_init(&inode->ei_update_lock);
	pagecache_lock_init(&inode->ei_pagecache_lock);
	mutex_init(&inode->ei_quota_lock);

	return &inode->v;
}

static void bch2_i_callback(struct rcu_head *head)
{
	struct inode *vinode = container_of(head, struct inode, i_rcu);
	struct bch_inode_info *inode = to_bch_ei(vinode);

	kmem_cache_free(bch2_inode_cache, inode);
}

static void bch2_destroy_inode(struct inode *vinode)
{
	call_rcu(&vinode->i_rcu, bch2_i_callback);
}

static int inode_update_times_fn(struct bch_inode_info *inode,
				 struct bch_inode_unpacked *bi,
				 void *p)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;

	bi->bi_atime	= timespec_to_bch2_time(c, inode->v.i_atime);
	bi->bi_mtime	= timespec_to_bch2_time(c, inode->v.i_mtime);
	bi->bi_ctime	= timespec_to_bch2_time(c, inode_get_ctime(&inode->v));

	return 0;
}

static int bch2_vfs_write_inode(struct inode *vinode,
				struct writeback_control *wbc)
{
	struct bch_fs *c = vinode->i_sb->s_fs_info;
	struct bch_inode_info *inode = to_bch_ei(vinode);
	int ret;

	mutex_lock(&inode->ei_update_lock);
	ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
			       ATTR_ATIME|ATTR_MTIME|ATTR_CTIME);
	mutex_unlock(&inode->ei_update_lock);

	return ret;
}

static void bch2_evict_inode(struct inode *vinode)
{
	struct bch_fs *c = vinode->i_sb->s_fs_info;
	struct bch_inode_info *inode = to_bch_ei(vinode);

	truncate_inode_pages_final(&inode->v.i_data);

	clear_inode(&inode->v);

	BUG_ON(!is_bad_inode(&inode->v) && inode->ei_quota_reserved);

	if (!inode->v.i_nlink && !is_bad_inode(&inode->v)) {
		bch2_quota_acct(c, inode->ei_qid, Q_SPC, -((s64) inode->v.i_blocks),
				KEY_TYPE_QUOTA_WARN);
		bch2_quota_acct(c, inode->ei_qid, Q_INO, -1,
				KEY_TYPE_QUOTA_WARN);
		bch2_inode_rm(c, inode_inum(inode), true);
	}
}
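
/*
 * Force all cached inodes belonging to the subvolumes in @s out of the
 * inode cache, waiting for any that are still in use to be released.
 */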
void bch2_evict_subvolume_inodes(struct bch_fs *c,
				 struct snapshot_id_list *s)
{
	struct super_block *sb = c->vfs_sb;
	struct inode *inode;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		if (!snapshot_list_has_id(s, to_bch_ei(inode)->ei_subvol) ||
		    (inode->i_state & I_FREEING))
			continue;

		d_mark_dontcache(inode);
		d_prune_aliases(inode);
	}
	spin_unlock(&sb->s_inode_list_lock);
again:
	cond_resched();
	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		if (!snapshot_list_has_id(s, to_bch_ei(inode)->ei_subvol) ||
		    (inode->i_state & I_FREEING))
			continue;

		if (!(inode->i_state & I_DONTCACHE)) {
			d_mark_dontcache(inode);
			d_prune_aliases(inode);
		}

		spin_lock(&inode->i_lock);
		if (snapshot_list_has_id(s, to_bch_ei(inode)->ei_subvol) &&
		    !(inode->i_state & I_FREEING)) {
			wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_NEW);
			DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
			prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
			spin_unlock(&inode->i_lock);
			spin_unlock(&sb->s_inode_list_lock);
			schedule();
			finish_wait(wq, &wait.wq_entry);
			goto again;
		}

		spin_unlock(&inode->i_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
}
static int bch2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct bch_fs *c = sb->s_fs_info;
	struct bch_fs_usage_short usage = bch2_fs_usage_read_short(c);
	unsigned shift = sb->s_blocksize_bits - 9;
	/*
	 * this assumes inodes take up 64 bytes, which is a decent average
	 * number:
	 */
	u64 avail_inodes = ((usage.capacity - usage.used) << 3);
	u64 fsid;

	buf->f_type	= BCACHEFS_STATFS_MAGIC;
	buf->f_bsize	= sb->s_blocksize;
	buf->f_blocks	= usage.capacity >> shift;
	buf->f_bfree	= usage.free >> shift;
	buf->f_bavail	= avail_factor(usage.free) >> shift;

	buf->f_files	= usage.nr_inodes + avail_inodes;
	buf->f_ffree	= avail_inodes;

	fsid = le64_to_cpup((void *) c->sb.user_uuid.b) ^
	       le64_to_cpup((void *) c->sb.user_uuid.b + sizeof(u64));
	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
	buf->f_namelen	= BCH_NAME_MAX;

	return 0;
}

static int bch2_sync_fs(struct super_block *sb, int wait)
{
	struct bch_fs *c = sb->s_fs_info;

	if (c->opts.journal_flush_disabled)
		return 0;

	if (!wait) {
		bch2_journal_flush_async(&c->journal, NULL);
		return 0;
	}

	return bch2_journal_flush(&c->journal);
}

static struct bch_fs *bch2_path_to_fs(const char *path)
{
	struct bch_fs *c;
	dev_t dev;
	int ret;

	ret = lookup_bdev(path, &dev);
	if (ret)
		return ERR_PTR(ret);

	c = bch2_dev_to_fs(dev);
	if (c)
		closure_put(&c->cl);
	return c ?: ERR_PTR(-ENOENT);
}
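
/*
 * Split a "dev1:dev2:..." mount device string into a NULL-terminated array
 * of device path strings; the caller frees devs[0] (the backing copy of the
 * string) and then the array itself.
 */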
static char **split_devs(const char *_dev_name, unsigned *nr)
{
	char *dev_name = NULL, **devs = NULL, *s;
	size_t i, nr_devs = 0;

	dev_name = kstrdup(_dev_name, GFP_KERNEL);
	if (!dev_name)
		return NULL;

	for (s = dev_name; s; s = strchr(s + 1, ':'))
		nr_devs++;

	devs = kcalloc(nr_devs + 1, sizeof(const char *), GFP_KERNEL);
	if (!devs) {
		kfree(dev_name);
		return NULL;
	}

	for (i = 0, s = dev_name;
	     s;
	     (s = strchr(s, ':')) && (*s++ = '\0'))
		devs[i++] = s;

	*nr = nr_devs;
	return devs;
}
static int bch2_remount(struct super_block *sb, int *flags, char *data)
{
	struct bch_fs *c = sb->s_fs_info;
	struct bch_opts opts = bch2_opts_empty();
	int ret;

	opt_set(opts, read_only, (*flags & SB_RDONLY) != 0);

	ret = bch2_parse_mount_opts(c, &opts, data);
	if (ret)
		return ret;

	if (opts.read_only != c->opts.read_only) {
		down_write(&c->state_lock);

		if (opts.read_only) {
			bch2_fs_read_only(c);

			sb->s_flags |= SB_RDONLY;
		} else {
			ret = bch2_fs_read_write(c);
			if (ret) {
				bch_err(c, "error going rw: %i", ret);
				up_write(&c->state_lock);
				return -EINVAL;
			}

			sb->s_flags &= ~SB_RDONLY;
		}

		c->opts.read_only = opts.read_only;

		up_write(&c->state_lock);
	}

	if (opts.errors >= 0)
		c->opts.errors = opts.errors;

	return ret;
}
static int bch2_show_devname(struct seq_file *seq, struct dentry *root)
{
	struct bch_fs *c = root->d_sb->s_fs_info;
	struct bch_dev *ca;
	unsigned i;
	bool first = true;

	for_each_online_member(ca, c, i) {
		if (!first)
			seq_putc(seq, ':');
		first = false;
		seq_puts(seq, "/dev/");
		seq_puts(seq, ca->name);
	}

	return 0;
}

static int bch2_show_options(struct seq_file *seq, struct dentry *root)
{
	struct bch_fs *c = root->d_sb->s_fs_info;
	enum bch_opt_id i;
	char buf[512];

	for (i = 0; i < bch2_opts_nr; i++) {
		const struct bch_option *opt = &bch2_opt_table[i];
		u64 v = bch2_opt_get_by_id(&c->opts, i);

		if (!(opt->mode & OPT_MOUNT))
			continue;

		if (v == bch2_opt_get_by_id(&bch2_opts_default, i))
			continue;

		bch2_opt_to_text(&PBUF(buf), c, opt, v,
				 OPT_SHOW_MOUNT_STYLE);
		seq_putc(seq, ',');
		seq_puts(seq, buf);
	}

	return 0;
}

static void bch2_put_super(struct super_block *sb)
{
	struct bch_fs *c = sb->s_fs_info;

	__bch2_fs_stop(c);
}

static const struct super_operations bch_super_operations = {
	.alloc_inode	= bch2_alloc_inode,
	.destroy_inode	= bch2_destroy_inode,
	.write_inode	= bch2_vfs_write_inode,
	.evict_inode	= bch2_evict_inode,
	.sync_fs	= bch2_sync_fs,
	.statfs		= bch2_statfs,
	.show_devname	= bch2_show_devname,
	.show_options	= bch2_show_options,
	.remount_fs	= bch2_remount,
	.put_super	= bch2_put_super,
#if 0
	.freeze_fs	= bch2_freeze,
	.unfreeze_fs	= bch2_unfreeze,
#endif
};

static int bch2_set_super(struct super_block *s, void *data)
{
	s->s_fs_info = data;
	return 0;
}

static int bch2_noset_super(struct super_block *s, void *data)
{
	return -EBUSY;
}

static int bch2_test_super(struct super_block *s, void *data)
{
	struct bch_fs *c = s->s_fs_info;
	struct bch_fs **devs = data;
	unsigned i;

	if (!c)
		return false;

	for (i = 0; devs[i]; i++)
		if (c != devs[i])
			return false;
	return true;
}
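
/*
 * Mount entry point: a single mount may span multiple devices, so first
 * look for an existing super_block whose filesystem matches every listed
 * device before opening a new bch_fs.
 */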
static struct dentry *bch2_mount(struct file_system_type *fs_type,
				 int flags, const char *dev_name, void *data)
{
	struct bch_fs *c;
	struct bch_dev *ca;
	struct super_block *sb;
	struct inode *vinode;
	struct bch_opts opts = bch2_opts_empty();
	char **devs;
	struct bch_fs **devs_to_fs = NULL;
	unsigned i, nr_devs;
	int ret;

	opt_set(opts, read_only, (flags & SB_RDONLY) != 0);

	ret = bch2_parse_mount_opts(NULL, &opts, data);
	if (ret)
		return ERR_PTR(ret);

	if (!dev_name || strlen(dev_name) == 0)
		return ERR_PTR(-EINVAL);

	devs = split_devs(dev_name, &nr_devs);
	if (!devs)
		return ERR_PTR(-ENOMEM);

	devs_to_fs = kcalloc(nr_devs + 1, sizeof(void *), GFP_KERNEL);
	if (!devs_to_fs) {
		sb = ERR_PTR(-ENOMEM);
		goto got_sb;
	}

	for (i = 0; i < nr_devs; i++)
		devs_to_fs[i] = bch2_path_to_fs(devs[i]);

	sb = sget(fs_type, bch2_test_super, bch2_noset_super,
		  flags|SB_NOSEC, devs_to_fs);
	if (!IS_ERR(sb))
		goto got_sb;

	c = bch2_fs_open(devs, nr_devs, opts);
	if (IS_ERR(c)) {
		sb = ERR_CAST(c);
		goto got_sb;
	}

	/* Some options can't be parsed until after the fs is started: */
	ret = bch2_parse_mount_opts(c, &opts, data);
	if (ret) {
		bch2_fs_stop(c);
		sb = ERR_PTR(ret);
		goto got_sb;
	}

	bch2_opts_apply(&c->opts, opts);

	sb = sget(fs_type, NULL, bch2_set_super, flags|SB_NOSEC, c);
	if (IS_ERR(sb))
		bch2_fs_stop(c);
got_sb:
	kfree(devs_to_fs);
	kfree(devs[0]);
	kfree(devs);

	if (IS_ERR(sb))
		return ERR_CAST(sb);

	c = sb->s_fs_info;

	if (sb->s_root) {
		if ((flags ^ sb->s_flags) & SB_RDONLY) {
			ret = -EBUSY;
			goto err_put_super;
		}
		goto out;
	}

	sb->s_blocksize		= block_bytes(c);
	sb->s_blocksize_bits	= ilog2(block_bytes(c));
	sb->s_maxbytes		= MAX_LFS_FILESIZE;
	sb->s_op		= &bch_super_operations;
	sb->s_export_op		= &bch_export_ops;
#ifdef CONFIG_BCACHEFS_QUOTA
	sb->s_qcop		= &bch2_quotactl_operations;
	sb->s_quota_types	= QTYPE_MASK_USR|QTYPE_MASK_GRP|QTYPE_MASK_PRJ;
#endif
	sb->s_xattr		= bch2_xattr_handlers;
	sb->s_magic		= BCACHEFS_STATFS_MAGIC;
	sb->s_time_gran		= c->sb.nsec_per_time_unit;
	sb->s_time_min		= div_s64(S64_MIN, c->sb.time_units_per_sec) + 1;
	sb->s_time_max		= div_s64(S64_MAX, c->sb.time_units_per_sec);
	c->vfs_sb		= sb;
	strlcpy(sb->s_id, c->name, sizeof(sb->s_id));

	ret = super_setup_bdi(sb);
	if (ret)
		goto err_put_super;

	sb->s_bdi->ra_pages		= VM_READAHEAD_PAGES;

	for_each_online_member(ca, c, i) {
		struct block_device *bdev = ca->disk_sb.bdev;

		/* XXX: create an anonymous device for multi device filesystems */
		sb->s_bdev	= bdev;
		sb->s_dev	= bdev->bd_dev;
		percpu_ref_put(&ca->io_ref);
		break;
	}

	c->dev = sb->s_dev;

#ifdef CONFIG_BCACHEFS_POSIX_ACL
	if (c->opts.acl)
		sb->s_flags	|= SB_POSIXACL;
#endif

	sb->s_shrink.seeks = 0;

	vinode = bch2_vfs_inode_get(c, BCACHEFS_ROOT_SUBVOL_INUM);
	if (IS_ERR(vinode)) {
		bch_err(c, "error mounting: error getting root inode %i",
			(int) PTR_ERR(vinode));
		ret = PTR_ERR(vinode);
		goto err_put_super;
	}

	sb->s_root = d_make_root(vinode);
	if (!sb->s_root) {
		bch_err(c, "error mounting: error allocating root dentry");
		ret = -ENOMEM;
		goto err_put_super;
	}

	sb->s_flags |= SB_ACTIVE;
out:
	return dget(sb->s_root);

err_put_super:
	deactivate_locked_super(sb);
	return ERR_PTR(ret);
}
static void bch2_kill_sb(struct super_block *sb)
{
	struct bch_fs *c = sb->s_fs_info;

	generic_shutdown_super(sb);
	bch2_fs_free(c);
}

static struct file_system_type bcache_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "bcachefs",
	.mount		= bch2_mount,
	.kill_sb	= bch2_kill_sb,
	.fs_flags	= FS_REQUIRES_DEV,
};

MODULE_ALIAS_FS("bcachefs");

void bch2_vfs_exit(void)
{
	unregister_filesystem(&bcache_fs_type);
	if (bch2_inode_cache)
		kmem_cache_destroy(bch2_inode_cache);
}

int __init bch2_vfs_init(void)
{
	int ret = -ENOMEM;

	bch2_inode_cache = KMEM_CACHE(bch_inode_info, 0);
	if (!bch2_inode_cache)
		goto err;

	ret = register_filesystem(&bcache_fs_type);
	if (ret)
		goto err;

	return 0;
err:
	bch2_vfs_exit();
	return ret;
}

#endif /* NO_BCACHEFS_FS */