// SPDX-License-Identifier: GPL-2.0
#ifndef NO_BCACHEFS_FS

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "clock.h"
#include "error.h"
#include "extents.h"
#include "extent_update.h"
#include "fs.h"
#include "fs-io.h"
#include "fs-io-buffered.h"
#include "fs-io-pagecache.h"
#include "fsck.h"
#include "inode.h"
#include "journal.h"
#include "io_misc.h"
#include "keylist.h"
#include "quota.h"
#include "reflink.h"
#include "trace.h"

#include <linux/aio.h>
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/migrate.h>
#include <linux/mmu_context.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/sched/signal.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uio.h>

#include <trace/events/writeback.h>

struct nocow_flush {
	struct closure *cl;
	struct bch_dev *ca;
	struct bio bio;
};

static void nocow_flush_endio(struct bio *_bio)
{
	struct nocow_flush *bio = container_of(_bio, struct nocow_flush, bio);

	closure_put(bio->cl);
	percpu_ref_put(&bio->ca->io_ref);
	bio_put(&bio->bio);
}
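
/*
 * Nocow writes bypass the journal, so fsync may have to flush the write
 * caches of the devices that were written to directly: ei_devs_need_flush
 * tracks which devices still need a flush for this inode.
 */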

void bch2_inode_flush_nocow_writes_async(struct bch_fs *c,
					 struct bch_inode_info *inode,
					 struct closure *cl)
{
	struct nocow_flush *bio;
	struct bch_dev *ca;
	struct bch_devs_mask devs;
	unsigned dev;

	dev = find_first_bit(inode->ei_devs_need_flush.d, BCH_SB_MEMBERS_MAX);
	if (dev == BCH_SB_MEMBERS_MAX)
		return;

	devs = inode->ei_devs_need_flush;
	memset(&inode->ei_devs_need_flush, 0, sizeof(inode->ei_devs_need_flush));

	for_each_set_bit(dev, devs.d, BCH_SB_MEMBERS_MAX) {
		rcu_read_lock();
		ca = rcu_dereference(c->devs[dev]);
		if (ca && !percpu_ref_tryget(&ca->io_ref))
			ca = NULL;
		rcu_read_unlock();

		if (!ca)
			continue;

		bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev, 0,
						    REQ_OP_WRITE|REQ_PREFLUSH,
						    GFP_KERNEL,
						    &c->nocow_flush_bioset),
				   struct nocow_flush, bio);
		bio->cl = cl;
		bio->ca = ca;
		bio->bio.bi_end_io = nocow_flush_endio;
		closure_bio_submit(&bio->bio, cl);
	}
}

static int bch2_inode_flush_nocow_writes(struct bch_fs *c,
					 struct bch_inode_info *inode)
{
	struct closure cl;

	closure_init_stack(&cl);
	bch2_inode_flush_nocow_writes_async(c, inode, &cl);
	closure_sync(&cl);

	return 0;
}

/* i_size updates: */

struct inode_new_size {
	loff_t new_size;
	u64 now;
	unsigned fields;
};

static int inode_set_size(struct btree_trans *trans,
			  struct bch_inode_info *inode,
			  struct bch_inode_unpacked *bi,
			  void *p)
{
	struct inode_new_size *s = p;

	bi->bi_size = s->new_size;
	if (s->fields & ATTR_ATIME)
		bi->bi_atime = s->now;
	if (s->fields & ATTR_MTIME)
		bi->bi_mtime = s->now;
	if (s->fields & ATTR_CTIME)
		bi->bi_ctime = s->now;

	return 0;
}

int __must_check bch2_write_inode_size(struct bch_fs *c,
				       struct bch_inode_info *inode,
				       loff_t new_size, unsigned fields)
{
	struct inode_new_size s = {
		.new_size	= new_size,
		.now		= bch2_current_time(c),
		.fields		= fields,
	};

	return bch2_write_inode(c, inode, inode_set_size, &s, fields);
}
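
/*
 * Update the in-memory i_blocks count for an inode: if a quota reservation
 * was passed in, consume it, otherwise account the change directly against
 * the inode's quota.
 */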

void __bch2_i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
			   struct quota_res *quota_res, s64 sectors)
{
	bch2_fs_inconsistent_on((s64) inode->v.i_blocks + sectors < 0, c,
				"inode %lu i_blocks underflow: %llu + %lli < 0 (ondisk %lli)",
				inode->v.i_ino, (u64) inode->v.i_blocks, sectors,
				inode->ei_inode.bi_sectors);
	inode->v.i_blocks += sectors;

#ifdef CONFIG_BCACHEFS_QUOTA
	if (quota_res &&
	    !test_bit(EI_INODE_SNAPSHOT, &inode->ei_flags) &&
	    sectors > 0) {
		BUG_ON(sectors > quota_res->sectors);
		BUG_ON(sectors > inode->ei_quota_reserved);

		quota_res->sectors -= sectors;
		inode->ei_quota_reserved -= sectors;
	} else {
		bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
	}
#endif
}

/* fsync: */

/*
 * inode->ei_inode.bi_journal_seq won't be up to date since it's set in an
 * insert trigger: look up the btree inode instead
 */
static int bch2_flush_inode(struct bch_fs *c,
			    struct bch_inode_info *inode)
{
	struct bch_inode_unpacked u;
	int ret;

	if (c->opts.journal_flush_disabled)
		return 0;

	ret = bch2_inode_find_by_inum(c, inode_inum(inode), &u);
	if (ret)
		return ret;

	return bch2_journal_flush_seq(&c->journal, u.bi_journal_seq) ?:
		bch2_inode_flush_nocow_writes(c, inode);
}

int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	int ret;

	ret = file_write_and_wait_range(file, start, end);
	if (ret)
		goto out;
	ret = sync_inode_metadata(&inode->v, 1);
	if (ret)
		goto out;
	ret = bch2_flush_inode(c, inode);
out:
	return bch2_err_class(ret);
}

/* truncate: */

static inline int range_has_data(struct bch_fs *c, u32 subvol,
				 struct bpos start,
				 struct bpos end)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;
retry:
	bch2_trans_begin(trans);

	ret = bch2_subvolume_get_snapshot(trans, subvol, &start.snapshot);
	if (ret)
		goto err;

	for_each_btree_key_upto_norestart(trans, iter, BTREE_ID_extents, start, end, 0, k, ret)
		if (bkey_extent_is_data(k.k) && !bkey_extent_is_unwritten(k)) {
			ret = 1;
			break;
		}
	start = iter.pos;
	bch2_trans_iter_exit(trans, &iter);
err:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	bch2_trans_put(trans);
	return ret;
}
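
/*
 * Zero out the part of a folio that lies inside [start, end) when truncating
 * or punching a hole, updating i_sectors accounting for sectors that go from
 * dirty to unallocated; returns whether the folio will still be written out
 * by writeback, so the caller knows who is responsible for the i_size update.
 */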

static int __bch2_truncate_folio(struct bch_inode_info *inode,
				 pgoff_t index, loff_t start, loff_t end)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	struct bch_folio *s;
	unsigned start_offset;
	unsigned end_offset;
	unsigned i;
	struct folio *folio;
	s64 i_sectors_delta = 0;
	int ret = 0;
	u64 end_pos;

	folio = filemap_lock_folio(mapping, index);
	if (IS_ERR_OR_NULL(folio)) {
		/*
		 * XXX: we're doing two index lookups when we end up reading the
		 * folio
		 */
		ret = range_has_data(c, inode->ei_subvol,
				POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT)),
				POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT) + PAGE_SECTORS));
		if (ret <= 0)
			return ret;

		folio = __filemap_get_folio(mapping, index,
					    FGP_LOCK|FGP_CREAT, GFP_KERNEL);
		if (IS_ERR_OR_NULL(folio)) {
			ret = -ENOMEM;
			goto out;
		}
	}

	BUG_ON(start >= folio_end_pos(folio));
	BUG_ON(end <= folio_pos(folio));

	start_offset = max(start, folio_pos(folio)) - folio_pos(folio);
	end_offset = min_t(u64, end, folio_end_pos(folio)) - folio_pos(folio);

	/* Folio boundary? Nothing to do */
	if (start_offset == 0 &&
	    end_offset == folio_size(folio)) {
		ret = 0;
		goto unlock;
	}

	s = bch2_folio_create(folio, 0);
	if (!s) {
		ret = -ENOMEM;
		goto unlock;
	}

	if (!folio_test_uptodate(folio)) {
		ret = bch2_read_single_folio(folio, mapping);
		if (ret)
			goto unlock;
	}

	ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
	if (ret)
		goto unlock;

	for (i = round_up(start_offset, block_bytes(c)) >> 9;
	     i < round_down(end_offset, block_bytes(c)) >> 9;
	     i++) {
		s->s[i].nr_replicas = 0;

		i_sectors_delta -= s->s[i].state == SECTOR_dirty;
		bch2_folio_sector_set(folio, s, i, SECTOR_unallocated);
	}

	bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);

	/*
	 * Caller needs to know whether this folio will be written out by
	 * writeback - doing an i_size update if necessary - or whether it will
	 * be responsible for the i_size update.
	 *
	 * Note that we shouldn't ever see a folio beyond EOF, but check and
	 * warn if so. This has been observed by failure to clean up folios
	 * after a short write and there's still a chance reclaim will fix
	 * things up.
	 */
	WARN_ON_ONCE(folio_pos(folio) >= inode->v.i_size);
	end_pos = folio_end_pos(folio);
	if (inode->v.i_size > folio_pos(folio))
		end_pos = min_t(u64, inode->v.i_size, end_pos);
	ret = s->s[folio_pos_to_s(folio, end_pos - 1)].state >= SECTOR_dirty;

	folio_zero_segment(folio, start_offset, end_offset);

	/*
	 * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
	 *
	 * XXX: because we aren't currently tracking whether the folio has actual
	 * data in it (vs. just 0s, or only partially written) this is wrong. ick.
	 */
	BUG_ON(bch2_get_folio_disk_reservation(c, inode, folio, false));

	/*
	 * This removes any writeable userspace mappings; we need to force
	 * .page_mkwrite to be called again before any mmapped writes, to
	 * redirty the full page:
	 */
	folio_mkclean(folio);
	filemap_dirty_folio(mapping, folio);
unlock:
	folio_unlock(folio);
	folio_put(folio);
out:
	return ret;
}

static int bch2_truncate_folio(struct bch_inode_info *inode, loff_t from)
{
	return __bch2_truncate_folio(inode, from >> PAGE_SHIFT,
				     from, ANYSINT_MAX(loff_t));
}

static int bch2_truncate_folios(struct bch_inode_info *inode,
				loff_t start, loff_t end)
{
	int ret = __bch2_truncate_folio(inode, start >> PAGE_SHIFT,
					start, end);

	if (ret >= 0 &&
	    start >> PAGE_SHIFT != end >> PAGE_SHIFT)
		ret = __bch2_truncate_folio(inode,
					    (end - 1) >> PAGE_SHIFT,
					    start, end);
	return ret;
}

static int bch2_extend(struct mnt_idmap *idmap,
		       struct bch_inode_info *inode,
		       struct bch_inode_unpacked *inode_u,
		       struct iattr *iattr)
{
	struct address_space *mapping = inode->v.i_mapping;
	int ret;

	/*
	 * sync appends:
	 *
	 * this has to be done _before_ extending i_size:
	 */
	ret = filemap_write_and_wait_range(mapping, inode_u->bi_size, S64_MAX);
	if (ret)
		return ret;

	truncate_setsize(&inode->v, iattr->ia_size);

	return bch2_setattr_nonsize(idmap, inode, iattr);
}
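
/*
 * Truncate: pages straddling the new i_size are zeroed and the page cache is
 * truncated before the extents and on-disk i_size are updated in the btree.
 */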

int bchfs_truncate(struct mnt_idmap *idmap,
		   struct bch_inode_info *inode, struct iattr *iattr)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	struct bch_inode_unpacked inode_u;
	s64 i_sectors_delta = 0;
	int ret = 0;

	/*
	 * If the truncate call will change the size of the file, the
	 * cmtimes should be updated. If the size will not change, we
	 * do not need to update the cmtimes.
	 */
	if (iattr->ia_size != inode->v.i_size) {
		if (!(iattr->ia_valid & ATTR_MTIME))
			ktime_get_coarse_real_ts64(&iattr->ia_mtime);
		if (!(iattr->ia_valid & ATTR_CTIME))
			ktime_get_coarse_real_ts64(&iattr->ia_ctime);
		iattr->ia_valid |= ATTR_MTIME|ATTR_CTIME;
	}

	inode_dio_wait(&inode->v);
	bch2_pagecache_block_get(inode);

	ret = bch2_inode_find_by_inum(c, inode_inum(inode), &inode_u);
	if (ret)
		goto err;

	/*
	 * check this before next assertion; on filesystem error our normal
	 * invariants are a bit broken (truncate has to truncate the page cache
	 * before the inode).
	 */
	ret = bch2_journal_error(&c->journal);
	if (ret)
		goto err;

	WARN_ONCE(!test_bit(EI_INODE_ERROR, &inode->ei_flags) &&
		  inode->v.i_size < inode_u.bi_size,
		  "truncate spotted in mem i_size < btree i_size: %llu < %llu\n",
		  (u64) inode->v.i_size, inode_u.bi_size);

	if (iattr->ia_size > inode->v.i_size) {
		ret = bch2_extend(idmap, inode, &inode_u, iattr);
		goto err;
	}

	iattr->ia_valid &= ~ATTR_SIZE;

	ret = bch2_truncate_folio(inode, iattr->ia_size);
	if (unlikely(ret < 0))
		goto err;

	truncate_setsize(&inode->v, iattr->ia_size);

	/*
	 * When extending, we're going to write the new i_size to disk
	 * immediately so we need to flush anything above the current on disk
	 * i_size first:
	 *
	 * Also, when extending we need to flush the page that i_size currently
	 * straddles - if it's mapped to userspace, we need to ensure that
	 * userspace has to redirty it and call .mkwrite -> set_page_dirty
	 * again to allocate the part of the page that was extended.
	 */
	if (iattr->ia_size > inode_u.bi_size)
		ret = filemap_write_and_wait_range(mapping,
				inode_u.bi_size,
				iattr->ia_size - 1);
	else if (iattr->ia_size & (PAGE_SIZE - 1))
		ret = filemap_write_and_wait_range(mapping,
				round_down(iattr->ia_size, PAGE_SIZE),
				iattr->ia_size - 1);
	if (ret)
		goto err;

	ret = bch2_truncate(c, inode_inum(inode), iattr->ia_size, &i_sectors_delta);
	bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);

	if (unlikely(ret)) {
		/*
		 * If we error here, VFS caches are now inconsistent with btree
		 */
		set_bit(EI_INODE_ERROR, &inode->ei_flags);
		goto err;
	}

	bch2_fs_inconsistent_on(!inode->v.i_size && inode->v.i_blocks &&
				!bch2_journal_error(&c->journal), c,
				"inode %lu truncated to 0 but i_blocks %llu (ondisk %lli)",
				inode->v.i_ino, (u64) inode->v.i_blocks,
				inode->ei_inode.bi_sectors);

	ret = bch2_setattr_nonsize(idmap, inode, iattr);
err:
	bch2_pagecache_block_put(inode);
	return bch2_err_class(ret);
}

/* fallocate: */

static int inode_update_times_fn(struct btree_trans *trans,
				 struct bch_inode_info *inode,
				 struct bch_inode_unpacked *bi, void *p)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;

	bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
	return 0;
}
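
/*
 * Hole punch: zero the partial blocks at either end of the range via the page
 * cache, drop the page cache for the range, then delete the block-aligned
 * portion from the extents btree.
 */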

static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	u64 end = offset + len;
	u64 block_start = round_up(offset, block_bytes(c));
	u64 block_end = round_down(end, block_bytes(c));
	bool truncated_last_page;
	int ret = 0;

	ret = bch2_truncate_folios(inode, offset, end);
	if (unlikely(ret < 0))
		goto err;

	truncated_last_page = ret;

	truncate_pagecache_range(&inode->v, offset, end - 1);

	if (block_start < block_end) {
		s64 i_sectors_delta = 0;

		ret = bch2_fpunch(c, inode_inum(inode),
				  block_start >> 9, block_end >> 9,
				  &i_sectors_delta);
		bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);
	}

	mutex_lock(&inode->ei_update_lock);
	if (end >= inode->v.i_size && !truncated_last_page) {
		ret = bch2_write_inode_size(c, inode, inode->v.i_size,
					    ATTR_MTIME|ATTR_CTIME);
	} else {
		ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
				       ATTR_MTIME|ATTR_CTIME);
	}
	mutex_unlock(&inode->ei_update_lock);
err:
	return ret;
}

static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
				    loff_t offset, loff_t len,
				    bool insert)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	s64 i_sectors_delta = 0;
	int ret = 0;

	if ((offset | len) & (block_bytes(c) - 1))
		return -EINVAL;

	if (insert) {
		if (offset >= inode->v.i_size)
			return -EINVAL;
	} else {
		if (offset + len >= inode->v.i_size)
			return -EINVAL;
	}

	ret = bch2_write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	if (insert)
		i_size_write(&inode->v, inode->v.i_size + len);

	ret = bch2_fcollapse_finsert(c, inode_inum(inode), offset >> 9, len >> 9,
				     insert, &i_sectors_delta);
	if (!ret && !insert)
		i_size_write(&inode->v, inode->v.i_size - len);
	bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);

	return ret;
}
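
/*
 * Walk the extents in the range being fallocated and reserve/allocate space
 * for holes (or, with FALLOC_FL_ZERO_RANGE, for unreserved extents), taking
 * quota reservations for sectors that aren't already allocated.
 */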

static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
			     u64 start_sector, u64 end_sector)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bpos end_pos = POS(inode->v.i_ino, end_sector);
	struct bch_io_opts opts;
	int ret = 0;

	bch2_inode_opts_get(&opts, c, &inode->ei_inode);

	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
			POS(inode->v.i_ino, start_sector),
			BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	while (!ret && bkey_lt(iter.pos, end_pos)) {
		s64 i_sectors_delta = 0;
		struct quota_res quota_res = { 0 };
		struct bkey_s_c k;
		unsigned sectors;
		bool is_allocation;
		u64 hole_start, hole_end;
		u32 snapshot;

		bch2_trans_begin(trans);

		ret = bch2_subvolume_get_snapshot(trans,
					inode->ei_subvol, &snapshot);
		if (ret)
			goto bkey_err;

		bch2_btree_iter_set_snapshot(&iter, snapshot);

		k = bch2_btree_iter_peek_slot(&iter);
		if ((ret = bkey_err(k)))
			goto bkey_err;

		hole_start = iter.pos.offset;
		hole_end = bpos_min(k.k->p, end_pos).offset;
		is_allocation = bkey_extent_is_allocation(k.k);

		/* already reserved */
		if (bkey_extent_is_reservation(k) &&
		    bch2_bkey_nr_ptrs_fully_allocated(k) >= opts.data_replicas) {
			bch2_btree_iter_advance(&iter);
			continue;
		}

		if (bkey_extent_is_data(k.k) &&
		    !(mode & FALLOC_FL_ZERO_RANGE)) {
			bch2_btree_iter_advance(&iter);
			continue;
		}

		if (!(mode & FALLOC_FL_ZERO_RANGE)) {
			/*
			 * Lock ordering - can't be holding btree locks while
			 * blocking on a folio lock:
			 */
			if (bch2_clamp_data_hole(&inode->v,
						 &hole_start,
						 &hole_end,
						 opts.data_replicas, true))
				ret = drop_locks_do(trans,
					(bch2_clamp_data_hole(&inode->v,
							      &hole_start,
							      &hole_end,
							      opts.data_replicas, false), 0));

			bch2_btree_iter_set_pos(&iter, POS(iter.pos.inode, hole_start));

			if (ret)
				goto bkey_err;

			if (hole_start == hole_end)
				continue;
		}

		sectors = hole_end - hole_start;

		if (!is_allocation) {
			ret = bch2_quota_reservation_add(c, inode,
					&quota_res, sectors, true);
			if (unlikely(ret))
				goto bkey_err;
		}

		ret = bch2_extent_fallocate(trans, inode_inum(inode), &iter,
					    sectors, opts, &i_sectors_delta,
					    writepoint_hashed((unsigned long) current));
		if (ret)
			goto bkey_err;

		bch2_i_sectors_acct(c, inode, &quota_res, i_sectors_delta);

		if (bch2_mark_pagecache_reserved(inode, &hole_start,
						 iter.pos.offset, true))
			drop_locks_do(trans,
				bch2_mark_pagecache_reserved(inode, &hole_start,
							     iter.pos.offset, false));
bkey_err:
		bch2_quota_reservation_put(c, inode, &quota_res);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			ret = 0;
	}

	if (bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)) {
		struct quota_res quota_res = { 0 };
		s64 i_sectors_delta = 0;

		bch2_fpunch_at(trans, &iter, inode_inum(inode),
			       end_sector, &i_sectors_delta);
		bch2_i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
		bch2_quota_reservation_put(c, inode, &quota_res);
	}

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
}
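
/*
 * fallocate entry point for ordinary allocation and FALLOC_FL_ZERO_RANGE:
 * handles page cache zeroing/truncation, the block-aligned btree update via
 * __bchfs_fallocate(), and the final i_size update.
 */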

static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
			    loff_t offset, loff_t len)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	u64 end = offset + len;
	u64 block_start = round_down(offset, block_bytes(c));
	u64 block_end = round_up(end, block_bytes(c));
	bool truncated_last_page = false;
	int ret, ret2 = 0;

	if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
		ret = inode_newsize_ok(&inode->v, end);
		if (ret)
			return ret;
	}

	if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = bch2_truncate_folios(inode, offset, end);
		if (unlikely(ret < 0))
			return ret;

		truncated_last_page = ret;

		truncate_pagecache_range(&inode->v, offset, end - 1);

		block_start = round_up(offset, block_bytes(c));
		block_end = round_down(end, block_bytes(c));
	}

	ret = __bchfs_fallocate(inode, mode, block_start >> 9, block_end >> 9);

	/*
	 * On -ENOSPC in ZERO_RANGE mode, we still want to do the inode update,
	 * so that the VFS cache i_size is consistent with the btree i_size:
	 */
	if (ret &&
	    !(bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)))
		return ret;

	if (mode & FALLOC_FL_KEEP_SIZE && end > inode->v.i_size)
		end = inode->v.i_size;

	if (end >= inode->v.i_size &&
	    (((mode & FALLOC_FL_ZERO_RANGE) && !truncated_last_page) ||
	     !(mode & FALLOC_FL_KEEP_SIZE))) {
		spin_lock(&inode->v.i_lock);
		i_size_write(&inode->v, end);
		spin_unlock(&inode->v.i_lock);

		mutex_lock(&inode->ei_update_lock);
		ret2 = bch2_write_inode_size(c, inode, end, 0);
		mutex_unlock(&inode->ei_update_lock);
	}

	return ret ?: ret2;
}

long bch2_fallocate_dispatch(struct file *file, int mode,
			     loff_t offset, loff_t len)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	long ret;

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_fallocate))
		return -EROFS;

	inode_lock(&inode->v);
	inode_dio_wait(&inode->v);
	bch2_pagecache_block_get(inode);

	ret = file_modified(file);
	if (ret)
		goto err;

	if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
		ret = bchfs_fallocate(inode, mode, offset, len);
	else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
		ret = bchfs_fpunch(inode, offset, len);
	else if (mode == FALLOC_FL_INSERT_RANGE)
		ret = bchfs_fcollapse_finsert(inode, offset, len, true);
	else if (mode == FALLOC_FL_COLLAPSE_RANGE)
		ret = bchfs_fcollapse_finsert(inode, offset, len, false);
	else
		ret = -EOPNOTSUPP;
err:
	bch2_pagecache_block_put(inode);
	inode_unlock(&inode->v);
	bch2_write_ref_put(c, BCH_WRITE_REF_fallocate);

	return bch2_err_class(ret);
}

/*
 * Take a quota reservation for unallocated blocks in a given file range
 * Does not check pagecache
 */
static int quota_reserve_range(struct bch_inode_info *inode,
			       struct quota_res *res,
			       u64 start, u64 end)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_s_c k;
	u32 snapshot;
	u64 sectors = end - start;
	u64 pos = start;
	int ret;
retry:
	bch2_trans_begin(trans);

	ret = bch2_subvolume_get_snapshot(trans, inode->ei_subvol, &snapshot);
	if (ret)
		goto err;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
			     SPOS(inode->v.i_ino, pos, snapshot), 0);

	while (!(ret = btree_trans_too_many_iters(trans)) &&
	       (k = bch2_btree_iter_peek_upto(&iter, POS(inode->v.i_ino, end - 1))).k &&
	       !(ret = bkey_err(k))) {
		if (bkey_extent_is_allocation(k.k)) {
			u64 s = min(end, k.k->p.offset) -
				max(start, bkey_start_offset(k.k));
			BUG_ON(s > sectors);
			sectors -= s;
		}
		bch2_btree_iter_advance(&iter);
	}
	pos = iter.pos.offset;
	bch2_trans_iter_exit(trans, &iter);
err:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	bch2_trans_put(trans);

	return ret ?: bch2_quota_reservation_add(c, inode, res, sectors, true);
}
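
/*
 * reflink/clone: remap extents from the source file into the destination by
 * referencing the same underlying data rather than copying it; the page cache
 * for the destination range is invalidated first.
 */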

loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
			     struct file *file_dst, loff_t pos_dst,
			     loff_t len, unsigned remap_flags)
{
	struct bch_inode_info *src = file_bch_inode(file_src);
	struct bch_inode_info *dst = file_bch_inode(file_dst);
	struct bch_fs *c = src->v.i_sb->s_fs_info;
	struct quota_res quota_res = { 0 };
	s64 i_sectors_delta = 0;
	u64 aligned_len;
	loff_t ret = 0;

	if (remap_flags & ~(REMAP_FILE_DEDUP|REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (remap_flags & REMAP_FILE_DEDUP)
		return -EOPNOTSUPP;

	if ((pos_src & (block_bytes(c) - 1)) ||
	    (pos_dst & (block_bytes(c) - 1)))
		return -EINVAL;

	if (src == dst &&
	    abs(pos_src - pos_dst) < len)
		return -EINVAL;

	lock_two_nondirectories(&src->v, &dst->v);
	bch2_lock_inodes(INODE_PAGECACHE_BLOCK, src, dst);

	inode_dio_wait(&src->v);
	inode_dio_wait(&dst->v);

	ret = generic_remap_file_range_prep(file_src, pos_src,
					    file_dst, pos_dst,
					    &len, remap_flags);
	if (ret < 0 || len == 0)
		goto err;

	aligned_len = round_up((u64) len, block_bytes(c));

	ret = bch2_write_invalidate_inode_pages_range(dst->v.i_mapping,
				pos_dst, pos_dst + len - 1);
	if (ret)
		goto err;

	ret = quota_reserve_range(dst, &quota_res, pos_dst >> 9,
				  (pos_dst + aligned_len) >> 9);
	if (ret)
		goto err;

	file_update_time(file_dst);

	bch2_mark_pagecache_unallocated(src, pos_src >> 9,
					(pos_src + aligned_len) >> 9);

	ret = bch2_remap_range(c,
			       inode_inum(dst), pos_dst >> 9,
			       inode_inum(src), pos_src >> 9,
			       aligned_len >> 9,
			       pos_dst + len, &i_sectors_delta);
	if (ret < 0)
		goto err;

	/*
	 * due to alignment, we might have remapped slightly more than requested
	 */
	ret = min((u64) ret << 9, (u64) len);

	bch2_i_sectors_acct(c, dst, &quota_res, i_sectors_delta);

	spin_lock(&dst->v.i_lock);
	if (pos_dst + ret > dst->v.i_size)
		i_size_write(&dst->v, pos_dst + ret);
	spin_unlock(&dst->v.i_lock);

	if ((file_dst->f_flags & (__O_SYNC|O_DSYNC)) ||
	    IS_SYNC(file_inode(file_dst)))
		ret = bch2_flush_inode(c, dst);
err:
	bch2_quota_reservation_put(c, dst, &quota_res);
	bch2_unlock_inodes(INODE_PAGECACHE_BLOCK, src, dst);
	unlock_two_nondirectories(&src->v, &dst->v);

	return bch2_err_class(ret);
}

/* fseek: */

static loff_t bch2_seek_data(struct file *file, u64 offset)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_trans *trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	subvol_inum inum = inode_inum(inode);
	u64 isize, next_data = MAX_LFS_FILESIZE;
	u32 snapshot;
	int ret;

	isize = i_size_read(&inode->v);
	if (offset >= isize)
		return -ENXIO;

	trans = bch2_trans_get(c);
retry:
	bch2_trans_begin(trans);

	ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
	if (ret)
		goto err;

	for_each_btree_key_upto_norestart(trans, iter, BTREE_ID_extents,
			   SPOS(inode->v.i_ino, offset >> 9, snapshot),
			   POS(inode->v.i_ino, U64_MAX),
			   0, k, ret) {
		if (bkey_extent_is_data(k.k)) {
			next_data = max(offset, bkey_start_offset(k.k) << 9);
			break;
		} else if (k.k->p.offset >> 9 > isize)
			break;
	}
	bch2_trans_iter_exit(trans, &iter);
err:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	bch2_trans_put(trans);
	if (ret)
		return ret;

	if (next_data > offset)
		next_data = bch2_seek_pagecache_data(&inode->v,
					offset, next_data, 0, false);

	if (next_data >= isize)
		return -ENXIO;

	return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
}
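
/*
 * SEEK_HOLE: a range that is a hole in the extents btree may still have dirty
 * data in the page cache, so non-data extents are checked against the page
 * cache as well before being reported as a hole.
 */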
static loff_t bch2_seek_hole(struct file *file, u64 offset)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_trans *trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	subvol_inum inum = inode_inum(inode);
	u64 isize, next_hole = MAX_LFS_FILESIZE;
	u32 snapshot;
	int ret;

	isize = i_size_read(&inode->v);
	if (offset >= isize)
		return -ENXIO;

	trans = bch2_trans_get(c);
retry:
	bch2_trans_begin(trans);

	ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
	if (ret)
		goto err;

	for_each_btree_key_norestart(trans, iter, BTREE_ID_extents,
			   SPOS(inode->v.i_ino, offset >> 9, snapshot),
			   BTREE_ITER_SLOTS, k, ret) {
		if (k.k->p.inode != inode->v.i_ino) {
			next_hole = bch2_seek_pagecache_hole(&inode->v,
					offset, MAX_LFS_FILESIZE, 0, false);
			break;
		} else if (!bkey_extent_is_data(k.k)) {
			next_hole = bch2_seek_pagecache_hole(&inode->v,
					max(offset, bkey_start_offset(k.k) << 9),
					k.k->p.offset << 9, 0, false);

			if (next_hole < k.k->p.offset << 9)
				break;
		} else {
			offset = max(offset, bkey_start_offset(k.k) << 9);
		}
	}
	bch2_trans_iter_exit(trans, &iter);
err:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	bch2_trans_put(trans);
	if (ret)
		return ret;

	if (next_hole > isize)
		next_hole = isize;

	return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
}

loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
{
	loff_t ret;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		ret = generic_file_llseek(file, offset, whence);
		break;
	case SEEK_DATA:
		ret = bch2_seek_data(file, offset);
		break;
	case SEEK_HOLE:
		ret = bch2_seek_hole(file, offset);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return bch2_err_class(ret);
}

void bch2_fs_fsio_exit(struct bch_fs *c)
{
	bioset_exit(&c->nocow_flush_bioset);
}

int bch2_fs_fsio_init(struct bch_fs *c)
{
	if (bioset_init(&c->nocow_flush_bioset,
			1, offsetof(struct nocow_flush, bio), 0))
		return -BCH_ERR_ENOMEM_nocow_flush_bioset_init;

	return 0;
}

#endif /* NO_BCACHEFS_FS */