btrfs: root->fs_info cleanup, add fs_info convenience variables

In routines where someptr->fs_info is referenced multiple times, we
introduce a convenience variable.  This makes the code considerably
more readable.
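
As a minimal sketch of the pattern (hypothetical helper names, not taken from this patch), a routine that dereferences root->fs_info several times gains one local fs_info pointer:

/* Hypothetical before/after illustration of the cleanup; not part of the patch. */

/* Before: every use re-dereferences root->fs_info. */
static void example_mark_root_dirty_before(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	list_move(&root->dirty_list, &root->fs_info->dirty_cowonly_roots);
	spin_unlock(&root->fs_info->trans_lock);
}

/* After: one convenience variable at the top of the routine. */
static void example_mark_root_dirty_after(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	list_move(&root->dirty_list, &fs_info->dirty_cowonly_roots);
	spin_unlock(&fs_info->trans_lock);
}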

Signed-off-by: Jeff Mahoney <jeffm@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Jeff Mahoney 2016-06-22 18:54:23 -04:00 committed by David Sterba
parent 6202df6921
commit 0b246afa62
33 changed files with 2251 additions and 2016 deletions

fs/btrfs/check-integrity.c

@@ -646,11 +646,12 @@ static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(
 static int btrfsic_process_superblock(struct btrfsic_state *state,
				       struct btrfs_fs_devices *fs_devices)
 {
-	int ret = 0;
+	struct btrfs_fs_info *fs_info = state->fs_info;
 	struct btrfs_super_block *selected_super;
 	struct list_head *dev_head = &fs_devices->devices;
 	struct btrfs_device *device;
 	struct btrfsic_dev_state *selected_dev_state = NULL;
+	int ret = 0;
 	int pass;

 	BUG_ON(NULL == state);
@@ -716,9 +717,8 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
 				break;
 			}

-			num_copies =
-			    btrfs_num_copies(state->fs_info,
-					     next_bytenr, state->metablock_size);
+			num_copies = btrfs_num_copies(fs_info, next_bytenr,
+						      state->metablock_size);
 			if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
 				pr_info("num_copies(log_bytenr=%llu) = %d\n",
 				       next_bytenr, num_copies);
@@ -783,6 +783,7 @@ static int btrfsic_process_superblock_dev_mirror(
 		struct btrfsic_dev_state **selected_dev_state,
 		struct btrfs_super_block *selected_super)
 {
+	struct btrfs_fs_info *fs_info = state->fs_info;
 	struct btrfs_super_block *super_tmp;
 	u64 dev_bytenr;
 	struct buffer_head *bh;
@@ -832,7 +833,7 @@ static int btrfsic_process_superblock_dev_mirror(
 		superblock_tmp->never_written = 0;
 		superblock_tmp->mirror_num = 1 + superblock_mirror_num;
 		if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
-			btrfs_info_in_rcu(device->fs_info,
+			btrfs_info_in_rcu(fs_info,
 				"new initial S-block (bdev %p, %s) @%llu (%s/%llu/%d)",
 				superblock_bdev,
 				rcu_str_deref(device->name), dev_bytenr,
@@ -887,9 +888,8 @@ static int btrfsic_process_superblock_dev_mirror(
 				break;
 			}

-			num_copies =
-			    btrfs_num_copies(state->fs_info,
-					     next_bytenr, state->metablock_size);
+			num_copies = btrfs_num_copies(fs_info, next_bytenr,
+						      state->metablock_size);
 			if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
 				pr_info("num_copies(log_bytenr=%llu) = %d\n",
 				       next_bytenr, num_copies);
@@ -1254,6 +1254,7 @@ static int btrfsic_create_link_to_next_block(
 		struct btrfs_disk_key *disk_key,
 		u64 parent_generation)
 {
+	struct btrfs_fs_info *fs_info = state->fs_info;
 	struct btrfsic_block *next_block = NULL;
 	int ret;
 	struct btrfsic_block_link *l;
@@ -1262,9 +1263,8 @@ static int btrfsic_create_link_to_next_block(
 	*next_blockp = NULL;
 	if (0 == *num_copiesp) {
-		*num_copiesp =
-		    btrfs_num_copies(state->fs_info,
-				     next_bytenr, state->metablock_size);
+		*num_copiesp = btrfs_num_copies(fs_info, next_bytenr,
+						state->metablock_size);
 		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
 			pr_info("num_copies(log_bytenr=%llu) = %d\n",
 			       next_bytenr, *num_copiesp);
@@ -1390,13 +1390,14 @@ static int btrfsic_handle_extent_data(
 		struct btrfsic_block_data_ctx *block_ctx,
 		u32 item_offset, int force_iodone_flag)
 {
-	int ret;
+	struct btrfs_fs_info *fs_info = state->fs_info;
 	struct btrfs_file_extent_item file_extent_item;
 	u64 file_extent_item_offset;
 	u64 next_bytenr;
 	u64 num_bytes;
 	u64 generation;
 	struct btrfsic_block_link *l;
+	int ret;

 	file_extent_item_offset = offsetof(struct btrfs_leaf, items) +
 				  item_offset;
@@ -1456,9 +1457,8 @@ static int btrfsic_handle_extent_data(
 		else
 			chunk_len = num_bytes;

-		num_copies =
-		    btrfs_num_copies(state->fs_info,
-				     next_bytenr, state->datablock_size);
+		num_copies = btrfs_num_copies(fs_info, next_bytenr,
+					      state->datablock_size);
 		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
 			pr_info("num_copies(log_bytenr=%llu) = %d\n",
 			       next_bytenr, num_copies);
@@ -1533,13 +1533,14 @@ static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
 			     struct btrfsic_block_data_ctx *block_ctx_out,
 			     int mirror_num)
 {
+	struct btrfs_fs_info *fs_info = state->fs_info;
 	int ret;
 	u64 length;
 	struct btrfs_bio *multi = NULL;
 	struct btrfs_device *device;

 	length = len;
-	ret = btrfs_map_block(state->fs_info, BTRFS_MAP_READ,
+	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
 			      bytenr, &length, &multi, mirror_num);

 	if (ret) {
@@ -1731,6 +1732,7 @@ static void btrfsic_dump_database(struct btrfsic_state *state)
 static int btrfsic_test_for_metadata(struct btrfsic_state *state,
 				     char **datav, unsigned int num_pages)
 {
+	struct btrfs_fs_info *fs_info = state->fs_info;
 	struct btrfs_header *h;
 	u8 csum[BTRFS_CSUM_SIZE];
 	u32 crc = ~(u32)0;
@@ -1741,7 +1743,7 @@ static int btrfsic_test_for_metadata(struct btrfsic_state *state,
 		num_pages = state->metablock_size >> PAGE_SHIFT;
 	h = (struct btrfs_header *)datav[0];

-	if (memcmp(h->fsid, state->fs_info->fsid, BTRFS_UUID_SIZE))
+	if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
 		return 1;

 	for (i = 0; i < num_pages; i++) {
@@ -2202,6 +2204,7 @@ static int btrfsic_process_written_superblock(
 		struct btrfsic_block *const superblock,
 		struct btrfs_super_block *const super_hdr)
 {
+	struct btrfs_fs_info *fs_info = state->fs_info;
 	int pass;

 	superblock->generation = btrfs_super_generation(super_hdr);
@@ -2275,9 +2278,8 @@ static int btrfsic_process_written_superblock(
 				break;
 			}

-			num_copies =
-			    btrfs_num_copies(state->fs_info,
-					     next_bytenr, BTRFS_SUPER_INFO_SIZE);
+			num_copies = btrfs_num_copies(fs_info, next_bytenr,
+						      BTRFS_SUPER_INFO_SIZE);
 			if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
 				pr_info("num_copies(log_bytenr=%llu) = %d\n",
 				       next_bytenr, num_copies);
@@ -2699,14 +2701,14 @@ static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
					   struct btrfsic_dev_state *dev_state,
					   u64 dev_bytenr)
 {
+	struct btrfs_fs_info *fs_info = state->fs_info;
+	struct btrfsic_block_data_ctx block_ctx;
 	int num_copies;
 	int mirror_num;
-	int ret;
-	struct btrfsic_block_data_ctx block_ctx;
 	int match = 0;
+	int ret;

-	num_copies = btrfs_num_copies(state->fs_info,
-				      bytenr, state->metablock_size);
+	num_copies = btrfs_num_copies(fs_info, bytenr, state->metablock_size);

 	for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
 		ret = btrfsic_map_block(state, bytenr, state->metablock_size,
@@ -2909,16 +2911,17 @@ int btrfsic_mount(struct btrfs_root *root,
 	int ret;
 	struct btrfsic_state *state;
 	struct list_head *dev_head = &fs_devices->devices;
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_device *device;

-	if (root->fs_info->nodesize & ((u64)PAGE_SIZE - 1)) {
+	if (fs_info->nodesize & ((u64)PAGE_SIZE - 1)) {
 		pr_info("btrfsic: cannot handle nodesize %d not being a multiple of PAGE_SIZE %ld!\n",
-		       root->fs_info->nodesize, PAGE_SIZE);
+		       fs_info->nodesize, PAGE_SIZE);
 		return -1;
 	}
-	if (root->fs_info->sectorsize & ((u64)PAGE_SIZE - 1)) {
+	if (fs_info->sectorsize & ((u64)PAGE_SIZE - 1)) {
 		pr_info("btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_SIZE %ld!\n",
-		       root->fs_info->sectorsize, PAGE_SIZE);
+		       fs_info->sectorsize, PAGE_SIZE);
 		return -1;
 	}
 	state = kzalloc(sizeof(*state), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
@@ -2940,8 +2943,8 @@ int btrfsic_mount(struct btrfs_root *root,
 	state->print_mask = print_mask;
 	state->include_extent_data = including_extent_data;
 	state->csum_size = 0;
-	state->metablock_size = root->fs_info->nodesize;
-	state->datablock_size = root->fs_info->sectorsize;
+	state->metablock_size = fs_info->nodesize;
+	state->datablock_size = fs_info->sectorsize;
 	INIT_LIST_HEAD(&state->all_blocks_list);
 	btrfsic_block_hashtable_init(&state->block_hashtable);
 	btrfsic_block_link_hashtable_init(&state->block_link_hashtable);

fs/btrfs/compression.c

@@ -88,10 +88,11 @@ static int btrfs_decompress_bio(int type, struct page **pages_in,
 static inline int compressed_bio_size(struct btrfs_root *root,
				      unsigned long disk_size)
 {
-	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

 	return sizeof(struct compressed_bio) +
-		(DIV_ROUND_UP(disk_size, root->fs_info->sectorsize)) * csum_size;
+		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
 }

 static struct bio *compressed_bio_alloc(struct block_device *bdev,
@@ -328,6 +329,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
				 struct page **compressed_pages,
				 unsigned long nr_pages)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct bio *bio = NULL;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct compressed_bio *cb;
@@ -355,7 +357,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 	cb->orig_bio = NULL;
 	cb->nr_pages = nr_pages;

-	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
+	bdev = fs_info->fs_devices->latest_bdev;

 	bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
 	if (!bio) {
@@ -391,8 +393,8 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 			 * freed before we're done setting it up
 			 */
 			atomic_inc(&cb->pending_bios);
-			ret = btrfs_bio_wq_end_io(root->fs_info, bio,
-						  BTRFS_WQ_ENDIO_DATA);
+			ret = btrfs_bio_wq_end_io(fs_info, bio,
+						  BTRFS_WQ_ENDIO_DATA);
 			BUG_ON(ret); /* -ENOMEM */

 			if (!skip_sum) {
@@ -417,7 +419,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 			bio_add_page(bio, page, PAGE_SIZE, 0);
 		}
 		if (bytes_left < PAGE_SIZE) {
-			btrfs_info(BTRFS_I(inode)->root->fs_info,
+			btrfs_info(fs_info,
					"bytes left %lu compress len %lu nr %lu",
			       bytes_left, cb->compressed_len, cb->nr_pages);
 		}
@@ -427,7 +429,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 	}
 	bio_get(bio);

-	ret = btrfs_bio_wq_end_io(root->fs_info, bio, BTRFS_WQ_ENDIO_DATA);
+	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
 	BUG_ON(ret); /* -ENOMEM */

 	if (!skip_sum) {
@@ -575,6 +577,7 @@ next:
 int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct extent_io_tree *tree;
 	struct extent_map_tree *em_tree;
 	struct compressed_bio *cb;
@@ -634,7 +637,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	if (!cb->compressed_pages)
 		goto fail1;

-	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
+	bdev = fs_info->fs_devices->latest_bdev;

 	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
 		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
@@ -678,8 +681,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 		    PAGE_SIZE) {
 			bio_get(comp_bio);

-			ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio,
-						  BTRFS_WQ_ENDIO_DATA);
+			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
+						  BTRFS_WQ_ENDIO_DATA);
 			BUG_ON(ret); /* -ENOMEM */

 			/*
@@ -696,7 +699,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				BUG_ON(ret); /* -ENOMEM */
 			}
 			sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
-					     root->fs_info->sectorsize);
+					     fs_info->sectorsize);

 			ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
 			if (ret) {
@@ -719,8 +722,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	}
 	bio_get(comp_bio);

-	ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio,
-				  BTRFS_WQ_ENDIO_DATA);
+	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
 	BUG_ON(ret); /* -ENOMEM */

 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {

fs/btrfs/ctree.c

@@ -212,21 +212,23 @@ static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
  */
 static void add_root_to_dirty_list(struct btrfs_root *root)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
+
 	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
 	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
 		return;

-	spin_lock(&root->fs_info->trans_lock);
+	spin_lock(&fs_info->trans_lock);
 	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
 		/* Want the extent tree to be the last on the list */
 		if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
 			list_move_tail(&root->dirty_list,
-				       &root->fs_info->dirty_cowonly_roots);
+				       &fs_info->dirty_cowonly_roots);
 		else
 			list_move(&root->dirty_list,
-				  &root->fs_info->dirty_cowonly_roots);
+				  &fs_info->dirty_cowonly_roots);
 	}
-	spin_unlock(&root->fs_info->trans_lock);
+	spin_unlock(&fs_info->trans_lock);
 }

 /*
@@ -239,13 +241,14 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_buffer *cow;
 	int ret = 0;
 	int level;
 	struct btrfs_disk_key disk_key;

 	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
-		trans->transid != root->fs_info->running_transaction->transid);
+		trans->transid != fs_info->running_transaction->transid);
 	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);
@@ -271,7 +274,7 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
 	else
 		btrfs_set_header_owner(cow, new_root_objectid);

-	write_extent_buffer_fsid(cow, root->fs_info->fsid);
+	write_extent_buffer_fsid(cow, fs_info->fsid);

 	WARN_ON(btrfs_header_generation(buf) > trans->transid);
 	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
@@ -977,6 +980,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct extent_buffer *cow,
				       int *last_ref)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 refs;
 	u64 owner;
 	u64 flags;
@@ -1008,7 +1012,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
 			return ret;
 		if (refs == 0) {
 			ret = -EROFS;
-			btrfs_handle_fs_error(root->fs_info, ret, NULL);
+			btrfs_handle_fs_error(fs_info, ret, NULL);
 			return ret;
 		}
 	} else {
@@ -1069,7 +1073,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
 			ret = btrfs_dec_ref(trans, root, buf, 1);
 			BUG_ON(ret); /* -ENOMEM */
 		}
-		clean_tree_block(trans, root->fs_info, buf);
+		clean_tree_block(trans, fs_info, buf);
 		*last_ref = 1;
 	}
 	return 0;
@@ -1094,6 +1098,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_disk_key disk_key;
 	struct extent_buffer *cow;
 	int level, ret;
@@ -1107,7 +1112,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
 	btrfs_assert_tree_locked(buf);

 	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
-		trans->transid != root->fs_info->running_transaction->transid);
+		trans->transid != fs_info->running_transaction->transid);
 	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);
@@ -1140,7 +1145,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
 	else
 		btrfs_set_header_owner(cow, root->root_key.objectid);

-	write_extent_buffer_fsid(cow, root->fs_info->fsid);
+	write_extent_buffer_fsid(cow, fs_info->fsid);

 	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
 	if (ret) {
@@ -1172,7 +1177,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
 		add_root_to_dirty_list(root);
 	} else {
 		WARN_ON(trans->transid != btrfs_header_generation(parent));
-		tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
+		tree_mod_log_insert_key(fs_info, parent, parent_slot,
					MOD_LOG_KEY_REPLACE, GFP_NOFS);
 		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
@@ -1180,7 +1185,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
					      trans->transid);
 		btrfs_mark_buffer_dirty(parent);
 		if (last_ref) {
-			ret = tree_mod_log_free_eb(root->fs_info, buf);
+			ret = tree_mod_log_free_eb(fs_info, buf);
 			if (ret) {
				btrfs_abort_transaction(trans, ret);
				return ret;
@@ -1400,6 +1405,7 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
 static inline struct extent_buffer *
 get_old_root(struct btrfs_root *root, u64 time_seq)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct tree_mod_elem *tm;
 	struct extent_buffer *eb = NULL;
 	struct extent_buffer *eb_root;
@@ -1409,7 +1415,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
 	u64 logical;

 	eb_root = btrfs_read_lock_root_node(root);
-	tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
+	tm = __tree_mod_log_oldest_root(fs_info, eb_root, time_seq);
 	if (!tm)
 		return eb_root;
@@ -1421,7 +1427,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
 		logical = eb_root->start;
 	}

-	tm = tree_mod_log_search(root->fs_info, logical, time_seq);
+	tm = tree_mod_log_search(fs_info, logical, time_seq);
 	if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
 		btrfs_tree_read_unlock(eb_root);
 		free_extent_buffer(eb_root);
@@ -1429,8 +1435,9 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
 		if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
 			if (!IS_ERR(old))
				free_extent_buffer(old);
-			btrfs_warn(root->fs_info,
-				"failed to read tree block %llu from get_old_root", logical);
+			btrfs_warn(fs_info,
+				   "failed to read tree block %llu from get_old_root",
+				   logical);
 		} else {
 			eb = btrfs_clone_extent_buffer(old);
 			free_extent_buffer(old);
@@ -1438,7 +1445,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
 	} else if (old_root) {
 		btrfs_tree_read_unlock(eb_root);
 		free_extent_buffer(eb_root);
-		eb = alloc_dummy_extent_buffer(root->fs_info, logical);
+		eb = alloc_dummy_extent_buffer(fs_info, logical);
 	} else {
 		btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
 		eb = btrfs_clone_extent_buffer(eb_root);
@@ -1458,10 +1465,10 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
 		btrfs_set_header_generation(eb, old_generation);
 	}
 	if (tm)
-		__tree_mod_log_rewind(root->fs_info, eb, time_seq, tm);
+		__tree_mod_log_rewind(fs_info, eb, time_seq, tm);
 	else
 		WARN_ON(btrfs_header_level(eb) != 0);
-	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root->fs_info));
+	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(fs_info));

 	return eb;
 }
@@ -1523,17 +1530,18 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 search_start;
 	int ret;

-	if (trans->transaction != root->fs_info->running_transaction)
+	if (trans->transaction != fs_info->running_transaction)
 		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid,
-		       root->fs_info->running_transaction->transid);
+		       fs_info->running_transaction->transid);

-	if (trans->transid != root->fs_info->generation)
+	if (trans->transid != fs_info->generation)
 		WARN(1, KERN_CRIT "trans %llu running %llu\n",
-		       trans->transid, root->fs_info->generation);
+		       trans->transid, fs_info->generation);

 	if (!should_cow_block(trans, root, buf)) {
 		trans->dirty = true;
@@ -1610,6 +1618,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_buffer *cur;
 	u64 blocknr;
 	u64 gen;
@@ -1628,11 +1637,11 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,

 	parent_level = btrfs_header_level(parent);

-	WARN_ON(trans->transaction != root->fs_info->running_transaction);
-	WARN_ON(trans->transid != root->fs_info->generation);
+	WARN_ON(trans->transaction != fs_info->running_transaction);
+	WARN_ON(trans->transid != fs_info->generation);

 	parent_nritems = btrfs_header_nritems(parent);
-	blocksize = root->fs_info->nodesize;
+	blocksize = fs_info->nodesize;
 	end_slot = parent_nritems - 1;

 	if (parent_nritems <= 1)
@@ -1666,7 +1675,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
 			continue;
 		}

-		cur = find_extent_buffer(root->fs_info, blocknr);
+		cur = find_extent_buffer(fs_info, blocknr);
 		if (cur)
 			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
 		else
@@ -1711,7 +1720,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
 	return err;
 }

-
 /*
  * search for key in the extent_buffer. The items start at offset p,
  * and they are item_size apart. There are 'max' items in p.
@@ -1865,6 +1873,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path, int level)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_buffer *right = NULL;
 	struct extent_buffer *mid;
 	struct extent_buffer *left = NULL;
@@ -1905,7 +1914,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 		child = read_node_slot(root, mid, 0);
 		if (IS_ERR(child)) {
 			ret = PTR_ERR(child);
-			btrfs_handle_fs_error(root->fs_info, ret, NULL);
+			btrfs_handle_fs_error(fs_info, ret, NULL);
 			goto enospc;
 		}
@@ -1926,7 +1935,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 		path->locks[level] = 0;
 		path->nodes[level] = NULL;
-		clean_tree_block(trans, root->fs_info, mid);
+		clean_tree_block(trans, fs_info, mid);
 		btrfs_tree_unlock(mid);
 		/* once for the path */
 		free_extent_buffer(mid);
@@ -1938,7 +1947,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 		return 0;
 	}
 	if (btrfs_header_nritems(mid) >
-	    BTRFS_NODEPTRS_PER_BLOCK(root->fs_info) / 4)
+	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
 		return 0;

 	left = read_node_slot(root, parent, pslot - 1);
@@ -1987,7 +1996,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 		if (wret < 0 && wret != -ENOSPC)
 			ret = wret;
 		if (btrfs_header_nritems(right) == 0) {
-			clean_tree_block(trans, root->fs_info, right);
+			clean_tree_block(trans, fs_info, right);
 			btrfs_tree_unlock(right);
 			del_ptr(root, path, level + 1, pslot + 1);
 			root_sub_used(root, right->len);
@@ -1997,7 +2006,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 		} else {
 			struct btrfs_disk_key right_key;
 			btrfs_node_key(right, &right_key, 0);
-			tree_mod_log_set_node_key(root->fs_info, parent,
+			tree_mod_log_set_node_key(fs_info, parent,
						  pslot + 1, 0);
 			btrfs_set_node_key(parent, &right_key, pslot + 1);
 			btrfs_mark_buffer_dirty(parent);
@@ -2015,7 +2024,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 		 */
 		if (!left) {
 			ret = -EROFS;
-			btrfs_handle_fs_error(root->fs_info, ret, NULL);
+			btrfs_handle_fs_error(fs_info, ret, NULL);
 			goto enospc;
 		}
 		wret = balance_node_right(trans, root, mid, left);
@@ -2031,7 +2040,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 		BUG_ON(wret == 1);
 	}
 	if (btrfs_header_nritems(mid) == 0) {
-		clean_tree_block(trans, root->fs_info, mid);
+		clean_tree_block(trans, fs_info, mid);
 		btrfs_tree_unlock(mid);
 		del_ptr(root, path, level + 1, pslot);
 		root_sub_used(root, mid->len);
@@ -2042,8 +2051,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 		/* update the parent key to reflect our changes */
 		struct btrfs_disk_key mid_key;
 		btrfs_node_key(mid, &mid_key, 0);
-		tree_mod_log_set_node_key(root->fs_info, parent,
-					  pslot, 0);
+		tree_mod_log_set_node_key(fs_info, parent, pslot, 0);
 		btrfs_set_node_key(parent, &mid_key, pslot);
 		btrfs_mark_buffer_dirty(parent);
 	}
@@ -2090,6 +2098,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_buffer *right = NULL;
 	struct extent_buffer *mid;
 	struct extent_buffer *left = NULL;
@@ -2125,7 +2134,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
 		btrfs_set_lock_blocking(left);

 		left_nr = btrfs_header_nritems(left);
-		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root->fs_info) - 1) {
+		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
 			wret = 1;
 		} else {
 			ret = btrfs_cow_block(trans, root, left, parent,
@@ -2143,8 +2152,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
 			struct btrfs_disk_key disk_key;
 			orig_slot += left_nr;
 			btrfs_node_key(mid, &disk_key, 0);
-			tree_mod_log_set_node_key(root->fs_info, parent,
-						  pslot, 0);
+			tree_mod_log_set_node_key(fs_info, parent, pslot, 0);
 			btrfs_set_node_key(parent, &disk_key, pslot);
 			btrfs_mark_buffer_dirty(parent);
 			if (btrfs_header_nritems(left) > orig_slot) {
@@ -2179,7 +2187,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
 		btrfs_set_lock_blocking(right);

 		right_nr = btrfs_header_nritems(right);
-		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root->fs_info) - 1) {
+		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
 			wret = 1;
 		} else {
 			ret = btrfs_cow_block(trans, root, right,
@@ -2198,7 +2206,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
 			struct btrfs_disk_key disk_key;

 			btrfs_node_key(right, &disk_key, 0);
-			tree_mod_log_set_node_key(root->fs_info, parent,
+			tree_mod_log_set_node_key(fs_info, parent,
						  pslot + 1, 0);
 			btrfs_set_node_key(parent, &disk_key, pslot + 1);
 			btrfs_mark_buffer_dirty(parent);
@@ -2230,6 +2238,7 @@ static void reada_for_search(struct btrfs_root *root,
			     struct btrfs_path *path,
			     int level, int slot, u64 objectid)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_buffer *node;
 	struct btrfs_disk_key disk_key;
 	u32 nritems;
@@ -2250,8 +2259,8 @@ static void reada_for_search(struct btrfs_root *root,
 	node = path->nodes[level];

 	search = btrfs_node_blockptr(node, slot);
-	blocksize = root->fs_info->nodesize;
-	eb = find_extent_buffer(root->fs_info, search);
+	blocksize = fs_info->nodesize;
+	eb = find_extent_buffer(fs_info, search);
 	if (eb) {
 		free_extent_buffer(eb);
 		return;
@@ -2292,6 +2301,7 @@ static void reada_for_search(struct btrfs_root *root,
 static noinline void reada_for_balance(struct btrfs_root *root,
				       struct btrfs_path *path, int level)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int slot;
 	int nritems;
 	struct extent_buffer *parent;
@@ -2310,7 +2320,7 @@ static noinline void reada_for_balance(struct btrfs_root *root,
 	if (slot > 0) {
 		block1 = btrfs_node_blockptr(parent, slot - 1);
 		gen = btrfs_node_ptr_generation(parent, slot - 1);
-		eb = find_extent_buffer(root->fs_info, block1);
+		eb = find_extent_buffer(fs_info, block1);
 		/*
 		 * if we get -eagain from btrfs_buffer_uptodate, we
 		 * don't want to return eagain here. That will loop
@@ -2323,7 +2333,7 @@ static noinline void reada_for_balance(struct btrfs_root *root,
 	if (slot + 1 < nritems) {
 		block2 = btrfs_node_blockptr(parent, slot + 1);
 		gen = btrfs_node_ptr_generation(parent, slot + 1);
-		eb = find_extent_buffer(root->fs_info, block2);
+		eb = find_extent_buffer(fs_info, block2);
 		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
 			block2 = 0;
 		free_extent_buffer(eb);
@@ -2432,6 +2442,7 @@ read_block_for_search(struct btrfs_trans_handle *trans,
		       struct extent_buffer **eb_ret, int level, int slot,
		       struct btrfs_key *key, u64 time_seq)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 blocknr;
 	u64 gen;
 	struct extent_buffer *b = *eb_ret;
@@ -2441,7 +2452,7 @@ read_block_for_search(struct btrfs_trans_handle *trans,
 	blocknr = btrfs_node_blockptr(b, slot);
 	gen = btrfs_node_ptr_generation(b, slot);

-	tmp = find_extent_buffer(root->fs_info, blocknr);
+	tmp = find_extent_buffer(fs_info, blocknr);
 	if (tmp) {
 		/* first we do an atomic uptodate check */
 		if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
@@ -2517,9 +2528,11 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
		       struct extent_buffer *b, int level, int ins_len,
		       int *write_lock_level)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret;
+
 	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
-	    BTRFS_NODEPTRS_PER_BLOCK(root->fs_info) - 3) {
+	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {
 		int sret;

 		if (*write_lock_level < level + 1) {
@@ -2540,7 +2553,7 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
 		}
 		b = p->nodes[level];
 	} else if (ins_len < 0 && btrfs_header_nritems(b) <
-		   BTRFS_NODEPTRS_PER_BLOCK(root->fs_info) / 2) {
+		   BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {
 		int sret;

 		if (*write_lock_level < level + 1) {
@@ -2659,6 +2672,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *key, struct btrfs_path *p, int
		      ins_len, int cow)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_buffer *b;
 	int slot;
 	int ret;
@@ -2714,12 +2728,12 @@ again:
 		 * so we always do read locks
 		 */
 		if (p->need_commit_sem)
-			down_read(&root->fs_info->commit_root_sem);
+			down_read(&fs_info->commit_root_sem);
 		b = root->commit_root;
 		extent_buffer_get(b);
 		level = btrfs_header_level(b);
 		if (p->need_commit_sem)
-			up_read(&root->fs_info->commit_root_sem);
+			up_read(&fs_info->commit_root_sem);
 		if (!p->skip_locking)
 			btrfs_tree_read_lock(b);
 	} else {
@@ -2942,6 +2956,7 @@ done:
 int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
			  struct btrfs_path *p, u64 time_seq)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_buffer *b;
 	int slot;
 	int ret;
@@ -3016,7 +3031,7 @@ again:
 			btrfs_clear_path_blocking(p, b,
						  BTRFS_READ_LOCK);
 		}
-		b = tree_mod_log_rewind(root->fs_info, p, b, time_seq);
+		b = tree_mod_log_rewind(fs_info, p, b, time_seq);
 		if (!b) {
 			ret = -ENOMEM;
 			goto done;
@@ -3186,6 +3201,7 @@ static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int push_items = 0;
 	int src_nritems;
 	int dst_nritems;
@@ -3193,7 +3209,7 @@ static int push_node_left(struct btrfs_trans_handle *trans,

 	src_nritems = btrfs_header_nritems(src);
 	dst_nritems = btrfs_header_nritems(dst);
-	push_items = BTRFS_NODEPTRS_PER_BLOCK(root->fs_info) - dst_nritems;
+	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
 	WARN_ON(btrfs_header_generation(src) != trans->transid);
 	WARN_ON(btrfs_header_generation(dst) != trans->transid);
@@ -3218,7 +3234,7 @@ static int push_node_left(struct btrfs_trans_handle *trans,
 	} else
 		push_items = min(src_nritems - 8, push_items);

-	ret = tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
+	ret = tree_mod_log_eb_copy(fs_info, dst, src, dst_nritems, 0,
				   push_items);
 	if (ret) {
 		btrfs_abort_transaction(trans, ret);
@@ -3261,6 +3277,7 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct extent_buffer *dst,
			      struct extent_buffer *src)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int push_items = 0;
 	int max_push;
 	int src_nritems;
@@ -3272,7 +3289,7 @@ static int balance_node_right(struct btrfs_trans_handle *trans,

 	src_nritems = btrfs_header_nritems(src);
 	dst_nritems = btrfs_header_nritems(dst);
-	push_items = BTRFS_NODEPTRS_PER_BLOCK(root->fs_info) - dst_nritems;
+	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
 	if (push_items <= 0)
 		return 1;
@@ -3287,13 +3304,13 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
 	if (max_push < push_items)
 		push_items = max_push;

-	tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
+	tree_mod_log_eb_move(fs_info, dst, push_items, 0, dst_nritems);
 	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
				      btrfs_node_key_ptr_offset(0),
				      (dst_nritems) *
				      sizeof(struct btrfs_key_ptr));

-	ret = tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
+	ret = tree_mod_log_eb_copy(fs_info, dst, src, 0,
				   src_nritems - push_items, push_items);
 	if (ret) {
 		btrfs_abort_transaction(trans, ret);
@@ -3324,6 +3341,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_path *path, int level)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 lower_gen;
 	struct extent_buffer *lower;
 	struct extent_buffer *c;
@@ -3344,7 +3362,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
 	if (IS_ERR(c))
 		return PTR_ERR(c);

-	root_add_used(root, root->fs_info->nodesize);
+	root_add_used(root, fs_info->nodesize);

 	memzero_extent_buffer(c, 0, sizeof(struct btrfs_header));
 	btrfs_set_header_nritems(c, 1);
@@ -3354,8 +3372,8 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
 	btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
 	btrfs_set_header_owner(c, root->root_key.objectid);

-	write_extent_buffer_fsid(c, root->fs_info->fsid);
-	write_extent_buffer_chunk_tree_uuid(c, root->fs_info->chunk_tree_uuid);
+	write_extent_buffer_fsid(c, fs_info->fsid);
+	write_extent_buffer_chunk_tree_uuid(c, fs_info->chunk_tree_uuid);

 	btrfs_set_node_key(c, &lower_key, 0);
 	btrfs_set_node_blockptr(c, 0, lower->start);
@@ -3393,6 +3411,7 @@ static void insert_ptr(struct btrfs_trans_handle *trans,
		       struct btrfs_disk_key *key, u64 bytenr,
		       int slot, int level)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_buffer *lower;
 	int nritems;
 	int ret;
@@ -3402,10 +3421,10 @@ static void insert_ptr(struct btrfs_trans_handle *trans,
 	lower = path->nodes[level];
 	nritems = btrfs_header_nritems(lower);
 	BUG_ON(slot > nritems);
-	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root->fs_info));
+	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(fs_info));
 	if (slot != nritems) {
 		if (level)
-			tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
+			tree_mod_log_eb_move(fs_info, lower, slot + 1,
					     slot, nritems - slot);
 		memmove_extent_buffer(lower,
			      btrfs_node_key_ptr_offset(slot + 1),
@@ -3413,7 +3432,7 @@ static void insert_ptr(struct btrfs_trans_handle *trans,
			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
 	}
 	if (level) {
-		ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
+		ret = tree_mod_log_insert_key(fs_info, lower, slot,
					      MOD_LOG_KEY_ADD, GFP_NOFS);
 		BUG_ON(ret < 0);
 	}
@@ -3438,6 +3457,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path, int level)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_buffer *c;
 	struct extent_buffer *split;
 	struct btrfs_disk_key disk_key;
@@ -3465,7 +3485,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
 		ret = push_nodes_for_insert(trans, root, path, level);
 		c = path->nodes[level];
 		if (!ret && btrfs_header_nritems(c) <
-		    BTRFS_NODEPTRS_PER_BLOCK(root->fs_info) - 3)
+		    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3)
 			return 0;
 		if (ret < 0)
 			return ret;
@@ -3480,7 +3500,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
 	if (IS_ERR(split))
 		return PTR_ERR(split);

-	root_add_used(root, root->fs_info->nodesize);
+	root_add_used(root, fs_info->nodesize);

 	memzero_extent_buffer(split, 0, sizeof(struct btrfs_header));
 	btrfs_set_header_level(split, btrfs_header_level(c));
@@ -3488,12 +3508,10 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
 	btrfs_set_header_generation(split, trans->transid);
 	btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
 	btrfs_set_header_owner(split, root->root_key.objectid);
-	write_extent_buffer_fsid(split, root->fs_info->fsid);
-	write_extent_buffer_chunk_tree_uuid(split,
-			   root->fs_info->chunk_tree_uuid);
+	write_extent_buffer_fsid(split, fs_info->fsid);
+	write_extent_buffer_chunk_tree_uuid(split, fs_info->chunk_tree_uuid);

-	ret = tree_mod_log_eb_copy(root->fs_info, split, c, 0,
-			   mid, c_nritems - mid);
+	ret = tree_mod_log_eb_copy(fs_info, split, c, 0, mid, c_nritems - mid);
 	if (ret) {
 		btrfs_abort_transaction(trans, ret);
 		return ret;
@@ -3560,15 +3578,17 @@ static int leaf_space_used(struct extent_buffer *l, int start, int nr)
 noinline int btrfs_leaf_free_space(struct btrfs_root *root,
				   struct extent_buffer *leaf)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int nritems = btrfs_header_nritems(leaf);
 	int ret;
-	ret = BTRFS_LEAF_DATA_SIZE(root->fs_info) - leaf_space_used(leaf, 0, nritems);
+
+	ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
 	if (ret < 0) {
-		btrfs_crit(root->fs_info,
+		btrfs_crit(fs_info,
			   "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
-		       ret,
-		       (unsigned long) BTRFS_LEAF_DATA_SIZE(root->fs_info),
-		       leaf_space_used(leaf, 0, nritems), nritems);
+			   ret,
+			   (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
+			   leaf_space_used(leaf, 0, nritems), nritems);
 	}
 	return ret;
 }
@@ -3585,6 +3605,7 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
				      int free_space, u32 left_nritems,
				      u32 min_slot)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_buffer *left = path->nodes[0];
 	struct extent_buffer *upper = path->nodes[1];
 	struct btrfs_map_token token;
@@ -3654,11 +3675,11 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
 	memmove_extent_buffer(right,
			      btrfs_leaf_data(right) + data_end - push_space,
			      btrfs_leaf_data(right) + data_end,
-			      BTRFS_LEAF_DATA_SIZE(root->fs_info) - data_end);
+			      BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);

 	/* copy from the left data area */
 	copy_extent_buffer(right, left, btrfs_leaf_data(right) +
-		     BTRFS_LEAF_DATA_SIZE(root->fs_info) - push_space,
+		     BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
		     btrfs_leaf_data(left) + leaf_data_end(root, left),
		     push_space);
@@ -3674,7 +3695,7 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
 	/* update the item pointers */
 	right_nritems += push_items;
 	btrfs_set_header_nritems(right, right_nritems);
-	push_space = BTRFS_LEAF_DATA_SIZE(root->fs_info);
+	push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
 	for (i = 0; i < right_nritems; i++) {
 		item = btrfs_item_nr(i);
 		push_space -= btrfs_token_item_size(right, item, &token);
@@ -3687,7 +3708,7 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
 	if (left_nritems)
 		btrfs_mark_buffer_dirty(left);
 	else
-		clean_tree_block(trans, root->fs_info, left);
+		clean_tree_block(trans, fs_info, left);

 	btrfs_mark_buffer_dirty(right);
@@ -3699,7 +3720,7 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
 	if (path->slots[0] >= left_nritems) {
 		path->slots[0] -= left_nritems;
 		if (btrfs_header_nritems(path->nodes[0]) == 0)
-			clean_tree_block(trans, root->fs_info, path->nodes[0]);
+			clean_tree_block(trans, fs_info, path->nodes[0]);
 		btrfs_tree_unlock(path->nodes[0]);
 		free_extent_buffer(path->nodes[0]);
 		path->nodes[0] = right;
@@ -3814,6 +3835,7 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
				     int free_space, u32 right_nritems,
				     u32 max_slot)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_disk_key disk_key;
 	struct extent_buffer *right = path->nodes[0];
 	int i;
@@ -3870,7 +3892,7 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
		   btrfs_item_nr_offset(0),
		   push_items * sizeof(struct btrfs_item));

-	push_space = BTRFS_LEAF_DATA_SIZE(root->fs_info) -
+	push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
		     btrfs_item_offset_nr(right, push_items - 1);

 	copy_extent_buffer(left, right, btrfs_leaf_data(left) +
@@ -3889,7 +3911,7 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
 		ioff = btrfs_token_item_offset(left, item, &token);
 		btrfs_set_token_item_offset(left, item,
-		      ioff - (BTRFS_LEAF_DATA_SIZE(root->fs_info) - old_left_item_size),
+		      ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size),
		      &token);
 	}
 	btrfs_set_header_nritems(left, old_left_nritems + push_items);
@@ -3903,7 +3925,7 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
 	push_space = btrfs_item_offset_nr(right, push_items - 1) -
		     leaf_data_end(root, right);
 	memmove_extent_buffer(right, btrfs_leaf_data(right) +
-			      BTRFS_LEAF_DATA_SIZE(root->fs_info) - push_space,
+			      BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
			      btrfs_leaf_data(right) +
			      leaf_data_end(root, right), push_space);
@@ -3914,7 +3936,7 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
 	}
 	right_nritems -= push_items;
 	btrfs_set_header_nritems(right, right_nritems);
-	push_space = BTRFS_LEAF_DATA_SIZE(root->fs_info);
+	push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
 	for (i = 0; i < right_nritems; i++) {
 		item = btrfs_item_nr(i);
@@ -3927,10 +3949,10 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
 	if (right_nritems)
 		btrfs_mark_buffer_dirty(right);
 	else
-		clean_tree_block(trans, root->fs_info, right);
+		clean_tree_block(trans, fs_info, right);

 	btrfs_item_key(right, &disk_key, 0);
-	fixup_low_keys(root->fs_info, path, &disk_key, 1);
+	fixup_low_keys(fs_info, path, &disk_key, 1);

 	/* then fixup the leaf pointer in the path */
 	if (path->slots[0] < push_items) {
@@ -4036,6 +4058,7 @@ static noinline void copy_for_split(struct btrfs_trans_handle *trans,
				    struct extent_buffer *right,
				    int slot, int mid, int nritems)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int data_copy_size;
 	int rt_data_off;
 	int i;
@@ -4053,12 +4076,11 @@ static noinline void copy_for_split(struct btrfs_trans_handle *trans,
		   nritems * sizeof(struct btrfs_item));

 	copy_extent_buffer(right, l,
-		     btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root->fs_info) -
+		     btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(fs_info) -
		     data_copy_size, btrfs_leaf_data(l) +
		     leaf_data_end(root, l), data_copy_size);

-	rt_data_off = BTRFS_LEAF_DATA_SIZE(root->fs_info) -
-		      btrfs_item_end_nr(l, mid);
+	rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid);

 	for (i = 0; i < nritems; i++) {
 		struct btrfs_item *item = btrfs_item_nr(i);
@@ -4181,7 +4203,7 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
 	l = path->nodes[0];
 	slot = path->slots[0];
 	if (extend && data_size + btrfs_item_size_nr(l, slot) +
-	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root->fs_info))
+	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
 		return -EOVERFLOW;

 	/* first try to make some room by pushing left and right */
@@ -4223,14 +4245,14 @@ again:
 	if (mid <= slot) {
 		if (nritems == 1 ||
		    leaf_space_used(l, mid, nritems - mid) + data_size >
-		    BTRFS_LEAF_DATA_SIZE(root->fs_info)) {
+		    BTRFS_LEAF_DATA_SIZE(fs_info)) {
 			if (slot >= nritems) {
				split = 0;
 			} else {
				mid = slot;
				if (mid != nritems &&
				    leaf_space_used(l, mid, nritems - mid) +
-				    data_size > BTRFS_LEAF_DATA_SIZE(root->fs_info)) {
+				    data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
					if (data_size && !tried_avoid_double)
						goto push_for_double;
					split = 2;
@@ -4239,7 +4261,7 @@ again:
 			}
 		} else {
 			if (leaf_space_used(l, 0, mid) + data_size >
-			    BTRFS_LEAF_DATA_SIZE(root->fs_info)) {
+			    BTRFS_LEAF_DATA_SIZE(fs_info)) {
				if (!extend && data_size && slot == 0) {
					split = 0;
				} else if ((extend || !data_size) && slot == 0) {
@@ -4248,7 +4270,7 @@ again:
				mid = slot;
				if (mid != nritems &&
				    leaf_space_used(l, mid, nritems - mid) +
-				    data_size > BTRFS_LEAF_DATA_SIZE(root->fs_info)) {
+				    data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
					if (data_size && !tried_avoid_double)
						goto push_for_double;
					split = 2;
@@ -4267,7 +4289,7 @@ again:
 	if (IS_ERR(right))
 		return PTR_ERR(right);

-	root_add_used(root, root->fs_info->nodesize);
+	root_add_used(root, fs_info->nodesize);

 	memzero_extent_buffer(right, 0, sizeof(struct btrfs_header));
 	btrfs_set_header_bytenr(right, right->start);
@@ -4539,6 +4561,7 @@ int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path, void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
u32 new_size, int from_end) u32 new_size, int from_end)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
int slot; int slot;
struct extent_buffer *leaf; struct extent_buffer *leaf;
struct btrfs_item *item; struct btrfs_item *item;
@ -4619,7 +4642,7 @@ void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
btrfs_set_disk_key_offset(&disk_key, offset + size_diff); btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
btrfs_set_item_key(leaf, &disk_key, slot); btrfs_set_item_key(leaf, &disk_key, slot);
if (slot == 0) if (slot == 0)
fixup_low_keys(root->fs_info, path, &disk_key, 1); fixup_low_keys(fs_info, path, &disk_key, 1);
} }
item = btrfs_item_nr(slot); item = btrfs_item_nr(slot);
@ -4638,6 +4661,7 @@ void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path, void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
u32 data_size) u32 data_size)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
int slot; int slot;
struct extent_buffer *leaf; struct extent_buffer *leaf;
struct btrfs_item *item; struct btrfs_item *item;
@ -4665,8 +4689,8 @@ void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
BUG_ON(slot < 0); BUG_ON(slot < 0);
if (slot >= nritems) { if (slot >= nritems) {
btrfs_print_leaf(root, leaf); btrfs_print_leaf(root, leaf);
btrfs_crit(root->fs_info, "slot %d too large, nritems %d", btrfs_crit(fs_info, "slot %d too large, nritems %d",
slot, nritems); slot, nritems);
BUG_ON(1); BUG_ON(1);
} }
@ -4709,6 +4733,7 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
struct btrfs_key *cpu_key, u32 *data_size, struct btrfs_key *cpu_key, u32 *data_size,
u32 total_data, u32 total_size, int nr) u32 total_data, u32 total_size, int nr)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_item *item; struct btrfs_item *item;
int i; int i;
u32 nritems; u32 nritems;
@ -4720,7 +4745,7 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
if (path->slots[0] == 0) { if (path->slots[0] == 0) {
btrfs_cpu_key_to_disk(&disk_key, cpu_key); btrfs_cpu_key_to_disk(&disk_key, cpu_key);
fixup_low_keys(root->fs_info, path, &disk_key, 1); fixup_low_keys(fs_info, path, &disk_key, 1);
} }
btrfs_unlock_up_safe(path, 1); btrfs_unlock_up_safe(path, 1);
@ -4734,8 +4759,7 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
if (btrfs_leaf_free_space(root, leaf) < total_size) { if (btrfs_leaf_free_space(root, leaf) < total_size) {
btrfs_print_leaf(root, leaf); btrfs_print_leaf(root, leaf);
btrfs_crit(root->fs_info, btrfs_crit(fs_info, "not enough freespace need %u have %d",
"not enough freespace need %u have %d",
total_size, btrfs_leaf_free_space(root, leaf)); total_size, btrfs_leaf_free_space(root, leaf));
BUG(); BUG();
} }
@ -4745,8 +4769,7 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
if (old_data < data_end) { if (old_data < data_end) {
btrfs_print_leaf(root, leaf); btrfs_print_leaf(root, leaf);
btrfs_crit(root->fs_info, btrfs_crit(fs_info, "slot %d old_data %d data_end %d",
"slot %d old_data %d data_end %d",
slot, old_data, data_end); slot, old_data, data_end);
BUG_ON(1); BUG_ON(1);
} }
@ -4864,6 +4887,7 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path, static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
int level, int slot) int level, int slot)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *parent = path->nodes[level]; struct extent_buffer *parent = path->nodes[level];
u32 nritems; u32 nritems;
int ret; int ret;
@ -4871,7 +4895,7 @@ static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
nritems = btrfs_header_nritems(parent); nritems = btrfs_header_nritems(parent);
if (slot != nritems - 1) { if (slot != nritems - 1) {
if (level) if (level)
tree_mod_log_eb_move(root->fs_info, parent, slot, tree_mod_log_eb_move(fs_info, parent, slot,
slot + 1, nritems - slot - 1); slot + 1, nritems - slot - 1);
memmove_extent_buffer(parent, memmove_extent_buffer(parent,
btrfs_node_key_ptr_offset(slot), btrfs_node_key_ptr_offset(slot),
@ -4879,7 +4903,7 @@ static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
sizeof(struct btrfs_key_ptr) * sizeof(struct btrfs_key_ptr) *
(nritems - slot - 1)); (nritems - slot - 1));
} else if (level) { } else if (level) {
ret = tree_mod_log_insert_key(root->fs_info, parent, slot, ret = tree_mod_log_insert_key(fs_info, parent, slot,
MOD_LOG_KEY_REMOVE, GFP_NOFS); MOD_LOG_KEY_REMOVE, GFP_NOFS);
BUG_ON(ret < 0); BUG_ON(ret < 0);
} }
@ -4894,7 +4918,7 @@ static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
struct btrfs_disk_key disk_key; struct btrfs_disk_key disk_key;
btrfs_node_key(parent, &disk_key, 0); btrfs_node_key(parent, &disk_key, 0);
fixup_low_keys(root->fs_info, path, &disk_key, level + 1); fixup_low_keys(fs_info, path, &disk_key, level + 1);
} }
btrfs_mark_buffer_dirty(parent); btrfs_mark_buffer_dirty(parent);
} }
@ -4936,6 +4960,7 @@ static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_path *path, int slot, int nr) struct btrfs_path *path, int slot, int nr)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *leaf; struct extent_buffer *leaf;
struct btrfs_item *item; struct btrfs_item *item;
u32 last_off; u32 last_off;
@ -4987,7 +5012,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
btrfs_set_header_level(leaf, 0); btrfs_set_header_level(leaf, 0);
} else { } else {
btrfs_set_path_blocking(path); btrfs_set_path_blocking(path);
clean_tree_block(trans, root->fs_info, leaf); clean_tree_block(trans, fs_info, leaf);
btrfs_del_leaf(trans, root, path, leaf); btrfs_del_leaf(trans, root, path, leaf);
} }
} else { } else {
@ -4996,11 +5021,11 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_disk_key disk_key; struct btrfs_disk_key disk_key;
btrfs_item_key(leaf, &disk_key, 0); btrfs_item_key(leaf, &disk_key, 0);
fixup_low_keys(root->fs_info, path, &disk_key, 1); fixup_low_keys(fs_info, path, &disk_key, 1);
} }
/* delete the leaf if it is mostly empty */ /* delete the leaf if it is mostly empty */
if (used < BTRFS_LEAF_DATA_SIZE(root->fs_info) / 3) { if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
/* push_leaf_left fixes the path. /* push_leaf_left fixes the path.
* make sure the path still points to our leaf * make sure the path still points to our leaf
* for possible call to del_ptr below * for possible call to del_ptr below
@ -5337,6 +5362,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
struct btrfs_root *right_root, struct btrfs_root *right_root,
btrfs_changed_cb_t changed_cb, void *ctx) btrfs_changed_cb_t changed_cb, void *ctx)
{ {
struct btrfs_fs_info *fs_info = left_root->fs_info;
int ret; int ret;
int cmp; int cmp;
struct btrfs_path *left_path = NULL; struct btrfs_path *left_path = NULL;
@ -5368,10 +5394,9 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
goto out; goto out;
} }
tmp_buf = kmalloc(left_root->fs_info->nodesize, tmp_buf = kmalloc(fs_info->nodesize, GFP_KERNEL | __GFP_NOWARN);
GFP_KERNEL | __GFP_NOWARN);
if (!tmp_buf) { if (!tmp_buf) {
tmp_buf = vmalloc(left_root->fs_info->nodesize); tmp_buf = vmalloc(fs_info->nodesize);
if (!tmp_buf) { if (!tmp_buf) {
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;
@ -5419,7 +5444,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
* the right if possible or go up and right. * the right if possible or go up and right.
*/ */
down_read(&left_root->fs_info->commit_root_sem); down_read(&fs_info->commit_root_sem);
left_level = btrfs_header_level(left_root->commit_root); left_level = btrfs_header_level(left_root->commit_root);
left_root_level = left_level; left_root_level = left_level;
left_path->nodes[left_level] = left_root->commit_root; left_path->nodes[left_level] = left_root->commit_root;
@ -5429,7 +5454,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
right_root_level = right_level; right_root_level = right_level;
right_path->nodes[right_level] = right_root->commit_root; right_path->nodes[right_level] = right_root->commit_root;
extent_buffer_get(right_path->nodes[right_level]); extent_buffer_get(right_path->nodes[right_level]);
up_read(&left_root->fs_info->commit_root_sem); up_read(&fs_info->commit_root_sem);
if (left_level == 0) if (left_level == 0)
btrfs_item_key_to_cpu(left_path->nodes[left_level], btrfs_item_key_to_cpu(left_path->nodes[left_level],


@ -1352,9 +1352,11 @@ static inline int
btrfs_should_fragment_free_space(struct btrfs_root *root, btrfs_should_fragment_free_space(struct btrfs_root *root,
struct btrfs_block_group_cache *block_group) struct btrfs_block_group_cache *block_group)
{ {
return (btrfs_test_opt(root->fs_info, FRAGMENT_METADATA) && struct btrfs_fs_info *fs_info = root->fs_info;
return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
block_group->flags & BTRFS_BLOCK_GROUP_METADATA) || block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
(btrfs_test_opt(root->fs_info, FRAGMENT_DATA) && (btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
block_group->flags & BTRFS_BLOCK_GROUP_DATA); block_group->flags & BTRFS_BLOCK_GROUP_DATA);
} }
#endif #endif
@ -2312,10 +2314,11 @@ static inline unsigned long btrfs_leaf_data(struct extent_buffer *l)
static inline unsigned int leaf_data_end(struct btrfs_root *root, static inline unsigned int leaf_data_end(struct btrfs_root *root,
struct extent_buffer *leaf) struct extent_buffer *leaf)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
u32 nr = btrfs_header_nritems(leaf); u32 nr = btrfs_header_nritems(leaf);
if (nr == 0) if (nr == 0)
return BTRFS_LEAF_DATA_SIZE(root->fs_info); return BTRFS_LEAF_DATA_SIZE(fs_info);
return btrfs_item_offset_nr(leaf, nr - 1); return btrfs_item_offset_nr(leaf, nr - 1);
} }
@ -2905,8 +2908,9 @@ static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
*/ */
static inline int btrfs_need_cleaner_sleep(struct btrfs_root *root) static inline int btrfs_need_cleaner_sleep(struct btrfs_root *root)
{ {
return (root->fs_info->sb->s_flags & MS_RDONLY || struct btrfs_fs_info *fs_info = root->fs_info;
btrfs_fs_closing(root->fs_info));
return (fs_info->sb->s_flags & MS_RDONLY || btrfs_fs_closing(fs_info));
} }
static inline void free_fs_info(struct btrfs_fs_info *fs_info) static inline void free_fs_info(struct btrfs_fs_info *fs_info)


@ -538,6 +538,7 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct btrfs_root *root,
struct btrfs_delayed_item *item) struct btrfs_delayed_item *item)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_rsv *src_rsv; struct btrfs_block_rsv *src_rsv;
struct btrfs_block_rsv *dst_rsv; struct btrfs_block_rsv *dst_rsv;
u64 num_bytes; u64 num_bytes;
@ -547,12 +548,12 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
return 0; return 0;
src_rsv = trans->block_rsv; src_rsv = trans->block_rsv;
dst_rsv = &root->fs_info->delayed_block_rsv; dst_rsv = &fs_info->delayed_block_rsv;
num_bytes = btrfs_calc_trans_metadata_size(root->fs_info, 1); num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1); ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
if (!ret) { if (!ret) {
trace_btrfs_space_reservation(root->fs_info, "delayed_item", trace_btrfs_space_reservation(fs_info, "delayed_item",
item->key.objectid, item->key.objectid,
num_bytes, 1); num_bytes, 1);
item->bytes_reserved = num_bytes; item->bytes_reserved = num_bytes;
@ -564,13 +565,14 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
static void btrfs_delayed_item_release_metadata(struct btrfs_root *root, static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
struct btrfs_delayed_item *item) struct btrfs_delayed_item *item)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_rsv *rsv; struct btrfs_block_rsv *rsv;
if (!item->bytes_reserved) if (!item->bytes_reserved)
return; return;
rsv = &root->fs_info->delayed_block_rsv; rsv = &fs_info->delayed_block_rsv;
trace_btrfs_space_reservation(root->fs_info, "delayed_item", trace_btrfs_space_reservation(fs_info, "delayed_item",
item->key.objectid, item->bytes_reserved, item->key.objectid, item->bytes_reserved,
0); 0);
btrfs_block_rsv_release(root, rsv, btrfs_block_rsv_release(root, rsv,
@ -583,6 +585,7 @@ static int btrfs_delayed_inode_reserve_metadata(
struct inode *inode, struct inode *inode,
struct btrfs_delayed_node *node) struct btrfs_delayed_node *node)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_rsv *src_rsv; struct btrfs_block_rsv *src_rsv;
struct btrfs_block_rsv *dst_rsv; struct btrfs_block_rsv *dst_rsv;
u64 num_bytes; u64 num_bytes;
@ -590,9 +593,9 @@ static int btrfs_delayed_inode_reserve_metadata(
bool release = false; bool release = false;
src_rsv = trans->block_rsv; src_rsv = trans->block_rsv;
dst_rsv = &root->fs_info->delayed_block_rsv; dst_rsv = &fs_info->delayed_block_rsv;
num_bytes = btrfs_calc_trans_metadata_size(root->fs_info, 1); num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
/* /*
* If our block_rsv is the delalloc block reserve then check and see if * If our block_rsv is the delalloc block reserve then check and see if
@ -640,7 +643,7 @@ static int btrfs_delayed_inode_reserve_metadata(
ret = -ENOSPC; ret = -ENOSPC;
if (!ret) { if (!ret) {
node->bytes_reserved = num_bytes; node->bytes_reserved = num_bytes;
trace_btrfs_space_reservation(root->fs_info, trace_btrfs_space_reservation(fs_info,
"delayed_inode", "delayed_inode",
btrfs_ino(inode), btrfs_ino(inode),
num_bytes, 1); num_bytes, 1);
@ -664,13 +667,13 @@ static int btrfs_delayed_inode_reserve_metadata(
* how block rsvs. work. * how block rsvs. work.
*/ */
if (!ret) { if (!ret) {
trace_btrfs_space_reservation(root->fs_info, "delayed_inode", trace_btrfs_space_reservation(fs_info, "delayed_inode",
btrfs_ino(inode), num_bytes, 1); btrfs_ino(inode), num_bytes, 1);
node->bytes_reserved = num_bytes; node->bytes_reserved = num_bytes;
} }
if (release) { if (release) {
trace_btrfs_space_reservation(root->fs_info, "delalloc", trace_btrfs_space_reservation(fs_info, "delalloc",
btrfs_ino(inode), num_bytes, 0); btrfs_ino(inode), num_bytes, 0);
btrfs_block_rsv_release(root, src_rsv, num_bytes); btrfs_block_rsv_release(root, src_rsv, num_bytes);
} }
@ -681,13 +684,14 @@ static int btrfs_delayed_inode_reserve_metadata(
static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root, static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
struct btrfs_delayed_node *node) struct btrfs_delayed_node *node)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_rsv *rsv; struct btrfs_block_rsv *rsv;
if (!node->bytes_reserved) if (!node->bytes_reserved)
return; return;
rsv = &root->fs_info->delayed_block_rsv; rsv = &fs_info->delayed_block_rsv;
trace_btrfs_space_reservation(root->fs_info, "delayed_inode", trace_btrfs_space_reservation(fs_info, "delayed_inode",
node->inode_id, node->bytes_reserved, 0); node->inode_id, node->bytes_reserved, 0);
btrfs_block_rsv_release(root, rsv, btrfs_block_rsv_release(root, rsv,
node->bytes_reserved); node->bytes_reserved);
@ -1140,6 +1144,7 @@ __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root, int nr) struct btrfs_root *root, int nr)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_delayed_root *delayed_root; struct btrfs_delayed_root *delayed_root;
struct btrfs_delayed_node *curr_node, *prev_node; struct btrfs_delayed_node *curr_node, *prev_node;
struct btrfs_path *path; struct btrfs_path *path;
@ -1156,7 +1161,7 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
path->leave_spinning = 1; path->leave_spinning = 1;
block_rsv = trans->block_rsv; block_rsv = trans->block_rsv;
trans->block_rsv = &root->fs_info->delayed_block_rsv; trans->block_rsv = &fs_info->delayed_block_rsv;
delayed_root = btrfs_get_delayed_root(root); delayed_root = btrfs_get_delayed_root(root);
@ -1860,6 +1865,7 @@ release_node:
int btrfs_delayed_delete_inode_ref(struct inode *inode) int btrfs_delayed_delete_inode_ref(struct inode *inode)
{ {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_delayed_node *delayed_node; struct btrfs_delayed_node *delayed_node;
/* /*
@ -1867,8 +1873,7 @@ int btrfs_delayed_delete_inode_ref(struct inode *inode)
* leads to enospc problems. This means we also can't do * leads to enospc problems. This means we also can't do
* delayed inode refs * delayed inode refs
*/ */
if (test_bit(BTRFS_FS_LOG_RECOVERING, if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
&BTRFS_I(inode)->root->fs_info->flags))
return -EAGAIN; return -EAGAIN;
delayed_node = btrfs_get_or_create_delayed_node(inode); delayed_node = btrfs_get_or_create_delayed_node(inode);
@ -1895,7 +1900,7 @@ int btrfs_delayed_delete_inode_ref(struct inode *inode)
set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags); set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
delayed_node->count++; delayed_node->count++;
atomic_inc(&BTRFS_I(inode)->root->fs_info->delayed_root->items); atomic_inc(&fs_info->delayed_root->items);
release_node: release_node:
mutex_unlock(&delayed_node->mutex); mutex_unlock(&delayed_node->mutex);
btrfs_release_delayed_node(delayed_node); btrfs_release_delayed_node(delayed_node);


@ -142,7 +142,7 @@ no_valid_dev_replace_entry_found:
* missing * missing
*/ */
if (!dev_replace->srcdev && if (!dev_replace->srcdev &&
!btrfs_test_opt(dev_root->fs_info, DEGRADED)) { !btrfs_test_opt(fs_info, DEGRADED)) {
ret = -EIO; ret = -EIO;
btrfs_warn(fs_info, btrfs_warn(fs_info,
"cannot mount because device replace operation is ongoing and"); "cannot mount because device replace operation is ongoing and");
@ -151,7 +151,7 @@ no_valid_dev_replace_entry_found:
src_devid); src_devid);
} }
if (!dev_replace->tgtdev && if (!dev_replace->tgtdev &&
!btrfs_test_opt(dev_root->fs_info, DEGRADED)) { !btrfs_test_opt(fs_info, DEGRADED)) {
ret = -EIO; ret = -EIO;
btrfs_warn(fs_info, btrfs_warn(fs_info,
"cannot mount because device replace operation is ongoing and"); "cannot mount because device replace operation is ongoing and");
@ -387,7 +387,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
if (ret) if (ret)
btrfs_err(fs_info, "kobj add dev failed %d", ret); btrfs_err(fs_info, "kobj add dev failed %d", ret);
btrfs_wait_ordered_roots(root->fs_info, -1, 0, (u64)-1); btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1);
/* force writing the updated state information to disk */ /* force writing the updated state information to disk */
trans = btrfs_start_transaction(root, 0); trans = btrfs_start_transaction(root, 0);
@ -501,12 +501,12 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
* flush all outstanding I/O and inode extent mappings before the * flush all outstanding I/O and inode extent mappings before the
* copy operation is declared as being finished * copy operation is declared as being finished
*/ */
ret = btrfs_start_delalloc_roots(root->fs_info, 0, -1); ret = btrfs_start_delalloc_roots(fs_info, 0, -1);
if (ret) { if (ret) {
mutex_unlock(&dev_replace->lock_finishing_cancel_unmount); mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
return ret; return ret;
} }
btrfs_wait_ordered_roots(root->fs_info, -1, 0, (u64)-1); btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1);
trans = btrfs_start_transaction(root, 0); trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) { if (IS_ERR(trans)) {
@ -518,8 +518,8 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
mutex_lock(&uuid_mutex); mutex_lock(&uuid_mutex);
/* keep away write_all_supers() during the finishing procedure */ /* keep away write_all_supers() during the finishing procedure */
mutex_lock(&root->fs_info->fs_devices->device_list_mutex); mutex_lock(&fs_info->fs_devices->device_list_mutex);
mutex_lock(&root->fs_info->chunk_mutex); mutex_lock(&fs_info->chunk_mutex);
btrfs_dev_replace_lock(dev_replace, 1); btrfs_dev_replace_lock(dev_replace, 1);
dev_replace->replace_state = dev_replace->replace_state =
scrub_ret ? BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED scrub_ret ? BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED
@ -535,15 +535,15 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
src_device, src_device,
tgt_device); tgt_device);
} else { } else {
btrfs_err_in_rcu(root->fs_info, btrfs_err_in_rcu(fs_info,
"btrfs_scrub_dev(%s, %llu, %s) failed %d", "btrfs_scrub_dev(%s, %llu, %s) failed %d",
src_device->missing ? "<missing disk>" : src_device->missing ? "<missing disk>" :
rcu_str_deref(src_device->name), rcu_str_deref(src_device->name),
src_device->devid, src_device->devid,
rcu_str_deref(tgt_device->name), scrub_ret); rcu_str_deref(tgt_device->name), scrub_ret);
btrfs_dev_replace_unlock(dev_replace, 1); btrfs_dev_replace_unlock(dev_replace, 1);
mutex_unlock(&root->fs_info->chunk_mutex); mutex_unlock(&fs_info->chunk_mutex);
mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); mutex_unlock(&fs_info->fs_devices->device_list_mutex);
mutex_unlock(&uuid_mutex); mutex_unlock(&uuid_mutex);
if (tgt_device) if (tgt_device)
btrfs_destroy_dev_replace_tgtdev(fs_info, tgt_device); btrfs_destroy_dev_replace_tgtdev(fs_info, tgt_device);
@ -552,12 +552,12 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
return scrub_ret; return scrub_ret;
} }
btrfs_info_in_rcu(root->fs_info, btrfs_info_in_rcu(fs_info,
"dev_replace from %s (devid %llu) to %s finished", "dev_replace from %s (devid %llu) to %s finished",
src_device->missing ? "<missing disk>" : src_device->missing ? "<missing disk>" :
rcu_str_deref(src_device->name), rcu_str_deref(src_device->name),
src_device->devid, src_device->devid,
rcu_str_deref(tgt_device->name)); rcu_str_deref(tgt_device->name));
tgt_device->is_tgtdev_for_dev_replace = 0; tgt_device->is_tgtdev_for_dev_replace = 0;
tgt_device->devid = src_device->devid; tgt_device->devid = src_device->devid;
src_device->devid = BTRFS_DEV_REPLACE_DEVID; src_device->devid = BTRFS_DEV_REPLACE_DEVID;
@ -592,8 +592,8 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
* superblock is scratched out so that it is no longer marked to * superblock is scratched out so that it is no longer marked to
* belong to this filesystem. * belong to this filesystem.
*/ */
mutex_unlock(&root->fs_info->chunk_mutex); mutex_unlock(&fs_info->chunk_mutex);
mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); mutex_unlock(&fs_info->fs_devices->device_list_mutex);
mutex_unlock(&uuid_mutex); mutex_unlock(&uuid_mutex);
/* replace the sysfs entry */ /* replace the sysfs entry */


@ -451,12 +451,12 @@ int verify_dir_item(struct btrfs_root *root,
struct extent_buffer *leaf, struct extent_buffer *leaf,
struct btrfs_dir_item *dir_item) struct btrfs_dir_item *dir_item)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
u16 namelen = BTRFS_NAME_LEN; u16 namelen = BTRFS_NAME_LEN;
u8 type = btrfs_dir_type(leaf, dir_item); u8 type = btrfs_dir_type(leaf, dir_item);
if (type >= BTRFS_FT_MAX) { if (type >= BTRFS_FT_MAX) {
btrfs_crit(root->fs_info, "invalid dir item type: %d", btrfs_crit(fs_info, "invalid dir item type: %d", (int)type);
(int)type);
return 1; return 1;
} }
@ -464,16 +464,16 @@ int verify_dir_item(struct btrfs_root *root,
namelen = XATTR_NAME_MAX; namelen = XATTR_NAME_MAX;
if (btrfs_dir_name_len(leaf, dir_item) > namelen) { if (btrfs_dir_name_len(leaf, dir_item) > namelen) {
btrfs_crit(root->fs_info, "invalid dir item name len: %u", btrfs_crit(fs_info, "invalid dir item name len: %u",
(unsigned)btrfs_dir_data_len(leaf, dir_item)); (unsigned)btrfs_dir_data_len(leaf, dir_item));
return 1; return 1;
} }
/* BTRFS_MAX_XATTR_SIZE is the same for all dir items */ /* BTRFS_MAX_XATTR_SIZE is the same for all dir items */
if ((btrfs_dir_data_len(leaf, dir_item) + if ((btrfs_dir_data_len(leaf, dir_item) +
btrfs_dir_name_len(leaf, dir_item)) > BTRFS_MAX_XATTR_SIZE(root->fs_info)) { btrfs_dir_name_len(leaf, dir_item)) >
btrfs_crit(root->fs_info, BTRFS_MAX_XATTR_SIZE(fs_info)) {
"invalid dir item name + data len: %u + %u", btrfs_crit(fs_info, "invalid dir item name + data len: %u + %u",
(unsigned)btrfs_dir_name_len(leaf, dir_item), (unsigned)btrfs_dir_name_len(leaf, dir_item),
(unsigned)btrfs_dir_data_len(leaf, dir_item)); (unsigned)btrfs_dir_data_len(leaf, dir_item));
return 1; return 1;


@ -224,6 +224,7 @@ static struct extent_map *btree_get_extent(struct inode *inode,
struct page *page, size_t pg_offset, u64 start, u64 len, struct page *page, size_t pg_offset, u64 start, u64 len,
int create) int create)
{ {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct extent_map *em; struct extent_map *em;
int ret; int ret;
@ -231,8 +232,7 @@ static struct extent_map *btree_get_extent(struct inode *inode,
read_lock(&em_tree->lock); read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, start, len); em = lookup_extent_mapping(em_tree, start, len);
if (em) { if (em) {
em->bdev = em->bdev = fs_info->fs_devices->latest_bdev;
BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
read_unlock(&em_tree->lock); read_unlock(&em_tree->lock);
goto out; goto out;
} }
@ -247,7 +247,7 @@ static struct extent_map *btree_get_extent(struct inode *inode,
em->len = (u64)-1; em->len = (u64)-1;
em->block_len = (u64)-1; em->block_len = (u64)-1;
em->block_start = 0; em->block_start = 0;
em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; em->bdev = fs_info->fs_devices->latest_bdev;
write_lock(&em_tree->lock); write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em, 0); ret = add_extent_mapping(em_tree, em, 0);
@ -444,6 +444,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
struct extent_buffer *eb, struct extent_buffer *eb,
u64 parent_transid) u64 parent_transid)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_io_tree *io_tree; struct extent_io_tree *io_tree;
int failed = 0; int failed = 0;
int ret; int ret;
@ -452,7 +453,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
int failed_mirror = 0; int failed_mirror = 0;
clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags); clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree; io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
while (1) { while (1) {
ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE, ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
btree_get_extent, mirror_num); btree_get_extent, mirror_num);
@ -472,7 +473,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags)) if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
break; break;
num_copies = btrfs_num_copies(root->fs_info, num_copies = btrfs_num_copies(fs_info,
eb->start, eb->len); eb->start, eb->len);
if (num_copies == 1) if (num_copies == 1)
break; break;
@ -545,15 +546,16 @@ static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
return ret; return ret;
} }
#define CORRUPT(reason, eb, root, slot) \ #define CORRUPT(reason, eb, root, slot) \
btrfs_crit(root->fs_info, "corrupt %s, %s: block=%llu," \ btrfs_crit(root->fs_info, \
" root=%llu, slot=%d", \ "corrupt %s, %s: block=%llu, root=%llu, slot=%d", \
btrfs_header_level(eb) == 0 ? "leaf" : "node",\ btrfs_header_level(eb) == 0 ? "leaf" : "node", \
reason, btrfs_header_bytenr(eb), root->objectid, slot) reason, btrfs_header_bytenr(eb), root->objectid, slot)
static noinline int check_leaf(struct btrfs_root *root, static noinline int check_leaf(struct btrfs_root *root,
struct extent_buffer *leaf) struct extent_buffer *leaf)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key; struct btrfs_key key;
struct btrfs_key leaf_key; struct btrfs_key leaf_key;
u32 nritems = btrfs_header_nritems(leaf); u32 nritems = btrfs_header_nritems(leaf);
@ -566,7 +568,7 @@ static noinline int check_leaf(struct btrfs_root *root,
key.type = BTRFS_ROOT_ITEM_KEY; key.type = BTRFS_ROOT_ITEM_KEY;
key.offset = (u64)-1; key.offset = (u64)-1;
check_root = btrfs_get_fs_root(root->fs_info, &key, false); check_root = btrfs_get_fs_root(fs_info, &key, false);
/* /*
* The only reason we also check NULL here is that during * The only reason we also check NULL here is that during
* open_ctree() some roots has not yet been set up. * open_ctree() some roots has not yet been set up.
@ -585,7 +587,7 @@ static noinline int check_leaf(struct btrfs_root *root,
/* Check the 0 item */ /* Check the 0 item */
if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) != if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
BTRFS_LEAF_DATA_SIZE(root->fs_info)) { BTRFS_LEAF_DATA_SIZE(fs_info)) {
CORRUPT("invalid item offset size pair", leaf, root, 0); CORRUPT("invalid item offset size pair", leaf, root, 0);
return -EIO; return -EIO;
} }
@ -624,7 +626,7 @@ static noinline int check_leaf(struct btrfs_root *root,
* all point outside of the leaf. * all point outside of the leaf.
*/ */
if (btrfs_item_end_nr(leaf, slot) > if (btrfs_item_end_nr(leaf, slot) >
BTRFS_LEAF_DATA_SIZE(root->fs_info)) { BTRFS_LEAF_DATA_SIZE(fs_info)) {
CORRUPT("slot end outside of leaf", leaf, root, slot); CORRUPT("slot end outside of leaf", leaf, root, slot);
return -EIO; return -EIO;
} }
@ -1004,6 +1006,7 @@ static int btree_submit_bio_hook(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags, int mirror_num, unsigned long bio_flags,
u64 bio_offset) u64 bio_offset)
{ {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int async = check_async_write(inode, bio_flags); int async = check_async_write(inode, bio_flags);
int ret; int ret;
@ -1012,8 +1015,8 @@ static int btree_submit_bio_hook(struct inode *inode, struct bio *bio,
* called for a read, do the setup so that checksum validation * called for a read, do the setup so that checksum validation
* can happen in the async kernel threads * can happen in the async kernel threads
*/ */
ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info, ret = btrfs_bio_wq_end_io(fs_info, bio,
bio, BTRFS_WQ_ENDIO_METADATA); BTRFS_WQ_ENDIO_METADATA);
if (ret) if (ret)
goto out_w_error; goto out_w_error;
ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 0); ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 0);
@ -1027,8 +1030,7 @@ static int btree_submit_bio_hook(struct inode *inode, struct bio *bio,
* kthread helpers are used to submit writes so that * kthread helpers are used to submit writes so that
* checksumming can happen in parallel across all CPUs * checksumming can happen in parallel across all CPUs
*/ */
ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, ret = btrfs_wq_submit_bio(fs_info, inode, bio, mirror_num, 0,
inode, bio, mirror_num, 0,
bio_offset, bio_offset,
__btree_submit_bio_start, __btree_submit_bio_start,
__btree_submit_bio_done); __btree_submit_bio_done);
@ -1194,9 +1196,11 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root, struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
u64 bytenr) u64 bytenr)
{ {
if (btrfs_is_testing(root->fs_info)) struct btrfs_fs_info *fs_info = root->fs_info;
return alloc_test_extent_buffer(root->fs_info, bytenr);
return alloc_extent_buffer(root->fs_info, bytenr); if (btrfs_is_testing(fs_info))
return alloc_test_extent_buffer(fs_info, bytenr);
return alloc_extent_buffer(fs_info, bytenr);
} }
@ -1493,7 +1497,7 @@ static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID); btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
root->node = leaf; root->node = leaf;
write_extent_buffer_fsid(root->node, root->fs_info->fsid); write_extent_buffer_fsid(root->node, fs_info->fsid);
btrfs_mark_buffer_dirty(root->node); btrfs_mark_buffer_dirty(root->node);
btrfs_tree_unlock(root->node); btrfs_tree_unlock(root->node);
return root; return root;
@ -1515,10 +1519,11 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
int btrfs_add_log_tree(struct btrfs_trans_handle *trans, int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root) struct btrfs_root *root)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *log_root; struct btrfs_root *log_root;
struct btrfs_inode_item *inode_item; struct btrfs_inode_item *inode_item;
log_root = alloc_log_tree(trans, root->fs_info); log_root = alloc_log_tree(trans, fs_info);
if (IS_ERR(log_root)) if (IS_ERR(log_root))
return PTR_ERR(log_root); return PTR_ERR(log_root);
@ -1530,7 +1535,7 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
btrfs_set_stack_inode_size(inode_item, 3); btrfs_set_stack_inode_size(inode_item, 3);
btrfs_set_stack_inode_nlink(inode_item, 1); btrfs_set_stack_inode_nlink(inode_item, 1);
btrfs_set_stack_inode_nbytes(inode_item, btrfs_set_stack_inode_nbytes(inode_item,
root->fs_info->nodesize); fs_info->nodesize);
btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755); btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
btrfs_set_root_node(&log_root->root_item, log_root->node); btrfs_set_root_node(&log_root->root_item, log_root->node);
@ -1828,6 +1833,7 @@ static void end_workqueue_fn(struct btrfs_work *work)
static int cleaner_kthread(void *arg) static int cleaner_kthread(void *arg)
{ {
struct btrfs_root *root = arg; struct btrfs_root *root = arg;
struct btrfs_fs_info *fs_info = root->fs_info;
int again; int again;
struct btrfs_trans_handle *trans; struct btrfs_trans_handle *trans;
@ -1842,10 +1848,10 @@ static int cleaner_kthread(void *arg)
* Do not do anything if we might cause open_ctree() to block * Do not do anything if we might cause open_ctree() to block
* before we have finished mounting the filesystem. * before we have finished mounting the filesystem.
*/ */
if (!test_bit(BTRFS_FS_OPEN, &root->fs_info->flags)) if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
goto sleep; goto sleep;
if (!mutex_trylock(&root->fs_info->cleaner_mutex)) if (!mutex_trylock(&fs_info->cleaner_mutex))
goto sleep; goto sleep;
/* /*
@ -1853,22 +1859,22 @@ static int cleaner_kthread(void *arg)
* during the above check and trylock. * during the above check and trylock.
*/ */
if (btrfs_need_cleaner_sleep(root)) { if (btrfs_need_cleaner_sleep(root)) {
mutex_unlock(&root->fs_info->cleaner_mutex); mutex_unlock(&fs_info->cleaner_mutex);
goto sleep; goto sleep;
} }
mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex); mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
btrfs_run_delayed_iputs(root); btrfs_run_delayed_iputs(root);
mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex); mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);
again = btrfs_clean_one_deleted_snapshot(root); again = btrfs_clean_one_deleted_snapshot(root);
mutex_unlock(&root->fs_info->cleaner_mutex); mutex_unlock(&fs_info->cleaner_mutex);
/* /*
* The defragger has dealt with the R/O remount and umount, * The defragger has dealt with the R/O remount and umount,
* needn't do anything special here. * needn't do anything special here.
*/ */
btrfs_run_defrag_inodes(root->fs_info); btrfs_run_defrag_inodes(fs_info);
/* /*
* Acquires fs_info->delete_unused_bgs_mutex to avoid racing * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
@ -1878,7 +1884,7 @@ static int cleaner_kthread(void *arg)
* can't hold, nor need to, fs_info->cleaner_mutex when deleting * can't hold, nor need to, fs_info->cleaner_mutex when deleting
* unused block groups. * unused block groups.
*/ */
btrfs_delete_unused_bgs(root->fs_info); btrfs_delete_unused_bgs(fs_info);
sleep: sleep:
if (!again) { if (!again) {
set_current_state(TASK_INTERRUPTIBLE); set_current_state(TASK_INTERRUPTIBLE);
@ -1902,7 +1908,7 @@ sleep:
trans = btrfs_attach_transaction(root); trans = btrfs_attach_transaction(root);
if (IS_ERR(trans)) { if (IS_ERR(trans)) {
if (PTR_ERR(trans) != -ENOENT) if (PTR_ERR(trans) != -ENOENT)
btrfs_err(root->fs_info, btrfs_err(fs_info,
"cleaner transaction attach returned %ld", "cleaner transaction attach returned %ld",
PTR_ERR(trans)); PTR_ERR(trans));
} else { } else {
@ -1910,7 +1916,7 @@ sleep:
ret = btrfs_commit_transaction(trans, root); ret = btrfs_commit_transaction(trans, root);
if (ret) if (ret)
btrfs_err(root->fs_info, btrfs_err(fs_info,
"cleaner open transaction commit returned %d", "cleaner open transaction commit returned %d",
ret); ret);
} }
@ -1921,6 +1927,7 @@ sleep:
static int transaction_kthread(void *arg) static int transaction_kthread(void *arg)
{ {
struct btrfs_root *root = arg; struct btrfs_root *root = arg;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *trans; struct btrfs_trans_handle *trans;
struct btrfs_transaction *cur; struct btrfs_transaction *cur;
u64 transid; u64 transid;
@ -1930,26 +1937,26 @@ static int transaction_kthread(void *arg)
do { do {
cannot_commit = false; cannot_commit = false;
delay = HZ * root->fs_info->commit_interval; delay = HZ * fs_info->commit_interval;
mutex_lock(&root->fs_info->transaction_kthread_mutex); mutex_lock(&fs_info->transaction_kthread_mutex);
spin_lock(&root->fs_info->trans_lock); spin_lock(&fs_info->trans_lock);
cur = root->fs_info->running_transaction; cur = fs_info->running_transaction;
if (!cur) { if (!cur) {
spin_unlock(&root->fs_info->trans_lock); spin_unlock(&fs_info->trans_lock);
goto sleep; goto sleep;
} }
now = get_seconds(); now = get_seconds();
if (cur->state < TRANS_STATE_BLOCKED && if (cur->state < TRANS_STATE_BLOCKED &&
(now < cur->start_time || (now < cur->start_time ||
now - cur->start_time < root->fs_info->commit_interval)) { now - cur->start_time < fs_info->commit_interval)) {
spin_unlock(&root->fs_info->trans_lock); spin_unlock(&fs_info->trans_lock);
delay = HZ * 5; delay = HZ * 5;
goto sleep; goto sleep;
} }
transid = cur->transid; transid = cur->transid;
spin_unlock(&root->fs_info->trans_lock); spin_unlock(&fs_info->trans_lock);
/* If the file system is aborted, this will always fail. */ /* If the file system is aborted, this will always fail. */
trans = btrfs_attach_transaction(root); trans = btrfs_attach_transaction(root);
@ -1964,15 +1971,15 @@ static int transaction_kthread(void *arg)
btrfs_end_transaction(trans, root); btrfs_end_transaction(trans, root);
} }
sleep: sleep:
wake_up_process(root->fs_info->cleaner_kthread); wake_up_process(fs_info->cleaner_kthread);
mutex_unlock(&root->fs_info->transaction_kthread_mutex); mutex_unlock(&fs_info->transaction_kthread_mutex);
if (unlikely(test_bit(BTRFS_FS_STATE_ERROR, if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
&root->fs_info->fs_state))) &fs_info->fs_state)))
btrfs_cleanup_transaction(root); btrfs_cleanup_transaction(root);
set_current_state(TASK_INTERRUPTIBLE); set_current_state(TASK_INTERRUPTIBLE);
if (!kthread_should_stop() && if (!kthread_should_stop() &&
(!btrfs_transaction_blocked(root->fs_info) || (!btrfs_transaction_blocked(fs_info) ||
cannot_commit)) cannot_commit))
schedule_timeout(delay); schedule_timeout(delay);
__set_current_state(TASK_RUNNING); __set_current_state(TASK_RUNNING);
@ -2464,8 +2471,8 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
/* returns with log_tree_root freed on success */ /* returns with log_tree_root freed on success */
ret = btrfs_recover_log_trees(log_tree_root); ret = btrfs_recover_log_trees(log_tree_root);
if (ret) { if (ret) {
btrfs_handle_fs_error(tree_root->fs_info, ret, btrfs_handle_fs_error(fs_info, ret,
"Failed to recover log tree"); "Failed to recover log tree");
free_extent_buffer(log_tree_root->node); free_extent_buffer(log_tree_root->node);
kfree(log_tree_root); kfree(log_tree_root);
return ret; return ret;
@ -2830,7 +2837,7 @@ int open_ctree(struct super_block *sb,
features = btrfs_super_incompat_flags(disk_super); features = btrfs_super_incompat_flags(disk_super);
features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO) if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA) if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
@ -3059,8 +3066,8 @@ retry_root_backup:
if (IS_ERR(fs_info->transaction_kthread)) if (IS_ERR(fs_info->transaction_kthread))
goto fail_cleaner; goto fail_cleaner;
if (!btrfs_test_opt(tree_root->fs_info, SSD) && if (!btrfs_test_opt(fs_info, SSD) &&
!btrfs_test_opt(tree_root->fs_info, NOSSD) && !btrfs_test_opt(fs_info, NOSSD) &&
!fs_info->fs_devices->rotating) { !fs_info->fs_devices->rotating) {
btrfs_info(fs_info, "detected SSD devices, enabling SSD mode"); btrfs_info(fs_info, "detected SSD devices, enabling SSD mode");
btrfs_set_opt(fs_info->mount_opt, SSD); btrfs_set_opt(fs_info->mount_opt, SSD);
@ -3073,9 +3080,9 @@ retry_root_backup:
btrfs_apply_pending_changes(fs_info); btrfs_apply_pending_changes(fs_info);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
if (btrfs_test_opt(tree_root->fs_info, CHECK_INTEGRITY)) { if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) {
ret = btrfsic_mount(tree_root, fs_devices, ret = btrfsic_mount(tree_root, fs_devices,
btrfs_test_opt(tree_root->fs_info, btrfs_test_opt(fs_info,
CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ? CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
1 : 0, 1 : 0,
fs_info->check_integrity_print_mask); fs_info->check_integrity_print_mask);
@ -3091,7 +3098,7 @@ retry_root_backup:
/* do not make disk changes in broken FS or nologreplay is given */ /* do not make disk changes in broken FS or nologreplay is given */
if (btrfs_super_log_root(disk_super) != 0 && if (btrfs_super_log_root(disk_super) != 0 &&
!btrfs_test_opt(tree_root->fs_info, NOLOGREPLAY)) { !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
ret = btrfs_replay_log(fs_info, fs_devices); ret = btrfs_replay_log(fs_info, fs_devices);
if (ret) { if (ret) {
err = ret; err = ret;
@ -3152,7 +3159,7 @@ retry_root_backup:
} }
} }
if (btrfs_test_opt(tree_root->fs_info, FREE_SPACE_TREE) && if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
btrfs_info(fs_info, "creating free space tree"); btrfs_info(fs_info, "creating free space tree");
ret = btrfs_create_free_space_tree(fs_info); ret = btrfs_create_free_space_tree(fs_info);
@ -3198,7 +3205,7 @@ retry_root_backup:
close_ctree(fs_info); close_ctree(fs_info);
return ret; return ret;
} }
} else if (btrfs_test_opt(tree_root->fs_info, RESCAN_UUID_TREE) || } else if (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
fs_info->generation != fs_info->generation !=
btrfs_super_uuid_tree_generation(disk_super)) { btrfs_super_uuid_tree_generation(disk_super)) {
btrfs_info(fs_info, "checking UUID tree"); btrfs_info(fs_info, "checking UUID tree");
@ -3274,7 +3281,7 @@ fail:
return err; return err;
recovery_tree_root: recovery_tree_root:
if (!btrfs_test_opt(tree_root->fs_info, USEBACKUPROOT)) if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
goto fail_tree_roots; goto fail_tree_roots;
free_root_pointers(fs_info, 0); free_root_pointers(fs_info, 0);
@ -3680,6 +3687,7 @@ int btrfs_calc_num_tolerated_disk_barrier_failures(
static int write_all_supers(struct btrfs_root *root, int max_mirrors) static int write_all_supers(struct btrfs_root *root, int max_mirrors)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct list_head *head; struct list_head *head;
struct btrfs_device *dev; struct btrfs_device *dev;
struct btrfs_super_block *sb; struct btrfs_super_block *sb;
@ -3690,23 +3698,23 @@ static int write_all_supers(struct btrfs_root *root, int max_mirrors)
int total_errors = 0; int total_errors = 0;
u64 flags; u64 flags;
do_barriers = !btrfs_test_opt(root->fs_info, NOBARRIER); do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
backup_super_roots(root->fs_info); backup_super_roots(fs_info);
sb = root->fs_info->super_for_commit; sb = fs_info->super_for_commit;
dev_item = &sb->dev_item; dev_item = &sb->dev_item;
mutex_lock(&root->fs_info->fs_devices->device_list_mutex); mutex_lock(&fs_info->fs_devices->device_list_mutex);
head = &root->fs_info->fs_devices->devices; head = &fs_info->fs_devices->devices;
max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1; max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;
if (do_barriers) { if (do_barriers) {
ret = barrier_all_devices(root->fs_info); ret = barrier_all_devices(fs_info);
if (ret) { if (ret) {
mutex_unlock( mutex_unlock(
&root->fs_info->fs_devices->device_list_mutex); &fs_info->fs_devices->device_list_mutex);
btrfs_handle_fs_error(root->fs_info, ret, btrfs_handle_fs_error(fs_info, ret,
"errors while submitting device barriers."); "errors while submitting device barriers.");
return ret; return ret;
} }
} }
@ -3740,13 +3748,14 @@ static int write_all_supers(struct btrfs_root *root, int max_mirrors)
total_errors++; total_errors++;
} }
if (total_errors > max_errors) { if (total_errors > max_errors) {
btrfs_err(root->fs_info, "%d errors while writing supers", btrfs_err(fs_info, "%d errors while writing supers",
total_errors); total_errors);
mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); mutex_unlock(&fs_info->fs_devices->device_list_mutex);
/* FUA is masked off if unsupported and can't be the reason */ /* FUA is masked off if unsupported and can't be the reason */
btrfs_handle_fs_error(root->fs_info, -EIO, btrfs_handle_fs_error(fs_info, -EIO,
"%d errors while writing supers", total_errors); "%d errors while writing supers",
total_errors);
return -EIO; return -EIO;
} }
@ -3761,10 +3770,11 @@ static int write_all_supers(struct btrfs_root *root, int max_mirrors)
if (ret) if (ret)
total_errors++; total_errors++;
} }
mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); mutex_unlock(&fs_info->fs_devices->device_list_mutex);
if (total_errors > max_errors) { if (total_errors > max_errors) {
btrfs_handle_fs_error(root->fs_info, -EIO, btrfs_handle_fs_error(fs_info, -EIO,
"%d errors while writing supers", total_errors); "%d errors while writing supers",
total_errors);
return -EIO; return -EIO;
} }
return 0; return 0;
@ -3884,14 +3894,14 @@ int btrfs_commit_super(struct btrfs_fs_info *fs_info)
struct btrfs_root *root = fs_info->tree_root; struct btrfs_root *root = fs_info->tree_root;
struct btrfs_trans_handle *trans; struct btrfs_trans_handle *trans;
mutex_lock(&root->fs_info->cleaner_mutex); mutex_lock(&fs_info->cleaner_mutex);
btrfs_run_delayed_iputs(root); btrfs_run_delayed_iputs(root);
mutex_unlock(&root->fs_info->cleaner_mutex); mutex_unlock(&fs_info->cleaner_mutex);
wake_up_process(root->fs_info->cleaner_kthread); wake_up_process(fs_info->cleaner_kthread);
/* wait until ongoing cleanup work done */ /* wait until ongoing cleanup work done */
down_write(&root->fs_info->cleanup_work_sem); down_write(&fs_info->cleanup_work_sem);
up_write(&root->fs_info->cleanup_work_sem); up_write(&fs_info->cleanup_work_sem);
trans = btrfs_join_transaction(root); trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) if (IS_ERR(trans))
@ -3936,7 +3946,7 @@ void close_ctree(struct btrfs_fs_info *fs_info)
* block groups queued for removal, the deletion will be * block groups queued for removal, the deletion will be
* skipped when we quit the cleaner thread. * skipped when we quit the cleaner thread.
*/ */
btrfs_delete_unused_bgs(root->fs_info); btrfs_delete_unused_bgs(fs_info);
ret = btrfs_commit_super(fs_info); ret = btrfs_commit_super(fs_info);
if (ret) if (ret)
@ -3980,7 +3990,7 @@ void close_ctree(struct btrfs_fs_info *fs_info)
iput(fs_info->btree_inode); iput(fs_info->btree_inode);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
if (btrfs_test_opt(root->fs_info, CHECK_INTEGRITY)) if (btrfs_test_opt(fs_info, CHECK_INTEGRITY))
btrfsic_unmount(root, fs_info->fs_devices); btrfsic_unmount(root, fs_info->fs_devices);
#endif #endif
@ -3998,7 +4008,7 @@ void close_ctree(struct btrfs_fs_info *fs_info)
__btrfs_free_block_rsv(root->orphan_block_rsv); __btrfs_free_block_rsv(root->orphan_block_rsv);
root->orphan_block_rsv = NULL; root->orphan_block_rsv = NULL;
lock_chunks(root->fs_info); lock_chunks(fs_info);
while (!list_empty(&fs_info->pinned_chunks)) { while (!list_empty(&fs_info->pinned_chunks)) {
struct extent_map *em; struct extent_map *em;
@ -4007,7 +4017,7 @@ void close_ctree(struct btrfs_fs_info *fs_info)
list_del_init(&em->list); list_del_init(&em->list);
free_extent_map(em); free_extent_map(em);
} }
unlock_chunks(root->fs_info); unlock_chunks(fs_info);
} }
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid, int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
@ -4029,6 +4039,7 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
void btrfs_mark_buffer_dirty(struct extent_buffer *buf) void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{ {
struct btrfs_fs_info *fs_info;
struct btrfs_root *root; struct btrfs_root *root;
u64 transid = btrfs_header_generation(buf); u64 transid = btrfs_header_generation(buf);
int was_dirty; int was_dirty;
@ -4043,15 +4054,16 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
return; return;
#endif #endif
root = BTRFS_I(buf->pages[0]->mapping->host)->root; root = BTRFS_I(buf->pages[0]->mapping->host)->root;
fs_info = root->fs_info;
btrfs_assert_tree_locked(buf); btrfs_assert_tree_locked(buf);
if (transid != root->fs_info->generation) if (transid != fs_info->generation)
WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n", WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
buf->start, transid, root->fs_info->generation); buf->start, transid, fs_info->generation);
was_dirty = set_extent_buffer_dirty(buf); was_dirty = set_extent_buffer_dirty(buf);
if (!was_dirty) if (!was_dirty)
__percpu_counter_add(&root->fs_info->dirty_metadata_bytes, __percpu_counter_add(&fs_info->dirty_metadata_bytes,
buf->len, buf->len,
root->fs_info->dirty_metadata_batch); fs_info->dirty_metadata_batch);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) { if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) {
btrfs_print_leaf(root, buf); btrfs_print_leaf(root, buf);
@ -4063,6 +4075,7 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
static void __btrfs_btree_balance_dirty(struct btrfs_root *root, static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
int flush_delayed) int flush_delayed)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
/* /*
* looks as though older kernels can get into trouble with * looks as though older kernels can get into trouble with
* this code, they end up stuck in balance_dirty_pages forever * this code, they end up stuck in balance_dirty_pages forever
@ -4075,11 +4088,10 @@ static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
if (flush_delayed) if (flush_delayed)
btrfs_balance_delayed_items(root); btrfs_balance_delayed_items(root);
ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes, ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
BTRFS_DIRTY_METADATA_THRESH); BTRFS_DIRTY_METADATA_THRESH);
if (ret > 0) { if (ret > 0) {
balance_dirty_pages_ratelimited( balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
root->fs_info->btree_inode->i_mapping);
} }
} }
@ -4249,12 +4261,14 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
static void btrfs_error_commit_super(struct btrfs_root *root) static void btrfs_error_commit_super(struct btrfs_root *root)
{ {
mutex_lock(&root->fs_info->cleaner_mutex); struct btrfs_fs_info *fs_info = root->fs_info;
btrfs_run_delayed_iputs(root);
mutex_unlock(&root->fs_info->cleaner_mutex);
down_write(&root->fs_info->cleanup_work_sem); mutex_lock(&fs_info->cleaner_mutex);
up_write(&root->fs_info->cleanup_work_sem); btrfs_run_delayed_iputs(root);
mutex_unlock(&fs_info->cleaner_mutex);
down_write(&fs_info->cleanup_work_sem);
up_write(&fs_info->cleanup_work_sem);
/* cleanup FS via transaction */ /* cleanup FS via transaction */
btrfs_cleanup_transaction(root); btrfs_cleanup_transaction(root);
@ -4302,6 +4316,7 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
struct btrfs_root *root) struct btrfs_root *root)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct rb_node *node; struct rb_node *node;
struct btrfs_delayed_ref_root *delayed_refs; struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_delayed_ref_node *ref; struct btrfs_delayed_ref_node *ref;
@ -4312,7 +4327,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
spin_lock(&delayed_refs->lock); spin_lock(&delayed_refs->lock);
if (atomic_read(&delayed_refs->num_entries) == 0) { if (atomic_read(&delayed_refs->num_entries) == 0) {
spin_unlock(&delayed_refs->lock); spin_unlock(&delayed_refs->lock);
btrfs_info(root->fs_info, "delayed_refs has NO entry"); btrfs_info(fs_info, "delayed_refs has NO entry");
return ret; return ret;
} }
@ -4425,6 +4440,7 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
struct extent_io_tree *dirty_pages, struct extent_io_tree *dirty_pages,
int mark) int mark)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
int ret; int ret;
struct extent_buffer *eb; struct extent_buffer *eb;
u64 start = 0; u64 start = 0;
@ -4438,8 +4454,8 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
clear_extent_bits(dirty_pages, start, end, mark); clear_extent_bits(dirty_pages, start, end, mark);
while (start <= end) { while (start <= end) {
eb = find_extent_buffer(root->fs_info, start); eb = find_extent_buffer(fs_info, start);
start += root->fs_info->nodesize; start += fs_info->nodesize;
if (!eb) if (!eb)
continue; continue;
wait_on_extent_buffer_writeback(eb); wait_on_extent_buffer_writeback(eb);
@ -4457,6 +4473,7 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
static int btrfs_destroy_pinned_extent(struct btrfs_root *root, static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
struct extent_io_tree *pinned_extents) struct extent_io_tree *pinned_extents)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_io_tree *unpin; struct extent_io_tree *unpin;
u64 start; u64 start;
u64 end; u64 end;
@ -4477,10 +4494,10 @@ again:
} }
if (loop) { if (loop) {
if (unpin == &root->fs_info->freed_extents[0]) if (unpin == &fs_info->freed_extents[0])
unpin = &root->fs_info->freed_extents[1]; unpin = &fs_info->freed_extents[1];
else else
unpin = &root->fs_info->freed_extents[0]; unpin = &fs_info->freed_extents[0];
loop = false; loop = false;
goto again; goto again;
} }
@ -4505,6 +4522,7 @@ static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans, void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
struct btrfs_root *root) struct btrfs_root *root)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_group_cache *cache; struct btrfs_block_group_cache *cache;
spin_lock(&cur_trans->dirty_bgs_lock); spin_lock(&cur_trans->dirty_bgs_lock);
@ -4513,8 +4531,7 @@ void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
struct btrfs_block_group_cache, struct btrfs_block_group_cache,
dirty_list); dirty_list);
if (!cache) { if (!cache) {
btrfs_err(root->fs_info, btrfs_err(fs_info, "orphan block group dirty_bgs list");
"orphan block group dirty_bgs list");
spin_unlock(&cur_trans->dirty_bgs_lock); spin_unlock(&cur_trans->dirty_bgs_lock);
return; return;
} }
@ -4542,8 +4559,7 @@ void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
struct btrfs_block_group_cache, struct btrfs_block_group_cache,
io_list); io_list);
if (!cache) { if (!cache) {
btrfs_err(root->fs_info, btrfs_err(fs_info, "orphan block group on io_bgs list");
"orphan block group on io_bgs list");
return; return;
} }
@ -4558,6 +4574,7 @@ void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans, void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
struct btrfs_root *root) struct btrfs_root *root)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
btrfs_cleanup_dirty_bgs(cur_trans, root); btrfs_cleanup_dirty_bgs(cur_trans, root);
ASSERT(list_empty(&cur_trans->dirty_bgs)); ASSERT(list_empty(&cur_trans->dirty_bgs));
ASSERT(list_empty(&cur_trans->io_bgs)); ASSERT(list_empty(&cur_trans->io_bgs));
@ -4565,10 +4582,10 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
btrfs_destroy_delayed_refs(cur_trans, root); btrfs_destroy_delayed_refs(cur_trans, root);
cur_trans->state = TRANS_STATE_COMMIT_START; cur_trans->state = TRANS_STATE_COMMIT_START;
wake_up(&root->fs_info->transaction_blocked_wait); wake_up(&fs_info->transaction_blocked_wait);
cur_trans->state = TRANS_STATE_UNBLOCKED; cur_trans->state = TRANS_STATE_UNBLOCKED;
wake_up(&root->fs_info->transaction_wait); wake_up(&fs_info->transaction_wait);
btrfs_destroy_delayed_inodes(root); btrfs_destroy_delayed_inodes(root);
btrfs_assert_delayed_root_empty(root); btrfs_assert_delayed_root_empty(root);
@ -4576,7 +4593,7 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages, btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
EXTENT_DIRTY); EXTENT_DIRTY);
btrfs_destroy_pinned_extent(root, btrfs_destroy_pinned_extent(root,
root->fs_info->pinned_extents); fs_info->pinned_extents);
cur_trans->state =TRANS_STATE_COMPLETED; cur_trans->state =TRANS_STATE_COMPLETED;
wake_up(&cur_trans->commit_wait); wake_up(&cur_trans->commit_wait);
@ -4589,25 +4606,26 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
static int btrfs_cleanup_transaction(struct btrfs_root *root) static int btrfs_cleanup_transaction(struct btrfs_root *root)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_transaction *t; struct btrfs_transaction *t;
mutex_lock(&root->fs_info->transaction_kthread_mutex); mutex_lock(&fs_info->transaction_kthread_mutex);
spin_lock(&root->fs_info->trans_lock); spin_lock(&fs_info->trans_lock);
while (!list_empty(&root->fs_info->trans_list)) { while (!list_empty(&fs_info->trans_list)) {
t = list_first_entry(&root->fs_info->trans_list, t = list_first_entry(&fs_info->trans_list,
struct btrfs_transaction, list); struct btrfs_transaction, list);
if (t->state >= TRANS_STATE_COMMIT_START) { if (t->state >= TRANS_STATE_COMMIT_START) {
atomic_inc(&t->use_count); atomic_inc(&t->use_count);
spin_unlock(&root->fs_info->trans_lock); spin_unlock(&fs_info->trans_lock);
btrfs_wait_for_commit(root, t->transid); btrfs_wait_for_commit(root, t->transid);
btrfs_put_transaction(t); btrfs_put_transaction(t);
spin_lock(&root->fs_info->trans_lock); spin_lock(&fs_info->trans_lock);
continue; continue;
} }
if (t == root->fs_info->running_transaction) { if (t == fs_info->running_transaction) {
t->state = TRANS_STATE_COMMIT_DOING; t->state = TRANS_STATE_COMMIT_DOING;
spin_unlock(&root->fs_info->trans_lock); spin_unlock(&fs_info->trans_lock);
/* /*
* We wait for 0 num_writers since we don't hold a trans * We wait for 0 num_writers since we don't hold a trans
* handle open currently for this transaction. * handle open currently for this transaction.
@ -4615,27 +4633,27 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
wait_event(t->writer_wait, wait_event(t->writer_wait,
atomic_read(&t->num_writers) == 0); atomic_read(&t->num_writers) == 0);
} else { } else {
spin_unlock(&root->fs_info->trans_lock); spin_unlock(&fs_info->trans_lock);
} }
btrfs_cleanup_one_transaction(t, root); btrfs_cleanup_one_transaction(t, root);
spin_lock(&root->fs_info->trans_lock); spin_lock(&fs_info->trans_lock);
if (t == root->fs_info->running_transaction) if (t == fs_info->running_transaction)
root->fs_info->running_transaction = NULL; fs_info->running_transaction = NULL;
list_del_init(&t->list); list_del_init(&t->list);
spin_unlock(&root->fs_info->trans_lock); spin_unlock(&fs_info->trans_lock);
btrfs_put_transaction(t); btrfs_put_transaction(t);
trace_btrfs_transaction_commit(root); trace_btrfs_transaction_commit(root);
spin_lock(&root->fs_info->trans_lock); spin_lock(&fs_info->trans_lock);
} }
spin_unlock(&root->fs_info->trans_lock); spin_unlock(&fs_info->trans_lock);
btrfs_destroy_all_ordered_extents(root->fs_info); btrfs_destroy_all_ordered_extents(fs_info);
btrfs_destroy_delayed_inodes(root); btrfs_destroy_delayed_inodes(root);
btrfs_assert_delayed_root_empty(root); btrfs_assert_delayed_root_empty(root);
btrfs_destroy_pinned_extent(root, root->fs_info->pinned_extents); btrfs_destroy_pinned_extent(root, fs_info->pinned_extents);
btrfs_destroy_all_delalloc_inodes(root->fs_info); btrfs_destroy_all_delalloc_inodes(fs_info);
mutex_unlock(&root->fs_info->transaction_kthread_mutex); mutex_unlock(&fs_info->transaction_kthread_mutex);
return 0; return 0;
} }
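The hunks above all follow the same mechanical transformation: a repeated root->fs_info (or BTRFS_I(inode)->root->fs_info) pointer chain is hoisted into a local fs_info once near the top of the function and every later use reads the local. A minimal, self-contained sketch of that refactor follows; the struct layouts and function names are illustrative stand-ins, not taken from the kernel sources.

#include <stdio.h>

/* Illustrative stand-ins for the kernel structures (not the real layouts). */
struct fs_info {
	unsigned long generation;
	unsigned long nodesize;
};

struct root {
	struct fs_info *fs_info;
};

/* Before: every access repeats the root->fs_info dereference. */
static void dump_before(struct root *root)
{
	printf("generation=%lu nodesize=%lu\n",
	       root->fs_info->generation, root->fs_info->nodesize);
}

/* After: one convenience variable keeps the later lines short and readable. */
static void dump_after(struct root *root)
{
	struct fs_info *fs_info = root->fs_info;

	printf("generation=%lu nodesize=%lu\n",
	       fs_info->generation, fs_info->nodesize);
}

int main(void)
{
	struct fs_info fi = { .generation = 42, .nodesize = 16384 };
	struct root r = { .fs_info = &fi };

	dump_before(&r);
	dump_after(&r);
	return 0;
}

Behaviour is identical either way; the only change is readability, which is the stated point of the commit.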


@ -153,6 +153,7 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
static struct dentry *btrfs_get_parent(struct dentry *child) static struct dentry *btrfs_get_parent(struct dentry *child)
{ {
struct inode *dir = d_inode(child); struct inode *dir = d_inode(child);
struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
struct btrfs_root *root = BTRFS_I(dir)->root; struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_path *path; struct btrfs_path *path;
struct extent_buffer *leaf; struct extent_buffer *leaf;
@ -169,7 +170,7 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
key.objectid = root->root_key.objectid; key.objectid = root->root_key.objectid;
key.type = BTRFS_ROOT_BACKREF_KEY; key.type = BTRFS_ROOT_BACKREF_KEY;
key.offset = (u64)-1; key.offset = (u64)-1;
root = root->fs_info->tree_root; root = fs_info->tree_root;
} else { } else {
key.objectid = btrfs_ino(dir); key.objectid = btrfs_ino(dir);
key.type = BTRFS_INODE_REF_KEY; key.type = BTRFS_INODE_REF_KEY;
@ -205,13 +206,13 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
btrfs_free_path(path); btrfs_free_path(path);
if (found_key.type == BTRFS_ROOT_BACKREF_KEY) { if (found_key.type == BTRFS_ROOT_BACKREF_KEY) {
return btrfs_get_dentry(root->fs_info->sb, key.objectid, return btrfs_get_dentry(fs_info->sb, key.objectid,
found_key.offset, 0, 0); found_key.offset, 0, 0);
} }
key.type = BTRFS_INODE_ITEM_KEY; key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0; key.offset = 0;
return d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root, NULL)); return d_obtain_alias(btrfs_iget(fs_info->sb, &key, root, NULL));
fail: fail:
btrfs_free_path(path); btrfs_free_path(path);
return ERR_PTR(ret); return ERR_PTR(ret);
@ -222,6 +223,7 @@ static int btrfs_get_name(struct dentry *parent, char *name,
{ {
struct inode *inode = d_inode(child); struct inode *inode = d_inode(child);
struct inode *dir = d_inode(parent); struct inode *dir = d_inode(parent);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_path *path; struct btrfs_path *path;
struct btrfs_root *root = BTRFS_I(dir)->root; struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_inode_ref *iref; struct btrfs_inode_ref *iref;
@ -250,7 +252,7 @@ static int btrfs_get_name(struct dentry *parent, char *name,
key.objectid = BTRFS_I(inode)->root->root_key.objectid; key.objectid = BTRFS_I(inode)->root->root_key.objectid;
key.type = BTRFS_ROOT_BACKREF_KEY; key.type = BTRFS_ROOT_BACKREF_KEY;
key.offset = (u64)-1; key.offset = (u64)-1;
root = root->fs_info->tree_root; root = fs_info->tree_root;
} else { } else {
key.objectid = ino; key.objectid = ino;
key.offset = btrfs_ino(dir); key.offset = btrfs_ino(dir);

File diff suppressed because it is too large


@ -2070,17 +2070,18 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb, int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
int mirror_num) int mirror_num)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
u64 start = eb->start; u64 start = eb->start;
unsigned long i, num_pages = num_extent_pages(eb->start, eb->len); unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
int ret = 0; int ret = 0;
if (root->fs_info->sb->s_flags & MS_RDONLY) if (fs_info->sb->s_flags & MS_RDONLY)
return -EROFS; return -EROFS;
for (i = 0; i < num_pages; i++) { for (i = 0; i < num_pages; i++) {
struct page *p = eb->pages[i]; struct page *p = eb->pages[i];
ret = repair_io_failure(root->fs_info->btree_inode, start, ret = repair_io_failure(fs_info->btree_inode, start,
PAGE_SIZE, start, p, PAGE_SIZE, start, p,
start - page_offset(p), mirror_num); start - page_offset(p), mirror_num);
if (ret) if (ret)
@ -2341,6 +2342,7 @@ struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
struct page *page, int pg_offset, int icsum, struct page *page, int pg_offset, int icsum,
bio_end_io_t *endio_func, void *data) bio_end_io_t *endio_func, void *data)
{ {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct bio *bio; struct bio *bio;
struct btrfs_io_bio *btrfs_failed_bio; struct btrfs_io_bio *btrfs_failed_bio;
struct btrfs_io_bio *btrfs_bio; struct btrfs_io_bio *btrfs_bio;
@ -2351,13 +2353,12 @@ struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
bio->bi_end_io = endio_func; bio->bi_end_io = endio_func;
bio->bi_iter.bi_sector = failrec->logical >> 9; bio->bi_iter.bi_sector = failrec->logical >> 9;
bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; bio->bi_bdev = fs_info->fs_devices->latest_bdev;
bio->bi_iter.bi_size = 0; bio->bi_iter.bi_size = 0;
bio->bi_private = data; bio->bi_private = data;
btrfs_failed_bio = btrfs_io_bio(failed_bio); btrfs_failed_bio = btrfs_io_bio(failed_bio);
if (btrfs_failed_bio->csum) { if (btrfs_failed_bio->csum) {
struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
u16 csum_size = btrfs_super_csum_size(fs_info->super_copy); u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
btrfs_bio = btrfs_io_bio(bio); btrfs_bio = btrfs_io_bio(bio);
@ -2476,6 +2477,8 @@ static void end_bio_extent_writepage(struct bio *bio)
bio_for_each_segment_all(bvec, bio, i) { bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page; struct page *page = bvec->bv_page;
struct inode *inode = page->mapping->host;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
/* We always issue full-page reads, but if some block /* We always issue full-page reads, but if some block
* in a page fails to read, blk_update_request() will * in a page fails to read, blk_update_request() will
@ -2484,11 +2487,11 @@ static void end_bio_extent_writepage(struct bio *bio)
* if they don't add up to a full page. */ * if they don't add up to a full page. */
if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) { if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE) if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info, btrfs_err(fs_info,
"partial page write in btrfs with offset %u and length %u", "partial page write in btrfs with offset %u and length %u",
bvec->bv_offset, bvec->bv_len); bvec->bv_offset, bvec->bv_len);
else else
btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info, btrfs_info(fs_info,
"incomplete page write in btrfs with offset %u and length %u", "incomplete page write in btrfs with offset %u and length %u",
bvec->bv_offset, bvec->bv_len); bvec->bv_offset, bvec->bv_len);
} }
@ -5789,6 +5792,7 @@ static void copy_pages(struct page *dst_page, struct page *src_page,
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
unsigned long src_offset, unsigned long len) unsigned long src_offset, unsigned long len)
{ {
struct btrfs_fs_info *fs_info = dst->fs_info;
size_t cur; size_t cur;
size_t dst_off_in_page; size_t dst_off_in_page;
size_t src_off_in_page; size_t src_off_in_page;
@ -5797,13 +5801,13 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
unsigned long src_i; unsigned long src_i;
if (src_offset + len > dst->len) { if (src_offset + len > dst->len) {
btrfs_err(dst->fs_info, btrfs_err(fs_info,
"memmove bogus src_offset %lu move len %lu dst len %lu", "memmove bogus src_offset %lu move len %lu dst len %lu",
src_offset, len, dst->len); src_offset, len, dst->len);
BUG_ON(1); BUG_ON(1);
} }
if (dst_offset + len > dst->len) { if (dst_offset + len > dst->len) {
btrfs_err(dst->fs_info, btrfs_err(fs_info,
"memmove bogus dst_offset %lu move len %lu dst len %lu", "memmove bogus dst_offset %lu move len %lu dst len %lu",
dst_offset, len, dst->len); dst_offset, len, dst->len);
BUG_ON(1); BUG_ON(1);
@ -5835,6 +5839,7 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
unsigned long src_offset, unsigned long len) unsigned long src_offset, unsigned long len)
{ {
struct btrfs_fs_info *fs_info = dst->fs_info;
size_t cur; size_t cur;
size_t dst_off_in_page; size_t dst_off_in_page;
size_t src_off_in_page; size_t src_off_in_page;
@ -5845,13 +5850,13 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
unsigned long src_i; unsigned long src_i;
if (src_offset + len > dst->len) { if (src_offset + len > dst->len) {
btrfs_err(dst->fs_info, btrfs_err(fs_info,
"memmove bogus src_offset %lu move len %lu len %lu", "memmove bogus src_offset %lu move len %lu len %lu",
src_offset, len, dst->len); src_offset, len, dst->len);
BUG_ON(1); BUG_ON(1);
} }
if (dst_offset + len > dst->len) { if (dst_offset + len > dst->len) {
btrfs_err(dst->fs_info, btrfs_err(fs_info,
"memmove bogus dst_offset %lu move len %lu len %lu", "memmove bogus dst_offset %lu move len %lu len %lu",
dst_offset, len, dst->len); dst_offset, len, dst->len);
BUG_ON(1); BUG_ON(1);


@ -90,13 +90,14 @@ btrfs_lookup_csum(struct btrfs_trans_handle *trans,
struct btrfs_path *path, struct btrfs_path *path,
u64 bytenr, int cow) u64 bytenr, int cow)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
int ret; int ret;
struct btrfs_key file_key; struct btrfs_key file_key;
struct btrfs_key found_key; struct btrfs_key found_key;
struct btrfs_csum_item *item; struct btrfs_csum_item *item;
struct extent_buffer *leaf; struct extent_buffer *leaf;
u64 csum_offset = 0; u64 csum_offset = 0;
u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
int csums_in_item; int csums_in_item;
file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
@ -116,7 +117,7 @@ btrfs_lookup_csum(struct btrfs_trans_handle *trans,
goto fail; goto fail;
csum_offset = (bytenr - found_key.offset) >> csum_offset = (bytenr - found_key.offset) >>
root->fs_info->sb->s_blocksize_bits; fs_info->sb->s_blocksize_bits;
csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]); csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
csums_in_item /= csum_size; csums_in_item /= csum_size;
@ -163,6 +164,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
struct inode *inode, struct bio *bio, struct inode *inode, struct bio *bio,
u64 logical_offset, u32 *dst, int dio) u64 logical_offset, u32 *dst, int dio)
{ {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct bio_vec *bvec; struct bio_vec *bvec;
struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio); struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
struct btrfs_csum_item *item = NULL; struct btrfs_csum_item *item = NULL;
@ -177,7 +179,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
u32 diff; u32 diff;
int nblocks; int nblocks;
int count = 0, i; int count = 0, i;
u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
path = btrfs_alloc_path(); path = btrfs_alloc_path();
if (!path) if (!path)
@ -241,7 +243,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
if (item) if (item)
btrfs_release_path(path); btrfs_release_path(path);
item = btrfs_lookup_csum(NULL, root->fs_info->csum_root, item = btrfs_lookup_csum(NULL, fs_info->csum_root,
path, disk_bytenr, 0); path, disk_bytenr, 0);
if (IS_ERR(item)) { if (IS_ERR(item)) {
count = 1; count = 1;
@ -249,10 +251,10 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
if (BTRFS_I(inode)->root->root_key.objectid == if (BTRFS_I(inode)->root->root_key.objectid ==
BTRFS_DATA_RELOC_TREE_OBJECTID) { BTRFS_DATA_RELOC_TREE_OBJECTID) {
set_extent_bits(io_tree, offset, set_extent_bits(io_tree, offset,
offset + root->fs_info->sectorsize - 1, offset + fs_info->sectorsize - 1,
EXTENT_NODATASUM); EXTENT_NODATASUM);
} else { } else {
btrfs_info_rl(BTRFS_I(inode)->root->fs_info, btrfs_info_rl(fs_info,
"no csum found for inode %llu start %llu", "no csum found for inode %llu start %llu",
btrfs_ino(inode), offset); btrfs_ino(inode), offset);
} }
@ -268,7 +270,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
path->slots[0]); path->slots[0]);
item_last_offset = item_start_offset + item_last_offset = item_start_offset +
(item_size / csum_size) * (item_size / csum_size) *
root->fs_info->sectorsize; fs_info->sectorsize;
item = btrfs_item_ptr(path->nodes[0], path->slots[0], item = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_csum_item); struct btrfs_csum_item);
} }
@ -277,7 +279,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
* a single leaf so it will also fit inside a u32 * a single leaf so it will also fit inside a u32
*/ */
diff = disk_bytenr - item_start_offset; diff = disk_bytenr - item_start_offset;
diff = diff / root->fs_info->sectorsize; diff = diff / fs_info->sectorsize;
diff = diff * csum_size; diff = diff * csum_size;
count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >> count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >>
inode->i_sb->s_blocksize_bits); inode->i_sb->s_blocksize_bits);
@ -289,9 +291,9 @@ found:
nblocks -= count; nblocks -= count;
next: next:
while (count--) { while (count--) {
disk_bytenr += root->fs_info->sectorsize; disk_bytenr += fs_info->sectorsize;
offset += root->fs_info->sectorsize; offset += fs_info->sectorsize;
page_bytes_left -= root->fs_info->sectorsize; page_bytes_left -= fs_info->sectorsize;
if (!page_bytes_left) if (!page_bytes_left)
break; /* move to next bio */ break; /* move to next bio */
} }
@ -317,6 +319,7 @@ int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
struct list_head *list, int search_commit) struct list_head *list, int search_commit)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key; struct btrfs_key key;
struct btrfs_path *path; struct btrfs_path *path;
struct extent_buffer *leaf; struct extent_buffer *leaf;
@ -327,10 +330,10 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
int ret; int ret;
size_t size; size_t size;
u64 csum_end; u64 csum_end;
u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
ASSERT(IS_ALIGNED(start, root->fs_info->sectorsize) && ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
IS_ALIGNED(end + 1, root->fs_info->sectorsize)); IS_ALIGNED(end + 1, fs_info->sectorsize));
path = btrfs_alloc_path(); path = btrfs_alloc_path();
if (!path) if (!path)
@ -355,7 +358,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID && if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
key.type == BTRFS_EXTENT_CSUM_KEY) { key.type == BTRFS_EXTENT_CSUM_KEY) {
offset = (start - key.offset) >> offset = (start - key.offset) >>
root->fs_info->sb->s_blocksize_bits; fs_info->sb->s_blocksize_bits;
if (offset * csum_size < if (offset * csum_size <
btrfs_item_size_nr(leaf, path->slots[0] - 1)) btrfs_item_size_nr(leaf, path->slots[0] - 1))
path->slots[0]--; path->slots[0]--;
@ -383,7 +386,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
start = key.offset; start = key.offset;
size = btrfs_item_size_nr(leaf, path->slots[0]); size = btrfs_item_size_nr(leaf, path->slots[0]);
csum_end = key.offset + (size / csum_size) * root->fs_info->sectorsize; csum_end = key.offset + (size / csum_size) * fs_info->sectorsize;
if (csum_end <= start) { if (csum_end <= start) {
path->slots[0]++; path->slots[0]++;
continue; continue;
@ -394,9 +397,8 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
struct btrfs_csum_item); struct btrfs_csum_item);
while (start < csum_end) { while (start < csum_end) {
size = min_t(size_t, csum_end - start, size = min_t(size_t, csum_end - start,
MAX_ORDERED_SUM_BYTES(root->fs_info)); MAX_ORDERED_SUM_BYTES(fs_info));
sums = kzalloc(btrfs_ordered_sum_size(root->fs_info, sums = kzalloc(btrfs_ordered_sum_size(fs_info, size),
size),
GFP_NOFS); GFP_NOFS);
if (!sums) { if (!sums) {
ret = -ENOMEM; ret = -ENOMEM;
@ -407,16 +409,16 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
sums->len = (int)size; sums->len = (int)size;
offset = (start - key.offset) >> offset = (start - key.offset) >>
root->fs_info->sb->s_blocksize_bits; fs_info->sb->s_blocksize_bits;
offset *= csum_size; offset *= csum_size;
size >>= root->fs_info->sb->s_blocksize_bits; size >>= fs_info->sb->s_blocksize_bits;
read_extent_buffer(path->nodes[0], read_extent_buffer(path->nodes[0],
sums->sums, sums->sums,
((unsigned long)item) + offset, ((unsigned long)item) + offset,
csum_size * size); csum_size * size);
start += root->fs_info->sectorsize * size; start += fs_info->sectorsize * size;
list_add_tail(&sums->list, &tmplist); list_add_tail(&sums->list, &tmplist);
} }
path->slots[0]++; path->slots[0]++;
@ -437,6 +439,7 @@ fail:
int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
struct bio *bio, u64 file_start, int contig) struct bio *bio, u64 file_start, int contig)
{ {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ordered_sum *sums; struct btrfs_ordered_sum *sums;
struct btrfs_ordered_extent *ordered = NULL; struct btrfs_ordered_extent *ordered = NULL;
char *data; char *data;
@ -449,8 +452,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
u64 offset; u64 offset;
WARN_ON(bio->bi_vcnt <= 0); WARN_ON(bio->bi_vcnt <= 0);
sums = kzalloc(btrfs_ordered_sum_size(root->fs_info, sums = kzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
bio->bi_iter.bi_size),
GFP_NOFS); GFP_NOFS);
if (!sums) if (!sums)
return -ENOMEM; return -ENOMEM;
@ -477,9 +479,9 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
data = kmap_atomic(bvec->bv_page); data = kmap_atomic(bvec->bv_page);
nr_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info, nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info,
bvec->bv_len + root->fs_info->sectorsize bvec->bv_len + fs_info->sectorsize
- 1); - 1);
for (i = 0; i < nr_sectors; i++) { for (i = 0; i < nr_sectors; i++) {
if (offset >= ordered->file_offset + ordered->len || if (offset >= ordered->file_offset + ordered->len ||
@ -494,8 +496,8 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
bytes_left = bio->bi_iter.bi_size - total_bytes; bytes_left = bio->bi_iter.bi_size - total_bytes;
sums = kzalloc(btrfs_ordered_sum_size(root->fs_info, bytes_left), sums = kzalloc(btrfs_ordered_sum_size(fs_info, bytes_left),
GFP_NOFS); GFP_NOFS);
BUG_ON(!sums); /* -ENOMEM */ BUG_ON(!sums); /* -ENOMEM */
sums->len = bytes_left; sums->len = bytes_left;
ordered = btrfs_lookup_ordered_extent(inode, ordered = btrfs_lookup_ordered_extent(inode,
@ -511,15 +513,15 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
sums->sums[index] = ~(u32)0; sums->sums[index] = ~(u32)0;
sums->sums[index] sums->sums[index]
= btrfs_csum_data(data + bvec->bv_offset = btrfs_csum_data(data + bvec->bv_offset
+ (i * root->fs_info->sectorsize), + (i * fs_info->sectorsize),
sums->sums[index], sums->sums[index],
root->fs_info->sectorsize); fs_info->sectorsize);
btrfs_csum_final(sums->sums[index], btrfs_csum_final(sums->sums[index],
(char *)(sums->sums + index)); (char *)(sums->sums + index));
index++; index++;
offset += root->fs_info->sectorsize; offset += fs_info->sectorsize;
this_sum_bytes += root->fs_info->sectorsize; this_sum_bytes += fs_info->sectorsize;
total_bytes += root->fs_info->sectorsize; total_bytes += fs_info->sectorsize;
} }
kunmap_atomic(data); kunmap_atomic(data);
@ -546,15 +548,16 @@ static noinline void truncate_one_csum(struct btrfs_root *root,
struct btrfs_key *key, struct btrfs_key *key,
u64 bytenr, u64 len) u64 bytenr, u64 len)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *leaf; struct extent_buffer *leaf;
u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
u64 csum_end; u64 csum_end;
u64 end_byte = bytenr + len; u64 end_byte = bytenr + len;
u32 blocksize_bits = root->fs_info->sb->s_blocksize_bits; u32 blocksize_bits = fs_info->sb->s_blocksize_bits;
leaf = path->nodes[0]; leaf = path->nodes[0];
csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size; csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
csum_end <<= root->fs_info->sb->s_blocksize_bits; csum_end <<= fs_info->sb->s_blocksize_bits;
csum_end += key->offset; csum_end += key->offset;
if (key->offset < bytenr && csum_end <= end_byte) { if (key->offset < bytenr && csum_end <= end_byte) {
@ -581,7 +584,7 @@ static noinline void truncate_one_csum(struct btrfs_root *root,
btrfs_truncate_item(root, path, new_size, 0); btrfs_truncate_item(root, path, new_size, 0);
key->offset = end_byte; key->offset = end_byte;
btrfs_set_item_key_safe(root->fs_info, path, key); btrfs_set_item_key_safe(fs_info, path, key);
} else { } else {
BUG(); BUG();
} }
@ -601,8 +604,8 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
u64 csum_end; u64 csum_end;
struct extent_buffer *leaf; struct extent_buffer *leaf;
int ret; int ret;
u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
int blocksize_bits = root->fs_info->sb->s_blocksize_bits; int blocksize_bits = fs_info->sb->s_blocksize_bits;
path = btrfs_alloc_path(); path = btrfs_alloc_path();
if (!path) if (!path)
@ -711,6 +714,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct btrfs_root *root,
struct btrfs_ordered_sum *sums) struct btrfs_ordered_sum *sums)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key file_key; struct btrfs_key file_key;
struct btrfs_key found_key; struct btrfs_key found_key;
struct btrfs_path *path; struct btrfs_path *path;
@ -726,7 +730,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
int index = 0; int index = 0;
int found_next; int found_next;
int ret; int ret;
u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
path = btrfs_alloc_path(); path = btrfs_alloc_path();
if (!path) if (!path)
@ -759,7 +763,7 @@ again:
leaf = path->nodes[0]; leaf = path->nodes[0];
item_size = btrfs_item_size_nr(leaf, path->slots[0]); item_size = btrfs_item_size_nr(leaf, path->slots[0]);
if ((item_size / csum_size) >= if ((item_size / csum_size) >=
MAX_CSUM_ITEMS(root->fs_info, csum_size)) { MAX_CSUM_ITEMS(fs_info, csum_size)) {
/* already at max size, make a new one */ /* already at max size, make a new one */
goto insert; goto insert;
} }
@ -805,11 +809,11 @@ again:
leaf = path->nodes[0]; leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
csum_offset = (bytenr - found_key.offset) >> csum_offset = (bytenr - found_key.offset) >>
root->fs_info->sb->s_blocksize_bits; fs_info->sb->s_blocksize_bits;
if (found_key.type != BTRFS_EXTENT_CSUM_KEY || if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
csum_offset >= MAX_CSUM_ITEMS(root->fs_info, csum_size)) { csum_offset >= MAX_CSUM_ITEMS(fs_info, csum_size)) {
goto insert; goto insert;
} }
@ -827,12 +831,13 @@ again:
free_space = btrfs_leaf_free_space(root, leaf) - free_space = btrfs_leaf_free_space(root, leaf) -
sizeof(struct btrfs_item) - csum_size; sizeof(struct btrfs_item) - csum_size;
tmp = sums->len - total_bytes; tmp = sums->len - total_bytes;
tmp >>= root->fs_info->sb->s_blocksize_bits; tmp >>= fs_info->sb->s_blocksize_bits;
WARN_ON(tmp < 1); WARN_ON(tmp < 1);
extend_nr = max_t(int, 1, (int)tmp); extend_nr = max_t(int, 1, (int)tmp);
diff = (csum_offset + extend_nr) * csum_size; diff = (csum_offset + extend_nr) * csum_size;
diff = min(diff, MAX_CSUM_ITEMS(root->fs_info, csum_size) * csum_size); diff = min(diff,
MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);
diff = diff - btrfs_item_size_nr(leaf, path->slots[0]); diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
diff = min(free_space, diff); diff = min(free_space, diff);
@ -851,12 +856,12 @@ insert:
u64 tmp; u64 tmp;
tmp = sums->len - total_bytes; tmp = sums->len - total_bytes;
tmp >>= root->fs_info->sb->s_blocksize_bits; tmp >>= fs_info->sb->s_blocksize_bits;
tmp = min(tmp, (next_offset - file_key.offset) >> tmp = min(tmp, (next_offset - file_key.offset) >>
root->fs_info->sb->s_blocksize_bits); fs_info->sb->s_blocksize_bits);
tmp = max((u64)1, tmp); tmp = max((u64)1, tmp);
tmp = min(tmp, (u64)MAX_CSUM_ITEMS(root->fs_info, csum_size)); tmp = min(tmp, (u64)MAX_CSUM_ITEMS(fs_info, csum_size));
ins_size = csum_size * tmp; ins_size = csum_size * tmp;
} else { } else {
ins_size = csum_size; ins_size = csum_size;
@ -878,7 +883,7 @@ csum:
csum_offset * csum_size); csum_offset * csum_size);
found: found:
ins_size = (u32)(sums->len - total_bytes) >> ins_size = (u32)(sums->len - total_bytes) >>
root->fs_info->sb->s_blocksize_bits; fs_info->sb->s_blocksize_bits;
ins_size *= csum_size; ins_size *= csum_size;
ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item, ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
ins_size); ins_size);
@ -886,7 +891,7 @@ found:
ins_size); ins_size);
ins_size /= csum_size; ins_size /= csum_size;
total_bytes += ins_size * root->fs_info->sectorsize; total_bytes += ins_size * fs_info->sectorsize;
index += ins_size; index += ins_size;
btrfs_mark_buffer_dirty(path->nodes[0]); btrfs_mark_buffer_dirty(path->nodes[0]);
@ -909,6 +914,7 @@ void btrfs_extent_item_to_extent_map(struct inode *inode,
const bool new_inline, const bool new_inline,
struct extent_map *em) struct extent_map *em)
{ {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_buffer *leaf = path->nodes[0]; struct extent_buffer *leaf = path->nodes[0];
const int slot = path->slots[0]; const int slot = path->slots[0];
@ -918,7 +924,7 @@ void btrfs_extent_item_to_extent_map(struct inode *inode,
u8 type = btrfs_file_extent_type(leaf, fi); u8 type = btrfs_file_extent_type(leaf, fi);
int compress_type = btrfs_file_extent_compression(leaf, fi); int compress_type = btrfs_file_extent_compression(leaf, fi);
em->bdev = root->fs_info->fs_devices->latest_bdev; em->bdev = fs_info->fs_devices->latest_bdev;
btrfs_item_key_to_cpu(leaf, &key, slot); btrfs_item_key_to_cpu(leaf, &key, slot);
extent_start = key.offset; extent_start = key.offset;
@ -930,7 +936,7 @@ void btrfs_extent_item_to_extent_map(struct inode *inode,
size_t size; size_t size;
size = btrfs_file_extent_inline_len(leaf, slot, fi); size = btrfs_file_extent_inline_len(leaf, slot, fi);
extent_end = ALIGN(extent_start + size, extent_end = ALIGN(extent_start + size,
root->fs_info->sectorsize); fs_info->sectorsize);
} }
em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
@ -973,7 +979,7 @@ void btrfs_extent_item_to_extent_map(struct inode *inode,
em->compress_type = compress_type; em->compress_type = compress_type;
} }
} else { } else {
btrfs_err(root->fs_info, btrfs_err(fs_info,
"unknown file extent item type %d, inode %llu, offset %llu, root %llu", "unknown file extent item type %d, inode %llu, offset %llu, root %llu",
type, btrfs_ino(inode), extent_start, type, btrfs_ino(inode), extent_start,
root->root_key.objectid); root->root_key.objectid);


@ -95,13 +95,13 @@ static int __compare_inode_defrag(struct inode_defrag *defrag1,
static int __btrfs_add_inode_defrag(struct inode *inode, static int __btrfs_add_inode_defrag(struct inode *inode,
struct inode_defrag *defrag) struct inode_defrag *defrag)
{ {
struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct inode_defrag *entry; struct inode_defrag *entry;
struct rb_node **p; struct rb_node **p;
struct rb_node *parent = NULL; struct rb_node *parent = NULL;
int ret; int ret;
p = &root->fs_info->defrag_inodes.rb_node; p = &fs_info->defrag_inodes.rb_node;
while (*p) { while (*p) {
parent = *p; parent = *p;
entry = rb_entry(parent, struct inode_defrag, rb_node); entry = rb_entry(parent, struct inode_defrag, rb_node);
@ -125,16 +125,18 @@ static int __btrfs_add_inode_defrag(struct inode *inode,
} }
set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags); set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
rb_link_node(&defrag->rb_node, parent, p); rb_link_node(&defrag->rb_node, parent, p);
rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes); rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
return 0; return 0;
} }
static inline int __need_auto_defrag(struct btrfs_root *root) static inline int __need_auto_defrag(struct btrfs_root *root)
{ {
if (!btrfs_test_opt(root->fs_info, AUTO_DEFRAG)) struct btrfs_fs_info *fs_info = root->fs_info;
if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
return 0; return 0;
if (btrfs_fs_closing(root->fs_info)) if (btrfs_fs_closing(fs_info))
return 0; return 0;
return 1; return 1;
@ -147,6 +149,7 @@ static inline int __need_auto_defrag(struct btrfs_root *root)
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
struct inode *inode) struct inode *inode)
{ {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_root *root = BTRFS_I(inode)->root;
struct inode_defrag *defrag; struct inode_defrag *defrag;
u64 transid; u64 transid;
@ -171,7 +174,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
defrag->transid = transid; defrag->transid = transid;
defrag->root = root->root_key.objectid; defrag->root = root->root_key.objectid;
spin_lock(&root->fs_info->defrag_inodes_lock); spin_lock(&fs_info->defrag_inodes_lock);
if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) { if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) {
/* /*
* If we set IN_DEFRAG flag and evict the inode from memory, * If we set IN_DEFRAG flag and evict the inode from memory,
@ -184,7 +187,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
} else { } else {
kmem_cache_free(btrfs_inode_defrag_cachep, defrag); kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
} }
spin_unlock(&root->fs_info->defrag_inodes_lock); spin_unlock(&fs_info->defrag_inodes_lock);
return 0; return 0;
} }
@ -196,6 +199,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
static void btrfs_requeue_inode_defrag(struct inode *inode, static void btrfs_requeue_inode_defrag(struct inode *inode,
struct inode_defrag *defrag) struct inode_defrag *defrag)
{ {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_root *root = BTRFS_I(inode)->root;
int ret; int ret;
@ -206,9 +210,9 @@ static void btrfs_requeue_inode_defrag(struct inode *inode,
* Here we don't check the IN_DEFRAG flag, because we need merge * Here we don't check the IN_DEFRAG flag, because we need merge
* them together. * them together.
*/ */
spin_lock(&root->fs_info->defrag_inodes_lock); spin_lock(&fs_info->defrag_inodes_lock);
ret = __btrfs_add_inode_defrag(inode, defrag); ret = __btrfs_add_inode_defrag(inode, defrag);
spin_unlock(&root->fs_info->defrag_inodes_lock); spin_unlock(&fs_info->defrag_inodes_lock);
if (ret) if (ret)
goto out; goto out;
return; return;
@ -489,6 +493,7 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
loff_t pos, size_t write_bytes, loff_t pos, size_t write_bytes,
struct extent_state **cached) struct extent_state **cached)
{ {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int err = 0; int err = 0;
int i; int i;
u64 num_bytes; u64 num_bytes;
@ -497,9 +502,9 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
u64 end_pos = pos + write_bytes; u64 end_pos = pos + write_bytes;
loff_t isize = i_size_read(inode); loff_t isize = i_size_read(inode);
start_pos = pos & ~((u64) root->fs_info->sectorsize - 1); start_pos = pos & ~((u64) fs_info->sectorsize - 1);
num_bytes = round_up(write_bytes + pos - start_pos, num_bytes = round_up(write_bytes + pos - start_pos,
root->fs_info->sectorsize); fs_info->sectorsize);
end_of_last_block = start_pos + num_bytes - 1; end_of_last_block = start_pos + num_bytes - 1;
err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block, err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
@ -696,6 +701,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
u32 extent_item_size, u32 extent_item_size,
int *key_inserted) int *key_inserted)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *leaf; struct extent_buffer *leaf;
struct btrfs_file_extent_item *fi; struct btrfs_file_extent_item *fi;
struct btrfs_key key; struct btrfs_key key;
@ -724,7 +730,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
modify_tree = 0; modify_tree = 0;
update_refs = (test_bit(BTRFS_ROOT_REF_COWS, &root->state) || update_refs = (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
root == root->fs_info->tree_root); root == fs_info->tree_root);
while (1) { while (1) {
recow = 0; recow = 0;
ret = btrfs_lookup_file_extent(trans, root, path, ino, ret = btrfs_lookup_file_extent(trans, root, path, ino,
@ -881,7 +887,7 @@ next_slot:
memcpy(&new_key, &key, sizeof(new_key)); memcpy(&new_key, &key, sizeof(new_key));
new_key.offset = end; new_key.offset = end;
btrfs_set_item_key_safe(root->fs_info, path, &new_key); btrfs_set_item_key_safe(fs_info, path, &new_key);
extent_offset += end - key.offset; extent_offset += end - key.offset;
btrfs_set_file_extent_offset(leaf, fi, extent_offset); btrfs_set_file_extent_offset(leaf, fi, extent_offset);
@ -936,7 +942,7 @@ delete_extent_item:
inode_sub_bytes(inode, inode_sub_bytes(inode,
extent_end - key.offset); extent_end - key.offset);
extent_end = ALIGN(extent_end, extent_end = ALIGN(extent_end,
root->fs_info->sectorsize); fs_info->sectorsize);
} else if (update_refs && disk_bytenr > 0) { } else if (update_refs && disk_bytenr > 0) {
ret = btrfs_free_extent(trans, root, ret = btrfs_free_extent(trans, root,
disk_bytenr, num_bytes, 0, disk_bytenr, num_bytes, 0,
@ -1082,6 +1088,7 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot,
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
struct inode *inode, u64 start, u64 end) struct inode *inode, u64 start, u64 end)
{ {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_buffer *leaf; struct extent_buffer *leaf;
struct btrfs_path *path; struct btrfs_path *path;
@ -1151,7 +1158,7 @@ again:
ino, bytenr, orig_offset, ino, bytenr, orig_offset,
&other_start, &other_end)) { &other_start, &other_end)) {
new_key.offset = end; new_key.offset = end;
btrfs_set_item_key_safe(root->fs_info, path, &new_key); btrfs_set_item_key_safe(fs_info, path, &new_key);
fi = btrfs_item_ptr(leaf, path->slots[0], fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item); struct btrfs_file_extent_item);
btrfs_set_file_extent_generation(leaf, fi, btrfs_set_file_extent_generation(leaf, fi,
@ -1185,7 +1192,7 @@ again:
trans->transid); trans->transid);
path->slots[0]++; path->slots[0]++;
new_key.offset = start; new_key.offset = start;
btrfs_set_item_key_safe(root->fs_info, path, &new_key); btrfs_set_item_key_safe(fs_info, path, &new_key);
fi = btrfs_item_ptr(leaf, path->slots[0], fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item); struct btrfs_file_extent_item);
@ -1418,16 +1425,16 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
u64 *lockstart, u64 *lockend, u64 *lockstart, u64 *lockend,
struct extent_state **cached_state) struct extent_state **cached_state)
{ {
struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
u64 start_pos; u64 start_pos;
u64 last_pos; u64 last_pos;
int i; int i;
int ret = 0; int ret = 0;
start_pos = round_down(pos, root->fs_info->sectorsize); start_pos = round_down(pos, fs_info->sectorsize);
last_pos = start_pos last_pos = start_pos
+ round_up(pos + write_bytes - start_pos, + round_up(pos + write_bytes - start_pos,
root->fs_info->sectorsize) - 1; fs_info->sectorsize) - 1;
if (start_pos < inode->i_size) { if (start_pos < inode->i_size) {
struct btrfs_ordered_extent *ordered; struct btrfs_ordered_extent *ordered;
@ -1474,6 +1481,7 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
static noinline int check_can_nocow(struct inode *inode, loff_t pos, static noinline int check_can_nocow(struct inode *inode, loff_t pos,
size_t *write_bytes) size_t *write_bytes)
{ {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_ordered_extent *ordered; struct btrfs_ordered_extent *ordered;
u64 lockstart, lockend; u64 lockstart, lockend;
@ -1484,9 +1492,9 @@ static noinline int check_can_nocow(struct inode *inode, loff_t pos,
if (!ret) if (!ret)
return -ENOSPC; return -ENOSPC;
lockstart = round_down(pos, root->fs_info->sectorsize); lockstart = round_down(pos, fs_info->sectorsize);
lockend = round_up(pos + *write_bytes, lockend = round_up(pos + *write_bytes,
root->fs_info->sectorsize) - 1; fs_info->sectorsize) - 1;
while (1) { while (1) {
lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend); lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
@ -1520,8 +1528,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
loff_t pos) loff_t pos)
{ {
struct inode *inode = file_inode(file); struct inode *inode = file_inode(file);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct page **pages = NULL; struct page **pages = NULL;
struct extent_state *cached_state = NULL; struct extent_state *cached_state = NULL;
u64 release_bytes = 0; u64 release_bytes = 0;
@ -1633,12 +1641,10 @@ again:
copied = btrfs_copy_from_user(pos, write_bytes, pages, i); copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
num_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info, num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
reserve_bytes);
dirty_sectors = round_up(copied + sector_offset, dirty_sectors = round_up(copied + sector_offset,
root->fs_info->sectorsize); fs_info->sectorsize);
dirty_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info, dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);
dirty_sectors);
/* /*
* if we have trouble faulting in the pages, fall * if we have trouble faulting in the pages, fall
@ -1666,11 +1672,9 @@ again:
* managed to copy. * managed to copy.
*/ */
if (num_sectors > dirty_sectors) { if (num_sectors > dirty_sectors) {
/* release everything except the sectors we dirtied */ /* release everything except the sectors we dirtied */
release_bytes -= dirty_sectors << release_bytes -= dirty_sectors <<
root->fs_info->sb->s_blocksize_bits; fs_info->sb->s_blocksize_bits;
if (copied > 0) { if (copied > 0) {
spin_lock(&BTRFS_I(inode)->lock); spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents++; BTRFS_I(inode)->outstanding_extents++;
@ -1683,7 +1687,7 @@ again:
u64 __pos; u64 __pos;
__pos = round_down(pos, __pos = round_down(pos,
root->fs_info->sectorsize) + fs_info->sectorsize) +
(dirty_pages << PAGE_SHIFT); (dirty_pages << PAGE_SHIFT);
btrfs_delalloc_release_space(inode, __pos, btrfs_delalloc_release_space(inode, __pos,
release_bytes); release_bytes);
@ -1691,7 +1695,7 @@ again:
} }
release_bytes = round_up(copied + sector_offset, release_bytes = round_up(copied + sector_offset,
root->fs_info->sectorsize); fs_info->sectorsize);
if (copied > 0) if (copied > 0)
ret = btrfs_dirty_pages(root, inode, pages, ret = btrfs_dirty_pages(root, inode, pages,
@ -1712,9 +1716,9 @@ again:
if (only_release_metadata && copied > 0) { if (only_release_metadata && copied > 0) {
lockstart = round_down(pos, lockstart = round_down(pos,
root->fs_info->sectorsize); fs_info->sectorsize);
lockend = round_up(pos + copied, lockend = round_up(pos + copied,
root->fs_info->sectorsize) - 1; fs_info->sectorsize) - 1;
set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
lockend, EXTENT_NORESERVE, NULL, lockend, EXTENT_NORESERVE, NULL,
@ -1727,7 +1731,7 @@ again:
cond_resched(); cond_resched();
balance_dirty_pages_ratelimited(inode->i_mapping); balance_dirty_pages_ratelimited(inode->i_mapping);
if (dirty_pages < (root->fs_info->nodesize >> PAGE_SHIFT) + 1) if (dirty_pages < (fs_info->nodesize >> PAGE_SHIFT) + 1)
btrfs_btree_balance_dirty(root); btrfs_btree_balance_dirty(root);
pos += copied; pos += copied;
@ -1742,7 +1746,7 @@ again:
btrfs_delalloc_release_metadata(inode, release_bytes); btrfs_delalloc_release_metadata(inode, release_bytes);
} else { } else {
btrfs_delalloc_release_space(inode, btrfs_delalloc_release_space(inode,
round_down(pos, root->fs_info->sectorsize), round_down(pos, fs_info->sectorsize),
release_bytes); release_bytes);
} }
} }
@ -1813,6 +1817,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
{ {
struct file *file = iocb->ki_filp; struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file); struct inode *inode = file_inode(file);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_root *root = BTRFS_I(inode)->root;
u64 start_pos; u64 start_pos;
u64 end_pos; u64 end_pos;
@ -1844,7 +1849,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
* although we have opened a file as writable, we have * although we have opened a file as writable, we have
* to stop this write operation to ensure FS consistency. * to stop this write operation to ensure FS consistency.
*/ */
if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) { if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
inode_unlock(inode); inode_unlock(inode);
err = -EROFS; err = -EROFS;
goto out; goto out;
@ -1860,18 +1865,18 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
pos = iocb->ki_pos; pos = iocb->ki_pos;
count = iov_iter_count(from); count = iov_iter_count(from);
start_pos = round_down(pos, root->fs_info->sectorsize); start_pos = round_down(pos, fs_info->sectorsize);
oldsize = i_size_read(inode); oldsize = i_size_read(inode);
if (start_pos > oldsize) { if (start_pos > oldsize) {
/* Expand hole size to cover write data, preventing empty gap */ /* Expand hole size to cover write data, preventing empty gap */
end_pos = round_up(pos + count, end_pos = round_up(pos + count,
root->fs_info->sectorsize); fs_info->sectorsize);
err = btrfs_cont_expand(inode, oldsize, end_pos); err = btrfs_cont_expand(inode, oldsize, end_pos);
if (err) { if (err) {
inode_unlock(inode); inode_unlock(inode);
goto out; goto out;
} }
if (start_pos > round_up(oldsize, root->fs_info->sectorsize)) if (start_pos > round_up(oldsize, fs_info->sectorsize))
clean_page = 1; clean_page = 1;
} }
@ -1951,6 +1956,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{ {
struct dentry *dentry = file_dentry(file); struct dentry *dentry = file_dentry(file);
struct inode *inode = d_inode(dentry); struct inode *inode = d_inode(dentry);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans; struct btrfs_trans_handle *trans;
struct btrfs_log_ctx ctx; struct btrfs_log_ctx ctx;
@ -2061,12 +2067,12 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* commit does not start nor waits for ordered extents to complete. * commit does not start nor waits for ordered extents to complete.
*/ */
smp_mb(); smp_mb();
if (btrfs_inode_in_log(inode, root->fs_info->generation) || if (btrfs_inode_in_log(inode, fs_info->generation) ||
(full_sync && BTRFS_I(inode)->last_trans <= (full_sync && BTRFS_I(inode)->last_trans <=
root->fs_info->last_trans_committed) || fs_info->last_trans_committed) ||
(!btrfs_have_ordered_extents_in_range(inode, start, len) && (!btrfs_have_ordered_extents_in_range(inode, start, len) &&
BTRFS_I(inode)->last_trans BTRFS_I(inode)->last_trans
<= root->fs_info->last_trans_committed)) { <= fs_info->last_trans_committed)) {
/* /*
* We've had everything committed since the last time we were * We've had everything committed since the last time we were
* modified so clear this flag in case it was set for whatever * modified so clear this flag in case it was set for whatever
@ -2224,6 +2230,7 @@ static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode, static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
struct btrfs_path *path, u64 offset, u64 end) struct btrfs_path *path, u64 offset, u64 end)
{ {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_buffer *leaf; struct extent_buffer *leaf;
struct btrfs_file_extent_item *fi; struct btrfs_file_extent_item *fi;
@ -2232,7 +2239,7 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
struct btrfs_key key; struct btrfs_key key;
int ret; int ret;
if (btrfs_fs_incompat(root->fs_info, NO_HOLES)) if (btrfs_fs_incompat(fs_info, NO_HOLES))
goto out; goto out;
key.objectid = btrfs_ino(inode); key.objectid = btrfs_ino(inode);
@ -2270,7 +2277,7 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
u64 num_bytes; u64 num_bytes;
key.offset = offset; key.offset = offset;
btrfs_set_item_key_safe(root->fs_info, path, &key); btrfs_set_item_key_safe(fs_info, path, &key);
fi = btrfs_item_ptr(leaf, path->slots[0], fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item); struct btrfs_file_extent_item);
num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end - num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
@ -2306,7 +2313,7 @@ out:
hole_em->block_start = EXTENT_MAP_HOLE; hole_em->block_start = EXTENT_MAP_HOLE;
hole_em->block_len = 0; hole_em->block_len = 0;
hole_em->orig_block_len = 0; hole_em->orig_block_len = 0;
hole_em->bdev = root->fs_info->fs_devices->latest_bdev; hole_em->bdev = fs_info->fs_devices->latest_bdev;
hole_em->compress_type = BTRFS_COMPRESS_NONE; hole_em->compress_type = BTRFS_COMPRESS_NONE;
hole_em->generation = trans->transid; hole_em->generation = trans->transid;
@ -2358,6 +2365,7 @@ static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{ {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_state *cached_state = NULL; struct extent_state *cached_state = NULL;
struct btrfs_path *path; struct btrfs_path *path;
@ -2369,13 +2377,13 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
u64 tail_len; u64 tail_len;
u64 orig_start = offset; u64 orig_start = offset;
u64 cur_offset; u64 cur_offset;
u64 min_size = btrfs_calc_trunc_metadata_size(root->fs_info, 1); u64 min_size = btrfs_calc_trunc_metadata_size(fs_info, 1);
u64 drop_end; u64 drop_end;
int ret = 0; int ret = 0;
int err = 0; int err = 0;
unsigned int rsv_count; unsigned int rsv_count;
bool same_block; bool same_block;
bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES); bool no_holes = btrfs_fs_incompat(fs_info, NO_HOLES);
u64 ino_size; u64 ino_size;
bool truncated_block = false; bool truncated_block = false;
bool updated_inode = false; bool updated_inode = false;
@ -2385,7 +2393,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
return ret; return ret;
inode_lock(inode); inode_lock(inode);
ino_size = round_up(inode->i_size, root->fs_info->sectorsize); ino_size = round_up(inode->i_size, fs_info->sectorsize);
ret = find_first_non_hole(inode, &offset, &len); ret = find_first_non_hole(inode, &offset, &len);
if (ret < 0) if (ret < 0)
goto out_only_mutex; goto out_only_mutex;
@ -2398,8 +2406,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
lockstart = round_up(offset, btrfs_inode_sectorsize(inode)); lockstart = round_up(offset, btrfs_inode_sectorsize(inode));
lockend = round_down(offset + len, lockend = round_down(offset + len,
btrfs_inode_sectorsize(inode)) - 1; btrfs_inode_sectorsize(inode)) - 1;
same_block = (BTRFS_BYTES_TO_BLKS(root->fs_info, offset)) same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
== (BTRFS_BYTES_TO_BLKS(root->fs_info, offset + len - 1)); == (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
/* /*
* We needn't truncate any block which is beyond the end of the file * We needn't truncate any block which is beyond the end of the file
* because we are sure there is no data there. * because we are sure there is no data there.
@ -2408,7 +2416,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
* Only do this if we are in the same block and we aren't doing the * Only do this if we are in the same block and we aren't doing the
* entire block. * entire block.
*/ */
if (same_block && len < root->fs_info->sectorsize) { if (same_block && len < fs_info->sectorsize) {
if (offset < ino_size) { if (offset < ino_size) {
truncated_block = true; truncated_block = true;
ret = btrfs_truncate_block(inode, offset, len, 0); ret = btrfs_truncate_block(inode, offset, len, 0);
@ -2516,7 +2524,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
ret = -ENOMEM; ret = -ENOMEM;
goto out_free; goto out_free;
} }
rsv->size = btrfs_calc_trunc_metadata_size(root->fs_info, 1); rsv->size = btrfs_calc_trunc_metadata_size(fs_info, 1);
rsv->failfast = 1; rsv->failfast = 1;
/* /*
@ -2531,7 +2539,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
goto out_free; goto out_free;
} }
ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv, ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
min_size, 0); min_size, 0);
BUG_ON(ret); BUG_ON(ret);
trans->block_rsv = rsv; trans->block_rsv = rsv;
@ -2545,7 +2553,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
if (ret != -ENOSPC) if (ret != -ENOSPC)
break; break;
trans->block_rsv = &root->fs_info->trans_block_rsv; trans->block_rsv = &fs_info->trans_block_rsv;
if (cur_offset < drop_end && cur_offset < ino_size) { if (cur_offset < drop_end && cur_offset < ino_size) {
ret = fill_holes(trans, inode, path, cur_offset, ret = fill_holes(trans, inode, path, cur_offset,
@ -2581,7 +2589,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
break; break;
} }
ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
rsv, min_size, 0); rsv, min_size, 0);
BUG_ON(ret); /* shouldn't happen */ BUG_ON(ret); /* shouldn't happen */
trans->block_rsv = rsv; trans->block_rsv = rsv;
@ -2600,7 +2608,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
goto out_trans; goto out_trans;
} }
trans->block_rsv = &root->fs_info->trans_block_rsv; trans->block_rsv = &fs_info->trans_block_rsv;
/* /*
* If we are using the NO_HOLES feature we might have had already an * If we are using the NO_HOLES feature we might have had already an
* hole that overlaps a part of the region [lockstart, lockend] and * hole that overlaps a part of the region [lockstart, lockend] and
@ -2636,7 +2644,7 @@ out_trans:
inode_inc_iversion(inode); inode_inc_iversion(inode);
inode->i_mtime = inode->i_ctime = current_time(inode); inode->i_mtime = inode->i_ctime = current_time(inode);
trans->block_rsv = &root->fs_info->trans_block_rsv; trans->block_rsv = &fs_info->trans_block_rsv;
ret = btrfs_update_inode(trans, root, inode); ret = btrfs_update_inode(trans, root, inode);
updated_inode = true; updated_inode = true;
btrfs_end_transaction(trans, root); btrfs_end_transaction(trans, root);
@ -2922,7 +2930,7 @@ out:
static int find_desired_extent(struct inode *inode, loff_t *offset, int whence) static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
{ {
struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct extent_map *em = NULL; struct extent_map *em = NULL;
struct extent_state *cached_state = NULL; struct extent_state *cached_state = NULL;
u64 lockstart; u64 lockstart;
@ -2940,11 +2948,11 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
*/ */
start = max_t(loff_t, 0, *offset); start = max_t(loff_t, 0, *offset);
lockstart = round_down(start, root->fs_info->sectorsize); lockstart = round_down(start, fs_info->sectorsize);
lockend = round_up(i_size_read(inode), lockend = round_up(i_size_read(inode),
root->fs_info->sectorsize); fs_info->sectorsize);
if (lockend <= lockstart) if (lockend <= lockstart)
lockend = lockstart + root->fs_info->sectorsize; lockend = lockstart + fs_info->sectorsize;
lockend--; lockend--;
len = lockend - lockstart + 1; len = lockend - lockstart + 1;
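The hunks above all apply one mechanical change: a function that walks someptr->fs_info more than once gains a single convenience pointer near the top of its declarations, and every later use goes through it. As a rough illustration only, here is the before/after shape in ordinary user-space C; the struct definitions are hypothetical stand-ins, not code from this commit.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel structures; only the shape matters. */
struct fs_info { unsigned long long generation; unsigned int sectorsize; };
struct root    { struct fs_info *fs_info; };

/* Before: the same pointer chain is walked on every use. */
static unsigned long long summary_before(const struct root *root)
{
	return root->fs_info->generation * root->fs_info->sectorsize;
}

/* After: hoist the pointer once, then reuse the local. */
static unsigned long long summary_after(const struct root *root)
{
	const struct fs_info *fs_info = root->fs_info;

	return fs_info->generation * fs_info->sectorsize;
}

int main(void)
{
	struct fs_info fi = { .generation = 7, .sectorsize = 4096 };
	struct root root = { .fs_info = &fi };

	printf("%llu %llu\n", summary_before(&root), summary_after(&root));
	return 0;
}

A side effect visible throughout these hunks is that many previously line-wrapped call sites now fit on a single line.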

View File

@ -47,6 +47,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
struct btrfs_path *path, struct btrfs_path *path,
u64 offset) u64 offset)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key; struct btrfs_key key;
struct btrfs_key location; struct btrfs_key location;
struct btrfs_disk_key disk_key; struct btrfs_disk_key disk_key;
@ -74,7 +75,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
btrfs_disk_key_to_cpu(&location, &disk_key); btrfs_disk_key_to_cpu(&location, &disk_key);
btrfs_release_path(path); btrfs_release_path(path);
inode = btrfs_iget(root->fs_info->sb, &location, root, NULL); inode = btrfs_iget(fs_info->sb, &location, root, NULL);
if (IS_ERR(inode)) if (IS_ERR(inode))
return inode; return inode;
if (is_bad_inode(inode)) { if (is_bad_inode(inode)) {
@ -94,6 +95,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
*block_group, struct btrfs_path *path) *block_group, struct btrfs_path *path)
{ {
struct inode *inode = NULL; struct inode *inode = NULL;
struct btrfs_fs_info *fs_info = root->fs_info;
u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW; u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;
spin_lock(&block_group->lock); spin_lock(&block_group->lock);
@ -110,8 +112,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
spin_lock(&block_group->lock); spin_lock(&block_group->lock);
if (!((BTRFS_I(inode)->flags & flags) == flags)) { if (!((BTRFS_I(inode)->flags & flags) == flags)) {
btrfs_info(root->fs_info, btrfs_info(fs_info, "Old style space inode found, converting.");
"Old style space inode found, converting.");
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM | BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
BTRFS_INODE_NODATACOW; BTRFS_INODE_NODATACOW;
block_group->disk_cache_state = BTRFS_DC_CLEAR; block_group->disk_cache_state = BTRFS_DC_CLEAR;
@ -206,12 +207,13 @@ int create_free_space_inode(struct btrfs_root *root,
int btrfs_check_trunc_cache_free_space(struct btrfs_root *root, int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
struct btrfs_block_rsv *rsv) struct btrfs_block_rsv *rsv)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
u64 needed_bytes; u64 needed_bytes;
int ret; int ret;
/* 1 for slack space, 1 for updating the inode */ /* 1 for slack space, 1 for updating the inode */
needed_bytes = btrfs_calc_trunc_metadata_size(root->fs_info, 1) + needed_bytes = btrfs_calc_trunc_metadata_size(fs_info, 1) +
btrfs_calc_trans_metadata_size(root->fs_info, 1); btrfs_calc_trans_metadata_size(fs_info, 1);
spin_lock(&rsv->lock); spin_lock(&rsv->lock);
if (rsv->reserved < needed_bytes) if (rsv->reserved < needed_bytes)
@ -667,6 +669,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
struct btrfs_free_space_ctl *ctl, struct btrfs_free_space_ctl *ctl,
struct btrfs_path *path, u64 offset) struct btrfs_path *path, u64 offset)
{ {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_free_space_header *header; struct btrfs_free_space_header *header;
struct extent_buffer *leaf; struct extent_buffer *leaf;
struct btrfs_io_ctl io_ctl; struct btrfs_io_ctl io_ctl;
@ -706,16 +709,16 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
btrfs_release_path(path); btrfs_release_path(path);
if (!BTRFS_I(inode)->generation) { if (!BTRFS_I(inode)->generation) {
btrfs_info(root->fs_info, btrfs_info(fs_info,
"The free space cache file (%llu) is invalid. skip it\n", "The free space cache file (%llu) is invalid. skip it\n",
offset); offset);
return 0; return 0;
} }
if (BTRFS_I(inode)->generation != generation) { if (BTRFS_I(inode)->generation != generation) {
btrfs_err(root->fs_info, btrfs_err(fs_info,
"free space inode generation (%llu) did not match free space cache generation (%llu)", "free space inode generation (%llu) did not match free space cache generation (%llu)",
BTRFS_I(inode)->generation, generation); BTRFS_I(inode)->generation, generation);
return 0; return 0;
} }
@ -764,7 +767,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
ret = link_free_space(ctl, e); ret = link_free_space(ctl, e);
spin_unlock(&ctl->tree_lock); spin_unlock(&ctl->tree_lock);
if (ret) { if (ret) {
btrfs_err(root->fs_info, btrfs_err(fs_info,
"Duplicate entries in free space cache, dumping"); "Duplicate entries in free space cache, dumping");
kmem_cache_free(btrfs_free_space_cachep, e); kmem_cache_free(btrfs_free_space_cachep, e);
goto free_cache; goto free_cache;
@ -784,7 +787,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
ctl->op->recalc_thresholds(ctl); ctl->op->recalc_thresholds(ctl);
spin_unlock(&ctl->tree_lock); spin_unlock(&ctl->tree_lock);
if (ret) { if (ret) {
btrfs_err(root->fs_info, btrfs_err(fs_info,
"Duplicate entries in free space cache, dumping"); "Duplicate entries in free space cache, dumping");
kmem_cache_free(btrfs_free_space_cachep, e); kmem_cache_free(btrfs_free_space_cachep, e);
goto free_cache; goto free_cache;
@ -1036,6 +1039,7 @@ write_pinned_extent_entries(struct btrfs_root *root,
struct btrfs_io_ctl *io_ctl, struct btrfs_io_ctl *io_ctl,
int *entries) int *entries)
{ {
struct btrfs_fs_info *fs_info;
u64 start, extent_start, extent_end, len; u64 start, extent_start, extent_end, len;
struct extent_io_tree *unpin = NULL; struct extent_io_tree *unpin = NULL;
int ret; int ret;
@ -1043,6 +1047,8 @@ write_pinned_extent_entries(struct btrfs_root *root,
if (!block_group) if (!block_group)
return 0; return 0;
fs_info = block_group->fs_info;
/* /*
* We want to add any pinned extents to our free space cache * We want to add any pinned extents to our free space cache
* so we don't leak the space * so we don't leak the space
@ -1050,7 +1056,7 @@ write_pinned_extent_entries(struct btrfs_root *root,
* We shouldn't have switched the pinned extents yet so this is the * We shouldn't have switched the pinned extents yet so this is the
* right one * right one
*/ */
unpin = root->fs_info->pinned_extents; unpin = fs_info->pinned_extents;
start = block_group->key.objectid; start = block_group->key.objectid;
@ -1141,12 +1147,15 @@ int btrfs_wait_cache_io(struct btrfs_root *root,
{ {
int ret; int ret;
struct inode *inode = io_ctl->inode; struct inode *inode = io_ctl->inode;
struct btrfs_fs_info *fs_info;
if (!inode) if (!inode)
return 0; return 0;
fs_info = btrfs_sb(inode->i_sb);
if (block_group) if (block_group)
root = root->fs_info->tree_root; root = fs_info->tree_root;
/* Flush the dirty pages in the cache file. */ /* Flush the dirty pages in the cache file. */
ret = flush_dirty_cache(inode); ret = flush_dirty_cache(inode);
@ -1163,9 +1172,9 @@ out:
BTRFS_I(inode)->generation = 0; BTRFS_I(inode)->generation = 0;
if (block_group) { if (block_group) {
#ifdef DEBUG #ifdef DEBUG
btrfs_err(root->fs_info, btrfs_err(fs_info,
"failed to write free space cache for block group %llu", "failed to write free space cache for block group %llu",
block_group->key.objectid); block_group->key.objectid);
#endif #endif
} }
} }
@ -1376,9 +1385,9 @@ int btrfs_write_out_cache(struct btrfs_fs_info *fs_info,
path, block_group->key.objectid); path, block_group->key.objectid);
if (ret) { if (ret) {
#ifdef DEBUG #ifdef DEBUG
btrfs_err(root->fs_info, btrfs_err(fs_info,
"failed to write free space cache for block group %llu", "failed to write free space cache for block group %llu",
block_group->key.objectid); block_group->key.objectid);
#endif #endif
spin_lock(&block_group->lock); spin_lock(&block_group->lock);
block_group->disk_cache_state = BTRFS_DC_ERROR; block_group->disk_cache_state = BTRFS_DC_ERROR;
@ -1965,11 +1974,11 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info) struct btrfs_free_space *info)
{ {
struct btrfs_block_group_cache *block_group = ctl->private; struct btrfs_block_group_cache *block_group = ctl->private;
struct btrfs_fs_info *fs_info = block_group->fs_info;
bool forced = false; bool forced = false;
#ifdef CONFIG_BTRFS_DEBUG #ifdef CONFIG_BTRFS_DEBUG
if (btrfs_should_fragment_free_space(block_group->fs_info->extent_root, if (btrfs_should_fragment_free_space(fs_info->extent_root, block_group))
block_group))
forced = true; forced = true;
#endif #endif
@ -1985,7 +1994,7 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
* of cache left then go ahead and add them, no sense in adding * of cache left then go ahead and add them, no sense in adding
* the overhead of a bitmap if we don't have to. * the overhead of a bitmap if we don't have to.
*/ */
if (info->bytes <= block_group->fs_info->sectorsize * 4) { if (info->bytes <= fs_info->sectorsize * 4) {
if (ctl->free_extents * 2 <= ctl->extents_thresh) if (ctl->free_extents * 2 <= ctl->extents_thresh)
return false; return false;
} else { } else {
@ -2444,6 +2453,7 @@ out:
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
u64 bytes) u64 bytes)
{ {
struct btrfs_fs_info *fs_info = block_group->fs_info;
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *info; struct btrfs_free_space *info;
struct rb_node *n; struct rb_node *n;
@ -2453,23 +2463,23 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
info = rb_entry(n, struct btrfs_free_space, offset_index); info = rb_entry(n, struct btrfs_free_space, offset_index);
if (info->bytes >= bytes && !block_group->ro) if (info->bytes >= bytes && !block_group->ro)
count++; count++;
btrfs_crit(block_group->fs_info, btrfs_crit(fs_info, "entry offset %llu, bytes %llu, bitmap %s",
"entry offset %llu, bytes %llu, bitmap %s",
info->offset, info->bytes, info->offset, info->bytes,
(info->bitmap) ? "yes" : "no"); (info->bitmap) ? "yes" : "no");
} }
btrfs_info(block_group->fs_info, "block group has cluster?: %s", btrfs_info(fs_info, "block group has cluster?: %s",
list_empty(&block_group->cluster_list) ? "no" : "yes"); list_empty(&block_group->cluster_list) ? "no" : "yes");
btrfs_info(block_group->fs_info, btrfs_info(fs_info,
"%d blocks of free space at or bigger than bytes is", count); "%d blocks of free space at or bigger than bytes is", count);
} }
void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group) void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
{ {
struct btrfs_fs_info *fs_info = block_group->fs_info;
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
spin_lock_init(&ctl->tree_lock); spin_lock_init(&ctl->tree_lock);
ctl->unit = block_group->fs_info->sectorsize; ctl->unit = fs_info->sectorsize;
ctl->start = block_group->key.objectid; ctl->start = block_group->key.objectid;
ctl->private = block_group; ctl->private = block_group;
ctl->op = &free_space_op; ctl->op = &free_space_op;
@ -3014,6 +3024,7 @@ int btrfs_find_space_cluster(struct btrfs_root *root,
u64 offset, u64 bytes, u64 empty_size) u64 offset, u64 bytes, u64 empty_size)
{ {
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_fs_info *fs_info = block_group->fs_info;
struct btrfs_free_space *entry, *tmp; struct btrfs_free_space *entry, *tmp;
LIST_HEAD(bitmaps); LIST_HEAD(bitmaps);
u64 min_bytes; u64 min_bytes;
@ -3026,14 +3037,14 @@ int btrfs_find_space_cluster(struct btrfs_root *root,
* For metadata, allow allocates with smaller extents. For * For metadata, allow allocates with smaller extents. For
* data, keep it dense. * data, keep it dense.
*/ */
if (btrfs_test_opt(root->fs_info, SSD_SPREAD)) { if (btrfs_test_opt(fs_info, SSD_SPREAD)) {
cont1_bytes = min_bytes = bytes + empty_size; cont1_bytes = min_bytes = bytes + empty_size;
} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) { } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
cont1_bytes = bytes; cont1_bytes = bytes;
min_bytes = block_group->fs_info->sectorsize; min_bytes = fs_info->sectorsize;
} else { } else {
cont1_bytes = max(bytes, (bytes + empty_size) >> 2); cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
min_bytes = block_group->fs_info->sectorsize; min_bytes = fs_info->sectorsize;
} }
spin_lock(&ctl->tree_lock); spin_lock(&ctl->tree_lock);
@ -3318,6 +3329,7 @@ void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache)
void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group) void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
{ {
struct btrfs_fs_info *fs_info = block_group->fs_info;
struct extent_map_tree *em_tree; struct extent_map_tree *em_tree;
struct extent_map *em; struct extent_map *em;
bool cleanup; bool cleanup;
@ -3328,8 +3340,8 @@ void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
spin_unlock(&block_group->lock); spin_unlock(&block_group->lock);
if (cleanup) { if (cleanup) {
lock_chunks(block_group->fs_info); lock_chunks(fs_info);
em_tree = &block_group->fs_info->mapping_tree.map_tree; em_tree = &fs_info->mapping_tree.map_tree;
write_lock(&em_tree->lock); write_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, block_group->key.objectid, em = lookup_extent_mapping(em_tree, block_group->key.objectid,
1); 1);
@ -3340,7 +3352,7 @@ void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
*/ */
remove_extent_mapping(em_tree, em); remove_extent_mapping(em_tree, em);
write_unlock(&em_tree->lock); write_unlock(&em_tree->lock);
unlock_chunks(block_group->fs_info); unlock_chunks(fs_info);
/* once for us and once for the tree */ /* once for us and once for the tree */
free_extent_map(em); free_extent_map(em);
@ -3470,7 +3482,7 @@ int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
int ret = 0; int ret = 0;
u64 root_gen = btrfs_root_generation(&root->root_item); u64 root_gen = btrfs_root_generation(&root->root_item);
if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE)) if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
return 0; return 0;
/* /*
@ -3509,12 +3521,13 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
struct btrfs_path *path, struct btrfs_path *path,
struct inode *inode) struct inode *inode)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
int ret; int ret;
struct btrfs_io_ctl io_ctl; struct btrfs_io_ctl io_ctl;
bool release_metadata = true; bool release_metadata = true;
if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE)) if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
return 0; return 0;
memset(&io_ctl, 0, sizeof(io_ctl)); memset(&io_ctl, 0, sizeof(io_ctl));
@ -3535,9 +3548,9 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
if (release_metadata) if (release_metadata)
btrfs_delalloc_release_metadata(inode, inode->i_size); btrfs_delalloc_release_metadata(inode, inode->i_size);
#ifdef DEBUG #ifdef DEBUG
btrfs_err(root->fs_info, btrfs_err(fs_info,
"failed to write free ino cache for root %llu", "failed to write free ino cache for root %llu",
root->root_key.objectid); root->root_key.objectid);
#endif #endif
} }
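A small variant shows up in write_pinned_extent_entries() and btrfs_wait_cache_io() above: when the object that supplies fs_info may be NULL, the convenience variable is declared up front but assigned only after the early-return check. A minimal stand-alone sketch of that shape, again with hypothetical types rather than the kernel ones:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-ins; the hunks above do the same with the btrfs types. */
struct fs_info     { unsigned int sectorsize; };
struct block_group { struct fs_info *fs_info; };

/* Declare the local first, assign it only once the carrier is known non-NULL. */
static unsigned int count_blocks(const struct block_group *bg, unsigned int len)
{
	const struct fs_info *fs_info;

	if (!bg)
		return 0;

	fs_info = bg->fs_info;
	return len / fs_info->sectorsize;
}

int main(void)
{
	struct fs_info fi = { .sectorsize = 4096 };
	struct block_group bg = { .fs_info = &fi };

	printf("%u %u\n", count_blocks(&bg, 65536), count_blocks(NULL, 65536));
	return 0;
}

The same deferred assignment appears wherever the block_group or io_ctl->inode argument is optional.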

View File

@ -189,7 +189,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
int ret; int ret;
bitmap_size = free_space_bitmap_size(block_group->key.offset, bitmap_size = free_space_bitmap_size(block_group->key.offset,
block_group->fs_info->sectorsize); fs_info->sectorsize);
bitmap = alloc_bitmap(bitmap_size); bitmap = alloc_bitmap(bitmap_size);
if (!bitmap) { if (!bitmap) {
ret = -ENOMEM; ret = -ENOMEM;
@ -227,9 +227,9 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
ASSERT(found_key.objectid + found_key.offset <= end); ASSERT(found_key.objectid + found_key.offset <= end);
first = div_u64(found_key.objectid - start, first = div_u64(found_key.objectid - start,
block_group->fs_info->sectorsize); fs_info->sectorsize);
last = div_u64(found_key.objectid + found_key.offset - start, last = div_u64(found_key.objectid + found_key.offset - start,
block_group->fs_info->sectorsize); fs_info->sectorsize);
le_bitmap_set(bitmap, first, last - first); le_bitmap_set(bitmap, first, last - first);
extent_count++; extent_count++;
@ -270,7 +270,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
} }
bitmap_cursor = bitmap; bitmap_cursor = bitmap;
bitmap_range = block_group->fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS; bitmap_range = fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
i = start; i = start;
while (i < end) { while (i < end) {
unsigned long ptr; unsigned long ptr;
@ -279,7 +279,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
extent_size = min(end - i, bitmap_range); extent_size = min(end - i, bitmap_range);
data_size = free_space_bitmap_size(extent_size, data_size = free_space_bitmap_size(extent_size,
block_group->fs_info->sectorsize); fs_info->sectorsize);
key.objectid = i; key.objectid = i;
key.type = BTRFS_FREE_SPACE_BITMAP_KEY; key.type = BTRFS_FREE_SPACE_BITMAP_KEY;
@ -330,7 +330,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
int ret; int ret;
bitmap_size = free_space_bitmap_size(block_group->key.offset, bitmap_size = free_space_bitmap_size(block_group->key.offset,
block_group->fs_info->sectorsize); fs_info->sectorsize);
bitmap = alloc_bitmap(bitmap_size); bitmap = alloc_bitmap(bitmap_size);
if (!bitmap) { if (!bitmap) {
ret = -ENOMEM; ret = -ENOMEM;
@ -370,11 +370,11 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
ASSERT(found_key.objectid + found_key.offset <= end); ASSERT(found_key.objectid + found_key.offset <= end);
bitmap_pos = div_u64(found_key.objectid - start, bitmap_pos = div_u64(found_key.objectid - start,
block_group->fs_info->sectorsize * fs_info->sectorsize *
BITS_PER_BYTE); BITS_PER_BYTE);
bitmap_cursor = bitmap + bitmap_pos; bitmap_cursor = bitmap + bitmap_pos;
data_size = free_space_bitmap_size(found_key.offset, data_size = free_space_bitmap_size(found_key.offset,
block_group->fs_info->sectorsize); fs_info->sectorsize);
ptr = btrfs_item_ptr_offset(leaf, path->slots[0] - 1); ptr = btrfs_item_ptr_offset(leaf, path->slots[0] - 1);
read_extent_buffer(leaf, bitmap_cursor, ptr, read_extent_buffer(leaf, bitmap_cursor, ptr,
@ -425,7 +425,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
extent_count++; extent_count++;
} }
prev_bit = bit; prev_bit = bit;
offset += block_group->fs_info->sectorsize; offset += fs_info->sectorsize;
bitnr++; bitnr++;
} }
if (prev_bit == 1) { if (prev_bit == 1) {
@ -526,6 +526,7 @@ static void free_space_set_bits(struct btrfs_block_group_cache *block_group,
struct btrfs_path *path, u64 *start, u64 *size, struct btrfs_path *path, u64 *start, u64 *size,
int bit) int bit)
{ {
struct btrfs_fs_info *fs_info = block_group->fs_info;
struct extent_buffer *leaf; struct extent_buffer *leaf;
struct btrfs_key key; struct btrfs_key key;
u64 end = *start + *size; u64 end = *start + *size;
@ -545,10 +546,8 @@ static void free_space_set_bits(struct btrfs_block_group_cache *block_group,
end = found_end; end = found_end;
ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
first = div_u64(*start - found_start, first = div_u64(*start - found_start, fs_info->sectorsize);
block_group->fs_info->sectorsize); last = div_u64(end - found_start, fs_info->sectorsize);
last = div_u64(end - found_start,
block_group->fs_info->sectorsize);
if (bit) if (bit)
extent_buffer_bitmap_set(leaf, ptr, first, last - first); extent_buffer_bitmap_set(leaf, ptr, first, last - first);
else else
@ -1270,7 +1269,7 @@ int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
list_del(&free_space_root->dirty_list); list_del(&free_space_root->dirty_list);
btrfs_tree_lock(free_space_root->node); btrfs_tree_lock(free_space_root->node);
clean_tree_block(trans, tree_root->fs_info, free_space_root->node); clean_tree_block(trans, fs_info, free_space_root->node);
btrfs_tree_unlock(free_space_root->node); btrfs_tree_unlock(free_space_root->node);
btrfs_free_tree_block(trans, free_space_root, free_space_root->node, btrfs_free_tree_block(trans, free_space_root, free_space_root->node,
0, 1); 0, 1);
@ -1476,7 +1475,7 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
extent_count++; extent_count++;
} }
prev_bit = bit; prev_bit = bit;
offset += block_group->fs_info->sectorsize; offset += fs_info->sectorsize;
} }
} }
if (prev_bit == 1) { if (prev_bit == 1) {

View File

@ -328,6 +328,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
const char *name, int name_len, const char *name, int name_len,
u64 inode_objectid, u64 ref_objectid, u64 index) u64 inode_objectid, u64 ref_objectid, u64 index)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path; struct btrfs_path *path;
struct btrfs_key key; struct btrfs_key key;
struct btrfs_inode_ref *ref; struct btrfs_inode_ref *ref;
@ -384,7 +385,7 @@ out:
btrfs_free_path(path); btrfs_free_path(path);
if (ret == -EMLINK) { if (ret == -EMLINK) {
struct btrfs_super_block *disk_super = root->fs_info->super_copy; struct btrfs_super_block *disk_super = fs_info->super_copy;
/* We ran out of space in the ref array. Need to /* We ran out of space in the ref array. Need to
* add an extended ref. */ * add an extended ref. */
if (btrfs_super_incompat_flags(disk_super) if (btrfs_super_incompat_flags(disk_super)

View File

@ -38,7 +38,7 @@ static int caching_kthread(void *data)
int slot; int slot;
int ret; int ret;
if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE)) if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
return 0; return 0;
path = btrfs_alloc_path(); path = btrfs_alloc_path();
@ -180,7 +180,7 @@ static void start_caching(struct btrfs_root *root)
if (IS_ERR(tsk)) { if (IS_ERR(tsk)) {
btrfs_warn(fs_info, "failed to start inode caching task"); btrfs_warn(fs_info, "failed to start inode caching task");
btrfs_clear_pending_and_info(fs_info, INODE_MAP_CACHE, btrfs_clear_pending_and_info(fs_info, INODE_MAP_CACHE,
"disabling inode map caching"); "disabling inode map caching");
} }
} }
@ -395,6 +395,7 @@ void btrfs_init_free_ino_ctl(struct btrfs_root *root)
int btrfs_save_ino_cache(struct btrfs_root *root, int btrfs_save_ino_cache(struct btrfs_root *root,
struct btrfs_trans_handle *trans) struct btrfs_trans_handle *trans)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
struct btrfs_path *path; struct btrfs_path *path;
struct inode *inode; struct inode *inode;
@ -415,7 +416,7 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
if (btrfs_root_refs(&root->root_item) == 0) if (btrfs_root_refs(&root->root_item) == 0)
return 0; return 0;
if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE)) if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
return 0; return 0;
path = btrfs_alloc_path(); path = btrfs_alloc_path();
@ -423,7 +424,7 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
return -ENOMEM; return -ENOMEM;
rsv = trans->block_rsv; rsv = trans->block_rsv;
trans->block_rsv = &root->fs_info->trans_block_rsv; trans->block_rsv = &fs_info->trans_block_rsv;
num_bytes = trans->bytes_reserved; num_bytes = trans->bytes_reserved;
/* /*
@ -433,15 +434,14 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
* 1 item for free space object * 1 item for free space object
* 3 items for pre-allocation * 3 items for pre-allocation
*/ */
trans->bytes_reserved = btrfs_calc_trans_metadata_size(root->fs_info, trans->bytes_reserved = btrfs_calc_trans_metadata_size(fs_info, 10);
10);
ret = btrfs_block_rsv_add(root, trans->block_rsv, ret = btrfs_block_rsv_add(root, trans->block_rsv,
trans->bytes_reserved, trans->bytes_reserved,
BTRFS_RESERVE_NO_FLUSH); BTRFS_RESERVE_NO_FLUSH);
if (ret) if (ret)
goto out; goto out;
trace_btrfs_space_reservation(root->fs_info, "ino_cache", trace_btrfs_space_reservation(fs_info, "ino_cache", trans->transid,
trans->transid, trans->bytes_reserved, 1); trans->bytes_reserved, 1);
again: again:
inode = lookup_free_ino_inode(root, path); inode = lookup_free_ino_inode(root, path);
if (IS_ERR(inode) && (PTR_ERR(inode) != -ENOENT || retry)) { if (IS_ERR(inode) && (PTR_ERR(inode) != -ENOENT || retry)) {
@ -507,8 +507,8 @@ again:
out_put: out_put:
iput(inode); iput(inode);
out_release: out_release:
trace_btrfs_space_reservation(root->fs_info, "ino_cache", trace_btrfs_space_reservation(fs_info, "ino_cache", trans->transid,
trans->transid, trans->bytes_reserved, 0); trans->bytes_reserved, 0);
btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved); btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
out: out:
trans->block_rsv = rsv; trans->block_rsv = rsv;
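Helpers that only receive a VFS inode, such as fill_holes() and btrfs_punch_hole() above and the ordered-extent functions below, take the pointer from btrfs_sb(inode->i_sb) rather than going through BTRFS_I(inode)->root->fs_info. The sketch below uses hypothetical user-space types to show why both routes name the same object; it assumes that btrfs_sb() is essentially a typed accessor for the superblock's s_fs_info pointer, which is how the btrfs headers define it.

#include <stdio.h>

/* Hypothetical stand-ins for the two lookup routes seen in the hunks. */
struct fs_info     { int id; };
struct super_block { void *s_fs_info; };
struct root        { struct fs_info *fs_info; };
struct inode       { struct super_block *i_sb; };

static struct fs_info *sb_to_fs_info(const struct super_block *sb)
{
	return sb->s_fs_info;	/* the shape of btrfs_sb() */
}

int main(void)
{
	struct fs_info fi = { .id = 1 };
	struct super_block sb = { .s_fs_info = &fi };
	struct root root = { .fs_info = &fi };
	struct inode inode = { .i_sb = &sb };

	/* Both routes hand back the same convenience pointer. */
	printf("%d\n", root.fs_info == sb_to_fs_info(inode.i_sb));
	return 0;
}

Either route ends at the same struct btrfs_fs_info, so a function simply uses whichever handle it already holds.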

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -186,6 +186,7 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
u64 start, u64 len, u64 disk_len, u64 start, u64 len, u64 disk_len,
int type, int dio, int compress_type) int type, int dio, int compress_type)
{ {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_ordered_inode_tree *tree; struct btrfs_ordered_inode_tree *tree;
struct rb_node *node; struct rb_node *node;
@ -234,11 +235,10 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
&root->ordered_extents); &root->ordered_extents);
root->nr_ordered_extents++; root->nr_ordered_extents++;
if (root->nr_ordered_extents == 1) { if (root->nr_ordered_extents == 1) {
spin_lock(&root->fs_info->ordered_root_lock); spin_lock(&fs_info->ordered_root_lock);
BUG_ON(!list_empty(&root->ordered_root)); BUG_ON(!list_empty(&root->ordered_root));
list_add_tail(&root->ordered_root, list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
&root->fs_info->ordered_roots); spin_unlock(&fs_info->ordered_root_lock);
spin_unlock(&root->fs_info->ordered_root_lock);
} }
spin_unlock(&root->ordered_extent_lock); spin_unlock(&root->ordered_extent_lock);
@ -303,6 +303,7 @@ int btrfs_dec_test_first_ordered_pending(struct inode *inode,
struct btrfs_ordered_extent **cached, struct btrfs_ordered_extent **cached,
u64 *file_offset, u64 io_size, int uptodate) u64 *file_offset, u64 io_size, int uptodate)
{ {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ordered_inode_tree *tree; struct btrfs_ordered_inode_tree *tree;
struct rb_node *node; struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL; struct btrfs_ordered_extent *entry = NULL;
@ -331,14 +332,14 @@ int btrfs_dec_test_first_ordered_pending(struct inode *inode,
entry->len); entry->len);
*file_offset = dec_end; *file_offset = dec_end;
if (dec_start > dec_end) { if (dec_start > dec_end) {
btrfs_crit(BTRFS_I(inode)->root->fs_info, btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu",
"bad ordering dec_start %llu end %llu", dec_start, dec_end); dec_start, dec_end);
} }
to_dec = dec_end - dec_start; to_dec = dec_end - dec_start;
if (to_dec > entry->bytes_left) { if (to_dec > entry->bytes_left) {
btrfs_crit(BTRFS_I(inode)->root->fs_info, btrfs_crit(fs_info,
"bad ordered accounting left %llu size %llu", "bad ordered accounting left %llu size %llu",
entry->bytes_left, to_dec); entry->bytes_left, to_dec);
} }
entry->bytes_left -= to_dec; entry->bytes_left -= to_dec;
if (!uptodate) if (!uptodate)
@ -588,6 +589,7 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
void btrfs_remove_ordered_extent(struct inode *inode, void btrfs_remove_ordered_extent(struct inode *inode,
struct btrfs_ordered_extent *entry) struct btrfs_ordered_extent *entry)
{ {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ordered_inode_tree *tree; struct btrfs_ordered_inode_tree *tree;
struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_root *root = BTRFS_I(inode)->root;
struct rb_node *node; struct rb_node *node;
@ -618,11 +620,11 @@ void btrfs_remove_ordered_extent(struct inode *inode,
* lock, so be nice and check if trans is set, but ASSERT() so * lock, so be nice and check if trans is set, but ASSERT() so
* if it isn't set a developer will notice. * if it isn't set a developer will notice.
*/ */
spin_lock(&root->fs_info->trans_lock); spin_lock(&fs_info->trans_lock);
trans = root->fs_info->running_transaction; trans = fs_info->running_transaction;
if (trans) if (trans)
atomic_inc(&trans->use_count); atomic_inc(&trans->use_count);
spin_unlock(&root->fs_info->trans_lock); spin_unlock(&fs_info->trans_lock);
ASSERT(trans); ASSERT(trans);
if (trans) { if (trans) {
@ -639,10 +641,10 @@ void btrfs_remove_ordered_extent(struct inode *inode,
trace_btrfs_ordered_extent_remove(inode, entry); trace_btrfs_ordered_extent_remove(inode, entry);
if (!root->nr_ordered_extents) { if (!root->nr_ordered_extents) {
spin_lock(&root->fs_info->ordered_root_lock); spin_lock(&fs_info->ordered_root_lock);
BUG_ON(list_empty(&root->ordered_root)); BUG_ON(list_empty(&root->ordered_root));
list_del_init(&root->ordered_root); list_del_init(&root->ordered_root);
spin_unlock(&root->fs_info->ordered_root_lock); spin_unlock(&fs_info->ordered_root_lock);
} }
spin_unlock(&root->ordered_extent_lock); spin_unlock(&root->ordered_extent_lock);
wake_up(&entry->wait); wake_up(&entry->wait);
@ -664,6 +666,7 @@ static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr, int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
const u64 range_start, const u64 range_len) const u64 range_start, const u64 range_len)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
LIST_HEAD(splice); LIST_HEAD(splice);
LIST_HEAD(skipped); LIST_HEAD(skipped);
LIST_HEAD(works); LIST_HEAD(works);
@ -694,8 +697,7 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
btrfs_flush_delalloc_helper, btrfs_flush_delalloc_helper,
btrfs_run_ordered_extent_work, NULL, NULL); btrfs_run_ordered_extent_work, NULL, NULL);
list_add_tail(&ordered->work_list, &works); list_add_tail(&ordered->work_list, &works);
btrfs_queue_work(root->fs_info->flush_workers, btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);
&ordered->flush_work);
cond_resched(); cond_resched();
spin_lock(&root->ordered_extent_lock); spin_lock(&root->ordered_extent_lock);

View File

@ -163,6 +163,7 @@ static void print_uuid_item(struct extent_buffer *l, unsigned long offset,
void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l) void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
int i; int i;
u32 type, nr; u32 type, nr;
struct btrfs_item *item; struct btrfs_item *item;
@ -182,7 +183,7 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
nr = btrfs_header_nritems(l); nr = btrfs_header_nritems(l);
btrfs_info(root->fs_info, "leaf %llu total ptrs %d free space %d", btrfs_info(fs_info, "leaf %llu total ptrs %d free space %d",
btrfs_header_bytenr(l), nr, btrfs_leaf_free_space(root, l)); btrfs_header_bytenr(l), nr, btrfs_leaf_free_space(root, l));
for (i = 0 ; i < nr ; i++) { for (i = 0 ; i < nr ; i++) {
item = btrfs_item_nr(i); item = btrfs_item_nr(i);
@ -316,6 +317,7 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c) void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
int i; u32 nr; int i; u32 nr;
struct btrfs_key key; struct btrfs_key key;
int level; int level;
@ -328,10 +330,10 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
btrfs_print_leaf(root, c); btrfs_print_leaf(root, c);
return; return;
} }
btrfs_info(root->fs_info, btrfs_info(fs_info,
"node %llu level %d total ptrs %d free spc %u", "node %llu level %d total ptrs %d free spc %u",
btrfs_header_bytenr(c), level, nr, btrfs_header_bytenr(c), level, nr,
(u32)BTRFS_NODEPTRS_PER_BLOCK(root->fs_info) - nr); (u32)BTRFS_NODEPTRS_PER_BLOCK(fs_info) - nr);
for (i = 0; i < nr; i++) { for (i = 0; i < nr; i++) {
btrfs_node_key_to_cpu(c, &key, i); btrfs_node_key_to_cpu(c, &key, i);
pr_info("\tkey %d (%llu %u %llu) block %llu\n", pr_info("\tkey %d (%llu %u %llu) block %llu\n",

View File

@ -1019,7 +1019,7 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
list_del(&quota_root->dirty_list); list_del(&quota_root->dirty_list);
btrfs_tree_lock(quota_root->node); btrfs_tree_lock(quota_root->node);
clean_tree_block(trans, tree_root->fs_info, quota_root->node); clean_tree_block(trans, fs_info, quota_root->node);
btrfs_tree_unlock(quota_root->node); btrfs_tree_unlock(quota_root->node);
btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1); btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);
@ -1192,7 +1192,7 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
} }
spin_lock(&fs_info->qgroup_lock); spin_lock(&fs_info->qgroup_lock);
ret = add_relation_rb(quota_root->fs_info, src, dst); ret = add_relation_rb(fs_info, src, dst);
if (ret < 0) { if (ret < 0) {
spin_unlock(&fs_info->qgroup_lock); spin_unlock(&fs_info->qgroup_lock);
goto out; goto out;
@ -1340,7 +1340,7 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
} }
spin_lock(&fs_info->qgroup_lock); spin_lock(&fs_info->qgroup_lock);
del_qgroup_rb(quota_root->fs_info, qgroupid); del_qgroup_rb(fs_info, qgroupid);
spin_unlock(&fs_info->qgroup_lock); spin_unlock(&fs_info->qgroup_lock);
out: out:
mutex_unlock(&fs_info->qgroup_ioctl_lock); mutex_unlock(&fs_info->qgroup_ioctl_lock);
@ -1521,6 +1521,7 @@ int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct btrfs_root *root,
struct extent_buffer *eb) struct extent_buffer *eb)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
int nr = btrfs_header_nritems(eb); int nr = btrfs_header_nritems(eb);
int i, extent_type, ret; int i, extent_type, ret;
struct btrfs_key key; struct btrfs_key key;
@ -1528,7 +1529,7 @@ int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
u64 bytenr, num_bytes; u64 bytenr, num_bytes;
/* We can be called directly from walk_up_proc() */ /* We can be called directly from walk_up_proc() */
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags)) if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
return 0; return 0;
for (i = 0; i < nr; i++) { for (i = 0; i < nr; i++) {
@ -1550,8 +1551,8 @@ int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi); num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
ret = btrfs_qgroup_trace_extent(trans, root->fs_info, ret = btrfs_qgroup_trace_extent(trans, fs_info, bytenr,
bytenr, num_bytes, GFP_NOFS); num_bytes, GFP_NOFS);
if (ret) if (ret)
return ret; return ret;
} }
@ -1625,6 +1626,7 @@ int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
struct extent_buffer *root_eb, struct extent_buffer *root_eb,
u64 root_gen, int root_level) u64 root_gen, int root_level)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
int ret = 0; int ret = 0;
int level; int level;
struct extent_buffer *eb = root_eb; struct extent_buffer *eb = root_eb;
@ -1633,7 +1635,7 @@ int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL); BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
BUG_ON(root_eb == NULL); BUG_ON(root_eb == NULL);
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags)) if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
return 0; return 0;
if (!extent_buffer_uptodate(root_eb)) { if (!extent_buffer_uptodate(root_eb)) {
@ -1698,9 +1700,10 @@ walk_down:
btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
path->locks[level] = BTRFS_READ_LOCK_BLOCKING; path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
ret = btrfs_qgroup_trace_extent(trans, ret = btrfs_qgroup_trace_extent(trans, fs_info,
root->fs_info, child_bytenr, child_bytenr,
root->fs_info->nodesize, GFP_NOFS); fs_info->nodesize,
GFP_NOFS);
if (ret) if (ret)
goto out; goto out;
} }
@ -2170,7 +2173,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
} }
rcu_read_lock(); rcu_read_lock();
level_size = srcroot->fs_info->nodesize; level_size = fs_info->nodesize;
rcu_read_unlock(); rcu_read_unlock();
} }
@ -2254,8 +2257,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
i_qgroups = (u64 *)(inherit + 1); i_qgroups = (u64 *)(inherit + 1);
for (i = 0; i < inherit->num_qgroups; ++i) { for (i = 0; i < inherit->num_qgroups; ++i) {
if (*i_qgroups) { if (*i_qgroups) {
ret = add_relation_rb(quota_root->fs_info, objectid, ret = add_relation_rb(fs_info, objectid, *i_qgroups);
*i_qgroups);
if (ret) if (ret)
goto unlock; goto unlock;
} }
@ -2897,13 +2899,14 @@ int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len)
int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes) int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
int ret; int ret;
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) || if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
!is_fstree(root->objectid) || num_bytes == 0) !is_fstree(root->objectid) || num_bytes == 0)
return 0; return 0;
BUG_ON(num_bytes != round_down(num_bytes, root->fs_info->nodesize)); BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
ret = qgroup_reserve(root, num_bytes); ret = qgroup_reserve(root, num_bytes);
if (ret < 0) if (ret < 0)
return ret; return ret;
@ -2913,9 +2916,10 @@ int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes)
void btrfs_qgroup_free_meta_all(struct btrfs_root *root) void btrfs_qgroup_free_meta_all(struct btrfs_root *root)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
int reserved; int reserved;
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) || if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
!is_fstree(root->objectid)) !is_fstree(root->objectid))
return; return;
@ -2927,11 +2931,13 @@ void btrfs_qgroup_free_meta_all(struct btrfs_root *root)
void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes) void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes)
{ {
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) || struct btrfs_fs_info *fs_info = root->fs_info;
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
!is_fstree(root->objectid)) !is_fstree(root->objectid))
return; return;
BUG_ON(num_bytes != round_down(num_bytes, root->fs_info->nodesize)); BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
WARN_ON(atomic_read(&root->qgroup_meta_rsv) < num_bytes); WARN_ON(atomic_read(&root->qgroup_meta_rsv) < num_bytes);
atomic_sub(num_bytes, &root->qgroup_meta_rsv); atomic_sub(num_bytes, &root->qgroup_meta_rsv);
qgroup_free(root, num_bytes); qgroup_free(root, num_bytes);

View File

@ -1478,11 +1478,8 @@ cleanup:
static void async_rmw_stripe(struct btrfs_raid_bio *rbio) static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
{ {
btrfs_init_work(&rbio->work, btrfs_rmw_helper, btrfs_init_work(&rbio->work, btrfs_rmw_helper, rmw_work, NULL, NULL);
rmw_work, NULL, NULL); btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
btrfs_queue_work(rbio->fs_info->rmw_workers,
&rbio->work);
} }
static void async_read_rebuild(struct btrfs_raid_bio *rbio) static void async_read_rebuild(struct btrfs_raid_bio *rbio)
@ -1490,8 +1487,7 @@ static void async_read_rebuild(struct btrfs_raid_bio *rbio)
btrfs_init_work(&rbio->work, btrfs_rmw_helper, btrfs_init_work(&rbio->work, btrfs_rmw_helper,
read_rebuild_work, NULL, NULL); read_rebuild_work, NULL, NULL);
btrfs_queue_work(rbio->fs_info->rmw_workers, btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
&rbio->work);
} }
/* /*
@ -1573,8 +1569,7 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
bio->bi_end_io = raid_rmw_end_io; bio->bi_end_io = raid_rmw_end_io;
bio_set_op_attrs(bio, REQ_OP_READ, 0); bio_set_op_attrs(bio, REQ_OP_READ, 0);
btrfs_bio_wq_end_io(rbio->fs_info, bio, btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
BTRFS_WQ_ENDIO_RAID56);
submit_bio(bio); submit_bio(bio);
} }
@ -1742,6 +1737,7 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
int raid56_parity_write(struct btrfs_root *root, struct bio *bio, int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len) struct btrfs_bio *bbio, u64 stripe_len)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_raid_bio *rbio; struct btrfs_raid_bio *rbio;
struct btrfs_plug_cb *plug = NULL; struct btrfs_plug_cb *plug = NULL;
struct blk_plug_cb *cb; struct blk_plug_cb *cb;
@ -1756,7 +1752,7 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
rbio->bio_list_bytes = bio->bi_iter.bi_size; rbio->bio_list_bytes = bio->bi_iter.bi_size;
rbio->operation = BTRFS_RBIO_WRITE; rbio->operation = BTRFS_RBIO_WRITE;
btrfs_bio_counter_inc_noblocked(root->fs_info); btrfs_bio_counter_inc_noblocked(fs_info);
rbio->generic_bio_cnt = 1; rbio->generic_bio_cnt = 1;
/* /*
@ -1766,16 +1762,15 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
if (rbio_is_full(rbio)) { if (rbio_is_full(rbio)) {
ret = full_stripe_write(rbio); ret = full_stripe_write(rbio);
if (ret) if (ret)
btrfs_bio_counter_dec(root->fs_info); btrfs_bio_counter_dec(fs_info);
return ret; return ret;
} }
cb = blk_check_plugged(btrfs_raid_unplug, root->fs_info, cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
sizeof(*plug));
if (cb) { if (cb) {
plug = container_of(cb, struct btrfs_plug_cb, cb); plug = container_of(cb, struct btrfs_plug_cb, cb);
if (!plug->info) { if (!plug->info) {
plug->info = root->fs_info; plug->info = fs_info;
INIT_LIST_HEAD(&plug->rbio_list); INIT_LIST_HEAD(&plug->rbio_list);
} }
list_add_tail(&rbio->plug_list, &plug->rbio_list); list_add_tail(&rbio->plug_list, &plug->rbio_list);
@ -1783,7 +1778,7 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
} else { } else {
ret = __raid56_parity_write(rbio); ret = __raid56_parity_write(rbio);
if (ret) if (ret)
btrfs_bio_counter_dec(root->fs_info); btrfs_bio_counter_dec(fs_info);
} }
return ret; return ret;
} }
@ -2098,8 +2093,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
bio->bi_end_io = raid_recover_end_io; bio->bi_end_io = raid_recover_end_io;
bio_set_op_attrs(bio, REQ_OP_READ, 0); bio_set_op_attrs(bio, REQ_OP_READ, 0);
btrfs_bio_wq_end_io(rbio->fs_info, bio, btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
BTRFS_WQ_ENDIO_RAID56);
submit_bio(bio); submit_bio(bio);
} }
@ -2123,6 +2117,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len, struct btrfs_bio *bbio, u64 stripe_len,
int mirror_num, int generic_io) int mirror_num, int generic_io)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_raid_bio *rbio; struct btrfs_raid_bio *rbio;
int ret; int ret;
@ -2139,7 +2134,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
rbio->faila = find_logical_bio_stripe(rbio, bio); rbio->faila = find_logical_bio_stripe(rbio, bio);
if (rbio->faila == -1) { if (rbio->faila == -1) {
btrfs_warn(root->fs_info, btrfs_warn(fs_info,
"%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)", "%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
__func__, (u64)bio->bi_iter.bi_sector << 9, __func__, (u64)bio->bi_iter.bi_sector << 9,
(u64)bio->bi_iter.bi_size, bbio->map_type); (u64)bio->bi_iter.bi_size, bbio->map_type);
@ -2150,7 +2145,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
} }
if (generic_io) { if (generic_io) {
btrfs_bio_counter_inc_noblocked(root->fs_info); btrfs_bio_counter_inc_noblocked(fs_info);
rbio->generic_bio_cnt = 1; rbio->generic_bio_cnt = 1;
} else { } else {
btrfs_get_bbio(bbio); btrfs_get_bbio(bbio);
@ -2213,6 +2208,7 @@ raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
struct btrfs_device *scrub_dev, struct btrfs_device *scrub_dev,
unsigned long *dbitmap, int stripe_nsectors) unsigned long *dbitmap, int stripe_nsectors)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_raid_bio *rbio; struct btrfs_raid_bio *rbio;
int i; int i;
@ -2235,7 +2231,7 @@ raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
} }
/* Now we just support the sectorsize equals to page size */ /* Now we just support the sectorsize equals to page size */
ASSERT(root->fs_info->sectorsize == PAGE_SIZE); ASSERT(fs_info->sectorsize == PAGE_SIZE);
ASSERT(rbio->stripe_npages == stripe_nsectors); ASSERT(rbio->stripe_npages == stripe_nsectors);
bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors); bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
@ -2617,8 +2613,7 @@ static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
bio->bi_end_io = raid56_parity_scrub_end_io; bio->bi_end_io = raid56_parity_scrub_end_io;
bio_set_op_attrs(bio, REQ_OP_READ, 0); bio_set_op_attrs(bio, REQ_OP_READ, 0);
btrfs_bio_wq_end_io(rbio->fs_info, bio, btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
BTRFS_WQ_ENDIO_RAID56);
submit_bio(bio); submit_bio(bio);
} }
@ -2646,8 +2641,7 @@ static void async_scrub_parity(struct btrfs_raid_bio *rbio)
btrfs_init_work(&rbio->work, btrfs_rmw_helper, btrfs_init_work(&rbio->work, btrfs_rmw_helper,
scrub_parity_work, NULL, NULL); scrub_parity_work, NULL, NULL);
btrfs_queue_work(rbio->fs_info->rmw_workers, btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
&rbio->work);
} }
void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio) void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)

View File

@ -335,7 +335,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
if (!re) if (!re)
return NULL; return NULL;
blocksize = root->fs_info->nodesize; blocksize = fs_info->nodesize;
re->logical = logical; re->logical = logical;
re->top = *top; re->top = *top;
INIT_LIST_HEAD(&re->extctl); INIT_LIST_HEAD(&re->extctl);
@ -352,7 +352,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
goto error; goto error;
if (bbio->num_stripes > BTRFS_MAX_MIRRORS) { if (bbio->num_stripes > BTRFS_MAX_MIRRORS) {
btrfs_err(root->fs_info, btrfs_err(fs_info,
"readahead: more than %d copies not supported", "readahead: more than %d copies not supported",
BTRFS_MAX_MIRRORS); BTRFS_MAX_MIRRORS);
goto error; goto error;
@ -950,11 +950,10 @@ int btrfs_reada_wait(void *handle)
reada_start_machine(fs_info); reada_start_machine(fs_info);
wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0, wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
5 * HZ); 5 * HZ);
dump_devs(rc->root->fs_info, dump_devs(fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);
atomic_read(&rc->elems) < 10 ? 1 : 0);
} }
dump_devs(rc->root->fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0); dump_devs(fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);
kref_put(&rc->refcnt, reada_control_release); kref_put(&rc->refcnt, reada_control_release);

View File

@ -1288,9 +1288,10 @@ fail:
*/ */
static int __must_check __add_reloc_root(struct btrfs_root *root) static int __must_check __add_reloc_root(struct btrfs_root *root)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct rb_node *rb_node; struct rb_node *rb_node;
struct mapping_node *node; struct mapping_node *node;
struct reloc_control *rc = root->fs_info->reloc_ctl; struct reloc_control *rc = fs_info->reloc_ctl;
node = kmalloc(sizeof(*node), GFP_NOFS); node = kmalloc(sizeof(*node), GFP_NOFS);
if (!node) if (!node)
@ -1304,7 +1305,7 @@ static int __must_check __add_reloc_root(struct btrfs_root *root)
node->bytenr, &node->rb_node); node->bytenr, &node->rb_node);
spin_unlock(&rc->reloc_root_tree.lock); spin_unlock(&rc->reloc_root_tree.lock);
if (rb_node) { if (rb_node) {
btrfs_panic(root->fs_info, -EEXIST, btrfs_panic(fs_info, -EEXIST,
"Duplicate root found for start=%llu while inserting into relocation tree", "Duplicate root found for start=%llu while inserting into relocation tree",
node->bytenr); node->bytenr);
kfree(node); kfree(node);
@ -1321,9 +1322,10 @@ static int __must_check __add_reloc_root(struct btrfs_root *root)
*/ */
static void __del_reloc_root(struct btrfs_root *root) static void __del_reloc_root(struct btrfs_root *root)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct rb_node *rb_node; struct rb_node *rb_node;
struct mapping_node *node = NULL; struct mapping_node *node = NULL;
struct reloc_control *rc = root->fs_info->reloc_ctl; struct reloc_control *rc = fs_info->reloc_ctl;
spin_lock(&rc->reloc_root_tree.lock); spin_lock(&rc->reloc_root_tree.lock);
rb_node = tree_search(&rc->reloc_root_tree.rb_root, rb_node = tree_search(&rc->reloc_root_tree.rb_root,
@ -1338,9 +1340,9 @@ static void __del_reloc_root(struct btrfs_root *root)
return; return;
BUG_ON((struct btrfs_root *)node->data != root); BUG_ON((struct btrfs_root *)node->data != root);
spin_lock(&root->fs_info->trans_lock); spin_lock(&fs_info->trans_lock);
list_del_init(&root->root_list); list_del_init(&root->root_list);
spin_unlock(&root->fs_info->trans_lock); spin_unlock(&fs_info->trans_lock);
kfree(node); kfree(node);
} }
@ -1350,9 +1352,10 @@ static void __del_reloc_root(struct btrfs_root *root)
*/ */
static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr) static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct rb_node *rb_node; struct rb_node *rb_node;
struct mapping_node *node = NULL; struct mapping_node *node = NULL;
struct reloc_control *rc = root->fs_info->reloc_ctl; struct reloc_control *rc = fs_info->reloc_ctl;
spin_lock(&rc->reloc_root_tree.lock); spin_lock(&rc->reloc_root_tree.lock);
rb_node = tree_search(&rc->reloc_root_tree.rb_root, rb_node = tree_search(&rc->reloc_root_tree.rb_root,
@ -1380,6 +1383,7 @@ static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans, static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 objectid) struct btrfs_root *root, u64 objectid)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *reloc_root; struct btrfs_root *reloc_root;
struct extent_buffer *eb; struct extent_buffer *eb;
struct btrfs_root_item *root_item; struct btrfs_root_item *root_item;
@ -1437,12 +1441,12 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
btrfs_tree_unlock(eb); btrfs_tree_unlock(eb);
free_extent_buffer(eb); free_extent_buffer(eb);
ret = btrfs_insert_root(trans, root->fs_info->tree_root, ret = btrfs_insert_root(trans, fs_info->tree_root,
&root_key, root_item); &root_key, root_item);
BUG_ON(ret); BUG_ON(ret);
kfree(root_item); kfree(root_item);
reloc_root = btrfs_read_fs_root(root->fs_info->tree_root, &root_key); reloc_root = btrfs_read_fs_root(fs_info->tree_root, &root_key);
BUG_ON(IS_ERR(reloc_root)); BUG_ON(IS_ERR(reloc_root));
reloc_root->last_trans = trans->transid; reloc_root->last_trans = trans->transid;
return reloc_root; return reloc_root;
@ -1455,8 +1459,9 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
int btrfs_init_reloc_root(struct btrfs_trans_handle *trans, int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root) struct btrfs_root *root)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *reloc_root; struct btrfs_root *reloc_root;
struct reloc_control *rc = root->fs_info->reloc_ctl; struct reloc_control *rc = fs_info->reloc_ctl;
struct btrfs_block_rsv *rsv; struct btrfs_block_rsv *rsv;
int clear_rsv = 0; int clear_rsv = 0;
int ret; int ret;
@ -1492,6 +1497,7 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root) struct btrfs_root *root)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *reloc_root; struct btrfs_root *reloc_root;
struct btrfs_root_item *root_item; struct btrfs_root_item *root_item;
int ret; int ret;
@ -1502,7 +1508,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
reloc_root = root->reloc_root; reloc_root = root->reloc_root;
root_item = &reloc_root->root_item; root_item = &reloc_root->root_item;
if (root->fs_info->reloc_ctl->merge_reloc_tree && if (fs_info->reloc_ctl->merge_reloc_tree &&
btrfs_root_refs(root_item) == 0) { btrfs_root_refs(root_item) == 0) {
root->reloc_root = NULL; root->reloc_root = NULL;
__del_reloc_root(reloc_root); __del_reloc_root(reloc_root);
@ -1514,7 +1520,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
reloc_root->commit_root = btrfs_root_node(reloc_root); reloc_root->commit_root = btrfs_root_node(reloc_root);
} }
ret = btrfs_update_root(trans, root->fs_info->tree_root, ret = btrfs_update_root(trans, fs_info->tree_root,
&reloc_root->root_key, root_item); &reloc_root->root_key, root_item);
BUG_ON(ret); BUG_ON(ret);
@ -1642,6 +1648,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct btrfs_root *root,
struct extent_buffer *leaf) struct extent_buffer *leaf)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key; struct btrfs_key key;
struct btrfs_file_extent_item *fi; struct btrfs_file_extent_item *fi;
struct inode *inode = NULL; struct inode *inode = NULL;
@ -1698,8 +1705,8 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
end = key.offset + end = key.offset +
btrfs_file_extent_num_bytes(leaf, fi); btrfs_file_extent_num_bytes(leaf, fi);
WARN_ON(!IS_ALIGNED(key.offset, WARN_ON(!IS_ALIGNED(key.offset,
root->fs_info->sectorsize)); fs_info->sectorsize));
WARN_ON(!IS_ALIGNED(end, root->fs_info->sectorsize)); WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
end--; end--;
ret = try_lock_extent(&BTRFS_I(inode)->io_tree, ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
key.offset, end); key.offset, end);
@ -1777,6 +1784,7 @@ int replace_path(struct btrfs_trans_handle *trans,
struct btrfs_path *path, struct btrfs_key *next_key, struct btrfs_path *path, struct btrfs_key *next_key,
int lowest_level, int max_level) int lowest_level, int max_level)
{ {
struct btrfs_fs_info *fs_info = dest->fs_info;
struct extent_buffer *eb; struct extent_buffer *eb;
struct extent_buffer *parent; struct extent_buffer *parent;
struct btrfs_key key; struct btrfs_key key;
@ -1834,7 +1842,7 @@ again:
btrfs_node_key_to_cpu(parent, next_key, slot + 1); btrfs_node_key_to_cpu(parent, next_key, slot + 1);
old_bytenr = btrfs_node_blockptr(parent, slot); old_bytenr = btrfs_node_blockptr(parent, slot);
blocksize = dest->fs_info->nodesize; blocksize = fs_info->nodesize;
old_ptr_gen = btrfs_node_ptr_generation(parent, slot); old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
if (level <= max_level) { if (level <= max_level) {
@ -2061,6 +2069,7 @@ static int invalidate_extent_cache(struct btrfs_root *root,
struct btrfs_key *min_key, struct btrfs_key *min_key,
struct btrfs_key *max_key) struct btrfs_key *max_key)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct inode *inode = NULL; struct inode *inode = NULL;
u64 objectid; u64 objectid;
u64 start, end; u64 start, end;
@ -2095,7 +2104,7 @@ static int invalidate_extent_cache(struct btrfs_root *root,
start = 0; start = 0;
else { else {
start = min_key->offset; start = min_key->offset;
WARN_ON(!IS_ALIGNED(start, root->fs_info->sectorsize)); WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize));
} }
} else { } else {
start = 0; start = 0;
@ -2110,7 +2119,7 @@ static int invalidate_extent_cache(struct btrfs_root *root,
if (max_key->offset == 0) if (max_key->offset == 0)
continue; continue;
end = max_key->offset; end = max_key->offset;
WARN_ON(!IS_ALIGNED(end, root->fs_info->sectorsize)); WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
end--; end--;
} }
} else { } else {
@ -2150,6 +2159,7 @@ static int find_next_key(struct btrfs_path *path, int level,
static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
struct btrfs_root *root) struct btrfs_root *root)
{ {
struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
LIST_HEAD(inode_list); LIST_HEAD(inode_list);
struct btrfs_key key; struct btrfs_key key;
struct btrfs_key next_key; struct btrfs_key next_key;
@ -2198,7 +2208,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
btrfs_unlock_up_safe(path, 0); btrfs_unlock_up_safe(path, 0);
} }
min_reserved = root->fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2; min_reserved = fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
memset(&next_key, 0, sizeof(next_key)); memset(&next_key, 0, sizeof(next_key));
while (1) { while (1) {
@ -2304,16 +2314,17 @@ static noinline_for_stack
int prepare_to_merge(struct reloc_control *rc, int err) int prepare_to_merge(struct reloc_control *rc, int err)
{ {
struct btrfs_root *root = rc->extent_root; struct btrfs_root *root = rc->extent_root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *reloc_root; struct btrfs_root *reloc_root;
struct btrfs_trans_handle *trans; struct btrfs_trans_handle *trans;
LIST_HEAD(reloc_roots); LIST_HEAD(reloc_roots);
u64 num_bytes = 0; u64 num_bytes = 0;
int ret; int ret;
mutex_lock(&root->fs_info->reloc_mutex); mutex_lock(&fs_info->reloc_mutex);
rc->merging_rsv_size += root->fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2; rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
rc->merging_rsv_size += rc->nodes_relocated * 2; rc->merging_rsv_size += rc->nodes_relocated * 2;
mutex_unlock(&root->fs_info->reloc_mutex); mutex_unlock(&fs_info->reloc_mutex);
again: again:
if (!err) { if (!err) {
@ -2348,8 +2359,7 @@ again:
struct btrfs_root, root_list); struct btrfs_root, root_list);
list_del_init(&reloc_root->root_list); list_del_init(&reloc_root->root_list);
root = read_fs_root(reloc_root->fs_info, root = read_fs_root(fs_info, reloc_root->root_key.offset);
reloc_root->root_key.offset);
BUG_ON(IS_ERR(root)); BUG_ON(IS_ERR(root));
BUG_ON(root->reloc_root != reloc_root); BUG_ON(root->reloc_root != reloc_root);
@ -2392,6 +2402,7 @@ void free_reloc_roots(struct list_head *list)
static noinline_for_stack static noinline_for_stack
void merge_reloc_roots(struct reloc_control *rc) void merge_reloc_roots(struct reloc_control *rc)
{ {
struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
struct btrfs_root *root; struct btrfs_root *root;
struct btrfs_root *reloc_root; struct btrfs_root *reloc_root;
u64 last_snap; u64 last_snap;
@ -2409,9 +2420,9 @@ again:
* adding their roots to the list while we are * adding their roots to the list while we are
* doing this splice * doing this splice
*/ */
mutex_lock(&root->fs_info->reloc_mutex); mutex_lock(&fs_info->reloc_mutex);
list_splice_init(&rc->reloc_roots, &reloc_roots); list_splice_init(&rc->reloc_roots, &reloc_roots);
mutex_unlock(&root->fs_info->reloc_mutex); mutex_unlock(&fs_info->reloc_mutex);
while (!list_empty(&reloc_roots)) { while (!list_empty(&reloc_roots)) {
found = 1; found = 1;
@ -2419,7 +2430,7 @@ again:
struct btrfs_root, root_list); struct btrfs_root, root_list);
if (btrfs_root_refs(&reloc_root->root_item) > 0) { if (btrfs_root_refs(&reloc_root->root_item) > 0) {
root = read_fs_root(reloc_root->fs_info, root = read_fs_root(fs_info,
reloc_root->root_key.offset); reloc_root->root_key.offset);
BUG_ON(IS_ERR(root)); BUG_ON(IS_ERR(root));
BUG_ON(root->reloc_root != reloc_root); BUG_ON(root->reloc_root != reloc_root);
@ -2458,14 +2469,14 @@ again:
} }
out: out:
if (ret) { if (ret) {
btrfs_handle_fs_error(root->fs_info, ret, NULL); btrfs_handle_fs_error(fs_info, ret, NULL);
if (!list_empty(&reloc_roots)) if (!list_empty(&reloc_roots))
free_reloc_roots(&reloc_roots); free_reloc_roots(&reloc_roots);
/* new reloc root may be added */ /* new reloc root may be added */
mutex_lock(&root->fs_info->reloc_mutex); mutex_lock(&fs_info->reloc_mutex);
list_splice_init(&rc->reloc_roots, &reloc_roots); list_splice_init(&rc->reloc_roots, &reloc_roots);
mutex_unlock(&root->fs_info->reloc_mutex); mutex_unlock(&fs_info->reloc_mutex);
if (!list_empty(&reloc_roots)) if (!list_empty(&reloc_roots))
free_reloc_roots(&reloc_roots); free_reloc_roots(&reloc_roots);
} }
@ -2487,12 +2498,13 @@ static void free_block_list(struct rb_root *blocks)
static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans, static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *reloc_root) struct btrfs_root *reloc_root)
{ {
struct btrfs_fs_info *fs_info = reloc_root->fs_info;
struct btrfs_root *root; struct btrfs_root *root;
if (reloc_root->last_trans == trans->transid) if (reloc_root->last_trans == trans->transid)
return 0; return 0;
root = read_fs_root(reloc_root->fs_info, reloc_root->root_key.offset); root = read_fs_root(fs_info, reloc_root->root_key.offset);
BUG_ON(IS_ERR(root)); BUG_ON(IS_ERR(root));
BUG_ON(root->reloc_root != reloc_root); BUG_ON(root->reloc_root != reloc_root);
@ -2602,6 +2614,7 @@ static noinline_for_stack
u64 calcu_metadata_size(struct reloc_control *rc, u64 calcu_metadata_size(struct reloc_control *rc,
struct backref_node *node, int reserve) struct backref_node *node, int reserve)
{ {
struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
struct backref_node *next = node; struct backref_node *next = node;
struct backref_edge *edge; struct backref_edge *edge;
struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
@ -2616,7 +2629,7 @@ u64 calcu_metadata_size(struct reloc_control *rc,
if (next->processed && (reserve || next != node)) if (next->processed && (reserve || next != node))
break; break;
num_bytes += rc->extent_root->fs_info->nodesize; num_bytes += fs_info->nodesize;
if (list_empty(&next->upper)) if (list_empty(&next->upper))
break; break;
@ -3131,7 +3144,7 @@ static noinline_for_stack
int setup_extent_mapping(struct inode *inode, u64 start, u64 end, int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
u64 block_start) u64 block_start)
{ {
struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct extent_map *em; struct extent_map *em;
int ret = 0; int ret = 0;
@ -3144,7 +3157,7 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
em->len = end + 1 - start; em->len = end + 1 - start;
em->block_len = em->len; em->block_len = em->len;
em->block_start = block_start; em->block_start = block_start;
em->bdev = root->fs_info->fs_devices->latest_bdev; em->bdev = fs_info->fs_devices->latest_bdev;
set_bit(EXTENT_FLAG_PINNED, &em->flags); set_bit(EXTENT_FLAG_PINNED, &em->flags);
lock_extent(&BTRFS_I(inode)->io_tree, start, end); lock_extent(&BTRFS_I(inode)->io_tree, start, end);
@ -3419,11 +3432,11 @@ static int __add_tree_block(struct reloc_control *rc,
u64 bytenr, u32 blocksize, u64 bytenr, u32 blocksize,
struct rb_root *blocks) struct rb_root *blocks)
{ {
struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
struct btrfs_path *path; struct btrfs_path *path;
struct btrfs_key key; struct btrfs_key key;
int ret; int ret;
bool skinny = btrfs_fs_incompat(rc->extent_root->fs_info, bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
SKINNY_METADATA);
if (tree_block_processed(bytenr, rc)) if (tree_block_processed(bytenr, rc))
return 0; return 0;
@ -3556,6 +3569,7 @@ static int find_data_references(struct reloc_control *rc,
struct btrfs_extent_data_ref *ref, struct btrfs_extent_data_ref *ref,
struct rb_root *blocks) struct rb_root *blocks)
{ {
struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
struct btrfs_path *path; struct btrfs_path *path;
struct tree_block *block; struct tree_block *block;
struct btrfs_root *root; struct btrfs_root *root;
@ -3582,8 +3596,7 @@ static int find_data_references(struct reloc_control *rc,
* it and redo the search. * it and redo the search.
*/ */
if (ref_root == BTRFS_ROOT_TREE_OBJECTID) { if (ref_root == BTRFS_ROOT_TREE_OBJECTID) {
ret = delete_block_group_cache(rc->extent_root->fs_info, ret = delete_block_group_cache(fs_info, rc->block_group,
rc->block_group,
NULL, ref_objectid); NULL, ref_objectid);
if (ret != -ENOENT) if (ret != -ENOENT)
return ret; return ret;
@ -3595,7 +3608,7 @@ static int find_data_references(struct reloc_control *rc,
return -ENOMEM; return -ENOMEM;
path->reada = READA_FORWARD; path->reada = READA_FORWARD;
root = read_fs_root(rc->extent_root->fs_info, ref_root); root = read_fs_root(fs_info, ref_root);
if (IS_ERR(root)) { if (IS_ERR(root)) {
err = PTR_ERR(root); err = PTR_ERR(root);
goto out; goto out;
@ -3821,6 +3834,7 @@ static noinline_for_stack
int find_next_extent(struct reloc_control *rc, struct btrfs_path *path, int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
struct btrfs_key *extent_key) struct btrfs_key *extent_key)
{ {
struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
struct btrfs_key key; struct btrfs_key key;
struct extent_buffer *leaf; struct extent_buffer *leaf;
u64 start, end, last; u64 start, end, last;
@ -3872,7 +3886,7 @@ next:
} }
if (key.type == BTRFS_METADATA_ITEM_KEY && if (key.type == BTRFS_METADATA_ITEM_KEY &&
key.objectid + rc->extent_root->fs_info->nodesize <= key.objectid + fs_info->nodesize <=
rc->search_start) { rc->search_start) {
path->slots[0]++; path->slots[0]++;
goto next; goto next;
@ -3890,7 +3904,7 @@ next:
rc->search_start = key.objectid + key.offset; rc->search_start = key.objectid + key.offset;
else else
rc->search_start = key.objectid + rc->search_start = key.objectid +
rc->extent_root->fs_info->nodesize; fs_info->nodesize;
memcpy(extent_key, &key, sizeof(key)); memcpy(extent_key, &key, sizeof(key));
return 0; return 0;
} }
@ -4233,7 +4247,7 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
key.objectid = objectid; key.objectid = objectid;
key.type = BTRFS_INODE_ITEM_KEY; key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0; key.offset = 0;
inode = btrfs_iget(root->fs_info->sb, &key, root, NULL); inode = btrfs_iget(fs_info->sb, &key, root, NULL);
BUG_ON(IS_ERR(inode) || is_bad_inode(inode)); BUG_ON(IS_ERR(inode) || is_bad_inode(inode));
BTRFS_I(inode)->index_cnt = group->key.objectid; BTRFS_I(inode)->index_cnt = group->key.objectid;
@ -4360,7 +4374,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
goto out; goto out;
} }
describe_relocation(extent_root->fs_info, rc->block_group); describe_relocation(fs_info, rc->block_group);
btrfs_wait_block_group_reservations(rc->block_group); btrfs_wait_block_group_reservations(rc->block_group);
btrfs_wait_nocow_writers(rc->block_group); btrfs_wait_nocow_writers(rc->block_group);
@ -4380,8 +4394,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
if (rc->extents_found == 0) if (rc->extents_found == 0)
break; break;
btrfs_info(extent_root->fs_info, "found %llu extents", btrfs_info(fs_info, "found %llu extents", rc->extents_found);
rc->extents_found);
if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) { if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
ret = btrfs_wait_ordered_range(rc->data_inode, 0, ret = btrfs_wait_ordered_range(rc->data_inode, 0,
@ -4410,10 +4423,11 @@ out:
static noinline_for_stack int mark_garbage_root(struct btrfs_root *root) static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *trans; struct btrfs_trans_handle *trans;
int ret, err; int ret, err;
trans = btrfs_start_transaction(root->fs_info->tree_root, 0); trans = btrfs_start_transaction(fs_info->tree_root, 0);
if (IS_ERR(trans)) if (IS_ERR(trans))
return PTR_ERR(trans); return PTR_ERR(trans);
@ -4421,10 +4435,10 @@ static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
sizeof(root->root_item.drop_progress)); sizeof(root->root_item.drop_progress));
root->root_item.drop_level = 0; root->root_item.drop_level = 0;
btrfs_set_root_refs(&root->root_item, 0); btrfs_set_root_refs(&root->root_item, 0);
ret = btrfs_update_root(trans, root->fs_info->tree_root, ret = btrfs_update_root(trans, fs_info->tree_root,
&root->root_key, &root->root_item); &root->root_key, &root->root_item);
err = btrfs_end_transaction(trans, root->fs_info->tree_root); err = btrfs_end_transaction(trans, fs_info->tree_root);
if (err) if (err)
return err; return err;
return ret; return ret;
@ -4438,6 +4452,7 @@ static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
*/ */
int btrfs_recover_relocation(struct btrfs_root *root) int btrfs_recover_relocation(struct btrfs_root *root)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
LIST_HEAD(reloc_roots); LIST_HEAD(reloc_roots);
struct btrfs_key key; struct btrfs_key key;
struct btrfs_root *fs_root; struct btrfs_root *fs_root;
@ -4459,7 +4474,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
key.offset = (u64)-1; key.offset = (u64)-1;
while (1) { while (1) {
ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
path, 0, 0); path, 0, 0);
if (ret < 0) { if (ret < 0) {
err = ret; err = ret;
@ -4487,7 +4502,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
list_add(&reloc_root->root_list, &reloc_roots); list_add(&reloc_root->root_list, &reloc_roots);
if (btrfs_root_refs(&reloc_root->root_item) > 0) { if (btrfs_root_refs(&reloc_root->root_item) > 0) {
fs_root = read_fs_root(root->fs_info, fs_root = read_fs_root(fs_info,
reloc_root->root_key.offset); reloc_root->root_key.offset);
if (IS_ERR(fs_root)) { if (IS_ERR(fs_root)) {
ret = PTR_ERR(fs_root); ret = PTR_ERR(fs_root);
@ -4513,13 +4528,13 @@ int btrfs_recover_relocation(struct btrfs_root *root)
if (list_empty(&reloc_roots)) if (list_empty(&reloc_roots))
goto out; goto out;
rc = alloc_reloc_control(root->fs_info); rc = alloc_reloc_control(fs_info);
if (!rc) { if (!rc) {
err = -ENOMEM; err = -ENOMEM;
goto out; goto out;
} }
rc->extent_root = root->fs_info->extent_root; rc->extent_root = fs_info->extent_root;
set_reloc_control(rc); set_reloc_control(rc);
@ -4543,8 +4558,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
continue; continue;
} }
fs_root = read_fs_root(root->fs_info, fs_root = read_fs_root(fs_info, reloc_root->root_key.offset);
reloc_root->root_key.offset);
if (IS_ERR(fs_root)) { if (IS_ERR(fs_root)) {
err = PTR_ERR(fs_root); err = PTR_ERR(fs_root);
goto out_free; goto out_free;
@ -4579,8 +4593,7 @@ out:
if (err == 0) { if (err == 0) {
/* cleanup orphan inode in data relocation tree */ /* cleanup orphan inode in data relocation tree */
fs_root = read_fs_root(root->fs_info, fs_root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
BTRFS_DATA_RELOC_TREE_OBJECTID);
if (IS_ERR(fs_root)) if (IS_ERR(fs_root))
err = PTR_ERR(fs_root); err = PTR_ERR(fs_root);
else else
@ -4597,9 +4610,9 @@ out:
*/ */
int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len) int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
{ {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ordered_sum *sums; struct btrfs_ordered_sum *sums;
struct btrfs_ordered_extent *ordered; struct btrfs_ordered_extent *ordered;
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret; int ret;
u64 disk_bytenr; u64 disk_bytenr;
u64 new_bytenr; u64 new_bytenr;
@ -4609,7 +4622,7 @@ int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
BUG_ON(ordered->file_offset != file_pos || ordered->len != len); BUG_ON(ordered->file_offset != file_pos || ordered->len != len);
disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt; disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr, ret = btrfs_lookup_csums_range(fs_info->csum_root, disk_bytenr,
disk_bytenr + len - 1, &list, 0); disk_bytenr + len - 1, &list, 0);
if (ret) if (ret)
goto out; goto out;
@ -4644,13 +4657,14 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *buf, struct btrfs_root *root, struct extent_buffer *buf,
struct extent_buffer *cow) struct extent_buffer *cow)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct reloc_control *rc; struct reloc_control *rc;
struct backref_node *node; struct backref_node *node;
int first_cow = 0; int first_cow = 0;
int level; int level;
int ret = 0; int ret = 0;
rc = root->fs_info->reloc_ctl; rc = fs_info->reloc_ctl;
if (!rc) if (!rc)
return 0; return 0;
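
The hunks above, from __add_reloc_root() through btrfs_reloc_cow_block(), all reduce to the same mechanical transformation: dereference someptr->fs_info once into a local at the top of the function, then use that local everywhere below. A minimal standalone sketch of the before/after shape; the struct layouts here are invented, trimmed-down stand-ins for the real kernel structures:

#include <stdio.h>

struct btrfs_fs_info { unsigned int nodesize; unsigned int sectorsize; };
struct btrfs_root    { struct btrfs_fs_info *fs_info; };

/* Before: each statement re-derefs root->fs_info. */
static unsigned int reserve_before(struct btrfs_root *root)
{
	return root->fs_info->nodesize * 2 + root->fs_info->sectorsize;
}

/* After: cache the pointer once; every later access uses the local. */
static unsigned int reserve_after(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	return fs_info->nodesize * 2 + fs_info->sectorsize;
}

int main(void)
{
	struct btrfs_fs_info fi = { .nodesize = 16384, .sectorsize = 4096 };
	struct btrfs_root root = { .fs_info = &fi };

	printf("%u %u\n", reserve_before(&root), reserve_after(&root));
	return 0;
}

Beyond the readability win, the local keeps long expressions such as the min_reserved and merging_rsv_size computations on one line, which is most of what the reflowed hunks above are doing.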


@ -132,6 +132,7 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
*root, struct btrfs_key *key, struct btrfs_root_item *root, struct btrfs_key *key, struct btrfs_root_item
*item) *item)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path; struct btrfs_path *path;
struct extent_buffer *l; struct extent_buffer *l;
int ret; int ret;
@ -151,8 +152,7 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
if (ret != 0) { if (ret != 0) {
btrfs_print_leaf(root, path->nodes[0]); btrfs_print_leaf(root, path->nodes[0]);
btrfs_crit(root->fs_info, btrfs_crit(fs_info, "unable to update root key %llu %u %llu",
"unable to update root key %llu %u %llu",
key->objectid, key->type, key->offset); key->objectid, key->type, key->offset);
BUG_ON(1); BUG_ON(1);
} }
@ -228,7 +228,7 @@ int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info)
int ret; int ret;
bool can_recover = true; bool can_recover = true;
if (tree_root->fs_info->sb->s_flags & MS_RDONLY) if (fs_info->sb->s_flags & MS_RDONLY)
can_recover = false; can_recover = false;
path = btrfs_alloc_path(); path = btrfs_alloc_path();
@ -276,8 +276,7 @@ int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info)
* in turn reads and inserts fs roots while doing backref * in turn reads and inserts fs roots while doing backref
* walking. * walking.
*/ */
root = btrfs_lookup_fs_root(tree_root->fs_info, root = btrfs_lookup_fs_root(fs_info, root_key.objectid);
root_key.objectid);
if (root) { if (root) {
WARN_ON(!test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, WARN_ON(!test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
&root->state)); &root->state));
@ -298,7 +297,7 @@ int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info)
trans = btrfs_join_transaction(tree_root); trans = btrfs_join_transaction(tree_root);
if (IS_ERR(trans)) { if (IS_ERR(trans)) {
err = PTR_ERR(trans); err = PTR_ERR(trans);
btrfs_handle_fs_error(tree_root->fs_info, err, btrfs_handle_fs_error(fs_info, err,
"Failed to start trans to delete orphan item"); "Failed to start trans to delete orphan item");
break; break;
} }
@ -306,7 +305,7 @@ int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info)
root_key.objectid); root_key.objectid);
btrfs_end_transaction(trans, tree_root); btrfs_end_transaction(trans, tree_root);
if (err) { if (err) {
btrfs_handle_fs_error(tree_root->fs_info, err, btrfs_handle_fs_error(fs_info, err,
"Failed to delete root orphan item"); "Failed to delete root orphan item");
break; break;
} }
@ -321,7 +320,7 @@ int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info)
set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state); set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
err = btrfs_insert_fs_root(root->fs_info, root); err = btrfs_insert_fs_root(fs_info, root);
if (err) { if (err) {
BUG_ON(err == -EEXIST); BUG_ON(err == -EEXIST);
btrfs_free_fs_root(root); btrfs_free_fs_root(root);
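
The btrfs_find_orphan_roots() hunks show the complementary case: the function already receives a struct btrfs_fs_info * parameter, so its body can use that directly instead of re-deriving it through tree_root->fs_info. A hedged sketch of the two interface shapes; field and function names other than fs_info are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

struct btrfs_fs_info { bool readonly; };
struct btrfs_root    { struct btrfs_fs_info *fs_info; };

/* Old shape: caller hands in a root, callee digs fs_info back out of it. */
static bool can_recover_old(struct btrfs_root *tree_root)
{
	return !tree_root->fs_info->readonly;
}

/* New shape: caller passes fs_info itself; in the real code the callee can
 * still reach any root it needs (tree_root, csum_root, ...) via fs_info. */
static bool can_recover_new(struct btrfs_fs_info *fs_info)
{
	return !fs_info->readonly;
}

int main(void)
{
	struct btrfs_fs_info fi = { .readonly = false };
	struct btrfs_root root = { .fs_info = &fi };

	printf("%d %d\n", can_recover_old(&root), can_recover_new(&fi));
	return 0;
}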


@ -489,8 +489,8 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
sctx->bios[i]->next_free = -1; sctx->bios[i]->next_free = -1;
} }
sctx->first_free = 0; sctx->first_free = 0;
sctx->nodesize = dev->fs_info->nodesize; sctx->nodesize = fs_info->nodesize;
sctx->sectorsize = dev->fs_info->sectorsize; sctx->sectorsize = fs_info->sectorsize;
atomic_set(&sctx->bios_in_flight, 0); atomic_set(&sctx->bios_in_flight, 0);
atomic_set(&sctx->workers_pending, 0); atomic_set(&sctx->workers_pending, 0);
atomic_set(&sctx->cancel_req, 0); atomic_set(&sctx->cancel_req, 0);
@ -789,6 +789,7 @@ out:
static void scrub_fixup_nodatasum(struct btrfs_work *work) static void scrub_fixup_nodatasum(struct btrfs_work *work)
{ {
struct btrfs_fs_info *fs_info;
int ret; int ret;
struct scrub_fixup_nodatasum *fixup; struct scrub_fixup_nodatasum *fixup;
struct scrub_ctx *sctx; struct scrub_ctx *sctx;
@ -798,6 +799,7 @@ static void scrub_fixup_nodatasum(struct btrfs_work *work)
fixup = container_of(work, struct scrub_fixup_nodatasum, work); fixup = container_of(work, struct scrub_fixup_nodatasum, work);
sctx = fixup->sctx; sctx = fixup->sctx;
fs_info = fixup->root->fs_info;
path = btrfs_alloc_path(); path = btrfs_alloc_path();
if (!path) { if (!path) {
@ -823,9 +825,8 @@ static void scrub_fixup_nodatasum(struct btrfs_work *work)
* (once it's finished) and rewrite the failed sector if a good copy * (once it's finished) and rewrite the failed sector if a good copy
* can be found. * can be found.
*/ */
ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info, ret = iterate_inodes_from_logical(fixup->logical, fs_info, path,
path, scrub_fixup_readpage, scrub_fixup_readpage, fixup);
fixup);
if (ret < 0) { if (ret < 0) {
uncorrectable = 1; uncorrectable = 1;
goto out; goto out;
@ -843,9 +844,9 @@ out:
spin_lock(&sctx->stat_lock); spin_lock(&sctx->stat_lock);
++sctx->stat.uncorrectable_errors; ++sctx->stat.uncorrectable_errors;
spin_unlock(&sctx->stat_lock); spin_unlock(&sctx->stat_lock);
btrfs_dev_replace_stats_inc(&sctx->fs_info->dev_replace. btrfs_dev_replace_stats_inc(
num_uncorrectable_read_errors); &fs_info->dev_replace.num_uncorrectable_read_errors);
btrfs_err_rl_in_rcu(sctx->fs_info, btrfs_err_rl_in_rcu(fs_info,
"unable to fixup (nodatasum) error at logical %llu on dev %s", "unable to fixup (nodatasum) error at logical %llu on dev %s",
fixup->logical, rcu_str_deref(fixup->dev->name)); fixup->logical, rcu_str_deref(fixup->dev->name));
} }
@ -1176,8 +1177,7 @@ nodatasum_case:
if (scrub_write_page_to_dev_replace(sblock_other, if (scrub_write_page_to_dev_replace(sblock_other,
page_num) != 0) { page_num) != 0) {
btrfs_dev_replace_stats_inc( btrfs_dev_replace_stats_inc(
&sctx->fs_info->dev_replace. &fs_info->dev_replace.num_write_errors);
num_write_errors);
success = 0; success = 0;
} }
} else if (sblock_other) { } else if (sblock_other) {
@ -1563,6 +1563,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
{ {
struct scrub_page *page_bad = sblock_bad->pagev[page_num]; struct scrub_page *page_bad = sblock_bad->pagev[page_num];
struct scrub_page *page_good = sblock_good->pagev[page_num]; struct scrub_page *page_good = sblock_good->pagev[page_num];
struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
BUG_ON(page_bad->page == NULL); BUG_ON(page_bad->page == NULL);
BUG_ON(page_good->page == NULL); BUG_ON(page_good->page == NULL);
@ -1572,7 +1573,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
int ret; int ret;
if (!page_bad->dev->bdev) { if (!page_bad->dev->bdev) {
btrfs_warn_rl(sblock_bad->sctx->fs_info, btrfs_warn_rl(fs_info,
"scrub_repair_page_from_good_copy(bdev == NULL) is unexpected"); "scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
return -EIO; return -EIO;
} }
@ -1594,8 +1595,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
btrfs_dev_stat_inc_and_print(page_bad->dev, btrfs_dev_stat_inc_and_print(page_bad->dev,
BTRFS_DEV_STAT_WRITE_ERRS); BTRFS_DEV_STAT_WRITE_ERRS);
btrfs_dev_replace_stats_inc( btrfs_dev_replace_stats_inc(
&sblock_bad->sctx->fs_info-> &fs_info->dev_replace.num_write_errors);
dev_replace.num_write_errors);
bio_put(bio); bio_put(bio);
return -EIO; return -EIO;
} }
@ -1607,6 +1607,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock) static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
{ {
struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
int page_num; int page_num;
/* /*
@ -1622,8 +1623,7 @@ static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
ret = scrub_write_page_to_dev_replace(sblock, page_num); ret = scrub_write_page_to_dev_replace(sblock, page_num);
if (ret) if (ret)
btrfs_dev_replace_stats_inc( btrfs_dev_replace_stats_inc(
&sblock->sctx->fs_info->dev_replace. &fs_info->dev_replace.num_write_errors);
num_write_errors);
} }
} }
@ -1857,8 +1857,7 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
{ {
struct scrub_ctx *sctx = sblock->sctx; struct scrub_ctx *sctx = sblock->sctx;
struct btrfs_header *h; struct btrfs_header *h;
struct btrfs_root *root = sctx->fs_info->dev_root; struct btrfs_fs_info *fs_info = sctx->fs_info;
struct btrfs_fs_info *fs_info = root->fs_info;
u8 calculated_csum[BTRFS_CSUM_SIZE]; u8 calculated_csum[BTRFS_CSUM_SIZE];
u8 on_disk_csum[BTRFS_CSUM_SIZE]; u8 on_disk_csum[BTRFS_CSUM_SIZE];
struct page *page; struct page *page;
@ -2138,6 +2137,7 @@ static void scrub_missing_raid56_worker(struct btrfs_work *work)
{ {
struct scrub_block *sblock = container_of(work, struct scrub_block, work); struct scrub_block *sblock = container_of(work, struct scrub_block, work);
struct scrub_ctx *sctx = sblock->sctx; struct scrub_ctx *sctx = sblock->sctx;
struct btrfs_fs_info *fs_info = sctx->fs_info;
u64 logical; u64 logical;
struct btrfs_device *dev; struct btrfs_device *dev;
@ -2151,14 +2151,14 @@ static void scrub_missing_raid56_worker(struct btrfs_work *work)
spin_lock(&sctx->stat_lock); spin_lock(&sctx->stat_lock);
sctx->stat.read_errors++; sctx->stat.read_errors++;
spin_unlock(&sctx->stat_lock); spin_unlock(&sctx->stat_lock);
btrfs_err_rl_in_rcu(sctx->fs_info, btrfs_err_rl_in_rcu(fs_info,
"IO error rebuilding logical %llu for dev %s", "IO error rebuilding logical %llu for dev %s",
logical, rcu_str_deref(dev->name)); logical, rcu_str_deref(dev->name));
} else if (sblock->header_error || sblock->checksum_error) { } else if (sblock->header_error || sblock->checksum_error) {
spin_lock(&sctx->stat_lock); spin_lock(&sctx->stat_lock);
sctx->stat.uncorrectable_errors++; sctx->stat.uncorrectable_errors++;
spin_unlock(&sctx->stat_lock); spin_unlock(&sctx->stat_lock);
btrfs_err_rl_in_rcu(sctx->fs_info, btrfs_err_rl_in_rcu(fs_info,
"failed to rebuild valid logical %llu for dev %s", "failed to rebuild valid logical %llu for dev %s",
logical, rcu_str_deref(dev->name)); logical, rcu_str_deref(dev->name));
} else { } else {
@ -2749,6 +2749,7 @@ static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
static void scrub_parity_bio_endio(struct bio *bio) static void scrub_parity_bio_endio(struct bio *bio)
{ {
struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private; struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;
if (bio->bi_error) if (bio->bi_error)
bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap, bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
@ -2758,14 +2759,14 @@ static void scrub_parity_bio_endio(struct bio *bio)
btrfs_init_work(&sparity->work, btrfs_scrubparity_helper, btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
scrub_parity_bio_endio_worker, NULL, NULL); scrub_parity_bio_endio_worker, NULL, NULL);
btrfs_queue_work(sparity->sctx->fs_info->scrub_parity_workers, btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work);
&sparity->work);
} }
static void scrub_parity_check_and_repair(struct scrub_parity *sparity) static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
{ {
struct scrub_ctx *sctx = sparity->sctx; struct scrub_ctx *sctx = sparity->sctx;
struct btrfs_root *dev_root = sctx->fs_info->dev_root; struct btrfs_fs_info *fs_info = sctx->fs_info;
struct btrfs_root *dev_root = fs_info->dev_root;
struct bio *bio; struct bio *bio;
struct btrfs_raid_bio *rbio; struct btrfs_raid_bio *rbio;
struct scrub_page *spage; struct scrub_page *spage;
@ -2778,8 +2779,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
goto out; goto out;
length = sparity->logic_end - sparity->logic_start; length = sparity->logic_end - sparity->logic_start;
ret = btrfs_map_sblock(sctx->fs_info, BTRFS_MAP_WRITE, ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
sparity->logic_start,
&length, &bbio, 0, 1); &length, &bbio, 0, 1);
if (ret || !bbio || !bbio->raid_map) if (ret || !bbio || !bbio->raid_map)
goto bbio_out; goto bbio_out;
@ -2866,7 +2866,7 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
int extent_mirror_num; int extent_mirror_num;
int stop_loop = 0; int stop_loop = 0;
nsectors = div_u64(map->stripe_len, root->fs_info->sectorsize); nsectors = div_u64(map->stripe_len, fs_info->sectorsize);
bitmap_len = scrub_calc_parity_bitmap_len(nsectors); bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len, sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
GFP_NOFS); GFP_NOFS);
@ -2937,7 +2937,7 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
goto next; goto next;
if (key.type == BTRFS_METADATA_ITEM_KEY) if (key.type == BTRFS_METADATA_ITEM_KEY)
bytes = root->fs_info->nodesize; bytes = fs_info->nodesize;
else else
bytes = key.offset; bytes = key.offset;
@ -3290,7 +3290,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
goto next; goto next;
if (key.type == BTRFS_METADATA_ITEM_KEY) if (key.type == BTRFS_METADATA_ITEM_KEY)
bytes = root->fs_info->nodesize; bytes = fs_info->nodesize;
else else
bytes = key.offset; bytes = key.offset;
@ -3497,8 +3497,8 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
{ {
struct btrfs_dev_extent *dev_extent = NULL; struct btrfs_dev_extent *dev_extent = NULL;
struct btrfs_path *path; struct btrfs_path *path;
struct btrfs_root *root = sctx->fs_info->dev_root; struct btrfs_fs_info *fs_info = sctx->fs_info;
struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_root *root = fs_info->dev_root;
u64 length; u64 length;
u64 chunk_offset; u64 chunk_offset;
int ret = 0; int ret = 0;
@ -3747,16 +3747,16 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
u64 bytenr; u64 bytenr;
u64 gen; u64 gen;
int ret; int ret;
struct btrfs_root *root = sctx->fs_info->dev_root; struct btrfs_fs_info *fs_info = sctx->fs_info;
if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
return -EIO; return -EIO;
/* Seed devices of a new filesystem has their own generation. */ /* Seed devices of a new filesystem has their own generation. */
if (scrub_dev->fs_devices != root->fs_info->fs_devices) if (scrub_dev->fs_devices != fs_info->fs_devices)
gen = scrub_dev->generation; gen = scrub_dev->generation;
else else
gen = root->fs_info->last_trans_committed; gen = fs_info->last_trans_committed;
for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
bytenr = btrfs_sb_offset(i); bytenr = btrfs_sb_offset(i);
@ -4052,16 +4052,17 @@ int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
int btrfs_scrub_progress(struct btrfs_root *root, u64 devid, int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
struct btrfs_scrub_progress *progress) struct btrfs_scrub_progress *progress)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_device *dev; struct btrfs_device *dev;
struct scrub_ctx *sctx = NULL; struct scrub_ctx *sctx = NULL;
mutex_lock(&root->fs_info->fs_devices->device_list_mutex); mutex_lock(&fs_info->fs_devices->device_list_mutex);
dev = btrfs_find_device(root->fs_info, devid, NULL, NULL); dev = btrfs_find_device(fs_info, devid, NULL, NULL);
if (dev) if (dev)
sctx = dev->scrub_device; sctx = dev->scrub_device;
if (sctx) if (sctx)
memcpy(progress, &sctx->stat, sizeof(*progress)); memcpy(progress, &sctx->stat, sizeof(*progress));
mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); mutex_unlock(&fs_info->fs_devices->device_list_mutex);
return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV; return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
} }
@ -4171,20 +4172,17 @@ static void copy_nocow_pages_worker(struct btrfs_work *work)
struct scrub_copy_nocow_ctx *nocow_ctx = struct scrub_copy_nocow_ctx *nocow_ctx =
container_of(work, struct scrub_copy_nocow_ctx, work); container_of(work, struct scrub_copy_nocow_ctx, work);
struct scrub_ctx *sctx = nocow_ctx->sctx; struct scrub_ctx *sctx = nocow_ctx->sctx;
struct btrfs_fs_info *fs_info = sctx->fs_info;
struct btrfs_root *root = fs_info->extent_root;
u64 logical = nocow_ctx->logical; u64 logical = nocow_ctx->logical;
u64 len = nocow_ctx->len; u64 len = nocow_ctx->len;
int mirror_num = nocow_ctx->mirror_num; int mirror_num = nocow_ctx->mirror_num;
u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace; u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
int ret; int ret;
struct btrfs_trans_handle *trans = NULL; struct btrfs_trans_handle *trans = NULL;
struct btrfs_fs_info *fs_info;
struct btrfs_path *path; struct btrfs_path *path;
struct btrfs_root *root;
int not_written = 0; int not_written = 0;
fs_info = sctx->fs_info;
root = fs_info->extent_root;
path = btrfs_alloc_path(); path = btrfs_alloc_path();
if (!path) { if (!path) {
spin_lock(&sctx->stat_lock); spin_lock(&sctx->stat_lock);
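
In copy_nocow_pages_worker() the fs_info and root locals move from late assignments up into the declarations themselves. That is safe because block-scope initializers run in declaration order, so a later initializer may use a variable declared just above it. A standalone sketch of that ordering guarantee; the struct layouts are invented simplifications of the real scrub types:

#include <stdio.h>

struct fs_info   { int generation; };
struct scrub_ctx { struct fs_info *fs_info; };
struct nocow_ctx { struct scrub_ctx *sctx; };

static int worker(struct nocow_ctx *nocow_ctx)
{
	struct scrub_ctx *sctx = nocow_ctx->sctx;   /* initialized first */
	struct fs_info *fs_info = sctx->fs_info;    /* may use sctx above */

	return fs_info->generation;
}

int main(void)
{
	struct fs_info fi = { .generation = 42 };
	struct scrub_ctx sc = { .fs_info = &fi };
	struct nocow_ctx nc = { .sctx = &sc };

	printf("%d\n", worker(&nc));
	return 0;
}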


@ -1431,9 +1431,9 @@ static int find_extent_clone(struct send_ctx *sctx,
extent_item_pos = logical - found_key.objectid; extent_item_pos = logical - found_key.objectid;
else else
extent_item_pos = 0; extent_item_pos = 0;
ret = iterate_extent_inodes(fs_info, ret = iterate_extent_inodes(fs_info, found_key.objectid,
found_key.objectid, extent_item_pos, 1, extent_item_pos, 1, __iterate_backrefs,
__iterate_backrefs, backref_ctx); backref_ctx);
if (ret < 0) if (ret < 0)
goto out; goto out;
@ -6137,17 +6137,17 @@ static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
*/ */
if (root->send_in_progress < 0) if (root->send_in_progress < 0)
btrfs_err(root->fs_info, btrfs_err(root->fs_info,
"send_in_progres unbalanced %d root %llu", "send_in_progres unbalanced %d root %llu",
root->send_in_progress, root->root_key.objectid); root->send_in_progress, root->root_key.objectid);
spin_unlock(&root->root_item_lock); spin_unlock(&root->root_item_lock);
} }
long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_) long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
{ {
int ret = 0; int ret = 0;
struct btrfs_root *send_root; struct btrfs_root *send_root = BTRFS_I(file_inode(mnt_file))->root;
struct btrfs_fs_info *fs_info = send_root->fs_info;
struct btrfs_root *clone_root; struct btrfs_root *clone_root;
struct btrfs_fs_info *fs_info;
struct btrfs_ioctl_send_args *arg = NULL; struct btrfs_ioctl_send_args *arg = NULL;
struct btrfs_key key; struct btrfs_key key;
struct send_ctx *sctx = NULL; struct send_ctx *sctx = NULL;
@ -6161,9 +6161,6 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
if (!capable(CAP_SYS_ADMIN)) if (!capable(CAP_SYS_ADMIN))
return -EPERM; return -EPERM;
send_root = BTRFS_I(file_inode(mnt_file))->root;
fs_info = send_root->fs_info;
/* /*
* The subvolume must remain read-only during send, protect against * The subvolume must remain read-only during send, protect against
* making it RW. This also protects against deletion. * making it RW. This also protects against deletion.


@ -411,8 +411,8 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
bool saved_compress_force; bool saved_compress_force;
int no_compress = 0; int no_compress = 0;
cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy); cache_gen = btrfs_super_cache_generation(info->super_copy);
if (btrfs_fs_compat_ro(root->fs_info, FREE_SPACE_TREE)) if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE))
btrfs_set_opt(info->mount_opt, FREE_SPACE_TREE); btrfs_set_opt(info->mount_opt, FREE_SPACE_TREE);
else if (cache_gen) else if (cache_gen)
btrfs_set_opt(info->mount_opt, SPACE_CACHE); btrfs_set_opt(info->mount_opt, SPACE_CACHE);
@ -442,7 +442,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
token = match_token(p, tokens, args); token = match_token(p, tokens, args);
switch (token) { switch (token) {
case Opt_degraded: case Opt_degraded:
btrfs_info(root->fs_info, "allowing degraded mounts"); btrfs_info(info, "allowing degraded mounts");
btrfs_set_opt(info->mount_opt, DEGRADED); btrfs_set_opt(info->mount_opt, DEGRADED);
break; break;
case Opt_subvol: case Opt_subvol:
@ -461,11 +461,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
case Opt_datasum: case Opt_datasum:
if (btrfs_test_opt(info, NODATASUM)) { if (btrfs_test_opt(info, NODATASUM)) {
if (btrfs_test_opt(info, NODATACOW)) if (btrfs_test_opt(info, NODATACOW))
btrfs_info(root->fs_info, btrfs_info(info,
"setting datasum, datacow enabled"); "setting datasum, datacow enabled");
else else
btrfs_info(root->fs_info, btrfs_info(info, "setting datasum");
"setting datasum");
} }
btrfs_clear_opt(info->mount_opt, NODATACOW); btrfs_clear_opt(info->mount_opt, NODATACOW);
btrfs_clear_opt(info->mount_opt, NODATASUM); btrfs_clear_opt(info->mount_opt, NODATASUM);
@ -474,11 +473,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
if (!btrfs_test_opt(info, NODATACOW)) { if (!btrfs_test_opt(info, NODATACOW)) {
if (!btrfs_test_opt(info, COMPRESS) || if (!btrfs_test_opt(info, COMPRESS) ||
!btrfs_test_opt(info, FORCE_COMPRESS)) { !btrfs_test_opt(info, FORCE_COMPRESS)) {
btrfs_info(root->fs_info, btrfs_info(info,
"setting nodatacow, compression disabled"); "setting nodatacow, compression disabled");
} else { } else {
btrfs_info(root->fs_info, btrfs_info(info, "setting nodatacow");
"setting nodatacow");
} }
} }
btrfs_clear_opt(info->mount_opt, COMPRESS); btrfs_clear_opt(info->mount_opt, COMPRESS);
@ -545,8 +543,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
compress_force != saved_compress_force)) || compress_force != saved_compress_force)) ||
(!btrfs_test_opt(info, COMPRESS) && (!btrfs_test_opt(info, COMPRESS) &&
no_compress == 1)) { no_compress == 1)) {
btrfs_info(root->fs_info, btrfs_info(info, "%s %s compression",
"%s %s compression",
(compress_force) ? "force" : "use", (compress_force) ? "force" : "use",
compress_type); compress_type);
} }
@ -594,10 +591,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
if (info->max_inline) { if (info->max_inline) {
info->max_inline = min_t(u64, info->max_inline = min_t(u64,
info->max_inline, info->max_inline,
root->fs_info->sectorsize); info->sectorsize);
} }
btrfs_info(root->fs_info, "max_inline at %llu", btrfs_info(info, "max_inline at %llu",
info->max_inline); info->max_inline);
} else { } else {
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;
@ -610,8 +607,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
info->alloc_start = memparse(num, NULL); info->alloc_start = memparse(num, NULL);
mutex_unlock(&info->chunk_mutex); mutex_unlock(&info->chunk_mutex);
kfree(num); kfree(num);
btrfs_info(root->fs_info, btrfs_info(info, "allocations start at %llu",
"allocations start at %llu",
info->alloc_start); info->alloc_start);
} else { } else {
ret = -ENOMEM; ret = -ENOMEM;
@ -620,16 +616,15 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
break; break;
case Opt_acl: case Opt_acl:
#ifdef CONFIG_BTRFS_FS_POSIX_ACL #ifdef CONFIG_BTRFS_FS_POSIX_ACL
root->fs_info->sb->s_flags |= MS_POSIXACL; info->sb->s_flags |= MS_POSIXACL;
break; break;
#else #else
btrfs_err(root->fs_info, btrfs_err(info, "support for ACL not compiled in!");
"support for ACL not compiled in!");
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
#endif #endif
case Opt_noacl: case Opt_noacl:
root->fs_info->sb->s_flags &= ~MS_POSIXACL; info->sb->s_flags &= ~MS_POSIXACL;
break; break;
case Opt_notreelog: case Opt_notreelog:
btrfs_set_and_info(info, NOTREELOG, btrfs_set_and_info(info, NOTREELOG,
@ -658,8 +653,8 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
goto out; goto out;
} else if (intarg >= 0) { } else if (intarg >= 0) {
info->metadata_ratio = intarg; info->metadata_ratio = intarg;
btrfs_info(root->fs_info, "metadata ratio %d", btrfs_info(info, "metadata ratio %d",
info->metadata_ratio); info->metadata_ratio);
} else { } else {
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
@ -677,15 +672,14 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
case Opt_space_cache_version: case Opt_space_cache_version:
if (token == Opt_space_cache || if (token == Opt_space_cache ||
strcmp(args[0].from, "v1") == 0) { strcmp(args[0].from, "v1") == 0) {
btrfs_clear_opt(root->fs_info->mount_opt, btrfs_clear_opt(info->mount_opt,
FREE_SPACE_TREE); FREE_SPACE_TREE);
btrfs_set_and_info(info, SPACE_CACHE, btrfs_set_and_info(info, SPACE_CACHE,
"enabling disk space caching"); "enabling disk space caching");
} else if (strcmp(args[0].from, "v2") == 0) { } else if (strcmp(args[0].from, "v2") == 0) {
btrfs_clear_opt(root->fs_info->mount_opt, btrfs_clear_opt(info->mount_opt,
SPACE_CACHE); SPACE_CACHE);
btrfs_set_and_info(info, btrfs_set_and_info(info, FREE_SPACE_TREE,
FREE_SPACE_TREE,
"enabling free space tree"); "enabling free space tree");
} else { } else {
ret = -EINVAL; ret = -EINVAL;
@ -697,14 +691,12 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
break; break;
case Opt_no_space_cache: case Opt_no_space_cache:
if (btrfs_test_opt(info, SPACE_CACHE)) { if (btrfs_test_opt(info, SPACE_CACHE)) {
btrfs_clear_and_info(info, btrfs_clear_and_info(info, SPACE_CACHE,
SPACE_CACHE, "disabling disk space caching");
"disabling disk space caching");
} }
if (btrfs_test_opt(info, FREE_SPACE_TREE)) { if (btrfs_test_opt(info, FREE_SPACE_TREE)) {
btrfs_clear_and_info(info, btrfs_clear_and_info(info, FREE_SPACE_TREE,
FREE_SPACE_TREE, "disabling free space tree");
"disabling free space tree");
} }
break; break;
case Opt_inode_cache: case Opt_inode_cache:
@ -737,10 +729,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
"disabling auto defrag"); "disabling auto defrag");
break; break;
case Opt_recovery: case Opt_recovery:
btrfs_warn(root->fs_info, btrfs_warn(info,
"'recovery' is deprecated, use 'usebackuproot' instead"); "'recovery' is deprecated, use 'usebackuproot' instead");
case Opt_usebackuproot: case Opt_usebackuproot:
btrfs_info(root->fs_info, btrfs_info(info,
"trying to use backup root at mount time"); "trying to use backup root at mount time");
btrfs_set_opt(info->mount_opt, USEBACKUPROOT); btrfs_set_opt(info->mount_opt, USEBACKUPROOT);
break; break;
@ -749,14 +741,14 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
break; break;
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
case Opt_check_integrity_including_extent_data: case Opt_check_integrity_including_extent_data:
btrfs_info(root->fs_info, btrfs_info(info,
"enabling check integrity including extent data"); "enabling check integrity including extent data");
btrfs_set_opt(info->mount_opt, btrfs_set_opt(info->mount_opt,
CHECK_INTEGRITY_INCLUDING_EXTENT_DATA); CHECK_INTEGRITY_INCLUDING_EXTENT_DATA);
btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY); btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
break; break;
case Opt_check_integrity: case Opt_check_integrity:
btrfs_info(root->fs_info, "enabling check integrity"); btrfs_info(info, "enabling check integrity");
btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY); btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
break; break;
case Opt_check_integrity_print_mask: case Opt_check_integrity_print_mask:
@ -765,7 +757,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
goto out; goto out;
} else if (intarg >= 0) { } else if (intarg >= 0) {
info->check_integrity_print_mask = intarg; info->check_integrity_print_mask = intarg;
btrfs_info(root->fs_info, btrfs_info(info,
"check_integrity_print_mask 0x%x", "check_integrity_print_mask 0x%x",
info->check_integrity_print_mask); info->check_integrity_print_mask);
} else { } else {
@ -777,8 +769,8 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
case Opt_check_integrity_including_extent_data: case Opt_check_integrity_including_extent_data:
case Opt_check_integrity: case Opt_check_integrity:
case Opt_check_integrity_print_mask: case Opt_check_integrity_print_mask:
btrfs_err(root->fs_info, btrfs_err(info,
"support for check_integrity* not compiled in!"); "support for check_integrity* not compiled in!");
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
#endif #endif
@ -798,20 +790,19 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
intarg = 0; intarg = 0;
ret = match_int(&args[0], &intarg); ret = match_int(&args[0], &intarg);
if (ret < 0) { if (ret < 0) {
btrfs_err(root->fs_info, btrfs_err(info, "invalid commit interval");
"invalid commit interval");
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
} }
if (intarg > 0) { if (intarg > 0) {
if (intarg > 300) { if (intarg > 300) {
btrfs_warn(root->fs_info, btrfs_warn(info,
"excessive commit interval %d", "excessive commit interval %d",
intarg); intarg);
} }
info->commit_interval = intarg; info->commit_interval = intarg;
} else { } else {
btrfs_info(root->fs_info, btrfs_info(info,
"using default commit interval %ds", "using default commit interval %ds",
BTRFS_DEFAULT_COMMIT_INTERVAL); BTRFS_DEFAULT_COMMIT_INTERVAL);
info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL; info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
@ -819,23 +810,22 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
break; break;
#ifdef CONFIG_BTRFS_DEBUG #ifdef CONFIG_BTRFS_DEBUG
case Opt_fragment_all: case Opt_fragment_all:
btrfs_info(root->fs_info, "fragmenting all space"); btrfs_info(info, "fragmenting all space");
btrfs_set_opt(info->mount_opt, FRAGMENT_DATA); btrfs_set_opt(info->mount_opt, FRAGMENT_DATA);
btrfs_set_opt(info->mount_opt, FRAGMENT_METADATA); btrfs_set_opt(info->mount_opt, FRAGMENT_METADATA);
break; break;
case Opt_fragment_metadata: case Opt_fragment_metadata:
btrfs_info(root->fs_info, "fragmenting metadata"); btrfs_info(info, "fragmenting metadata");
btrfs_set_opt(info->mount_opt, btrfs_set_opt(info->mount_opt,
FRAGMENT_METADATA); FRAGMENT_METADATA);
break; break;
case Opt_fragment_data: case Opt_fragment_data:
btrfs_info(root->fs_info, "fragmenting data"); btrfs_info(info, "fragmenting data");
btrfs_set_opt(info->mount_opt, FRAGMENT_DATA); btrfs_set_opt(info->mount_opt, FRAGMENT_DATA);
break; break;
#endif #endif
case Opt_err: case Opt_err:
btrfs_info(root->fs_info, btrfs_info(info, "unrecognized mount option '%s'", p);
"unrecognized mount option '%s'", p);
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
default: default:
@ -847,22 +837,22 @@ check:
* Extra check for current option against current flag * Extra check for current option against current flag
*/ */
if (btrfs_test_opt(info, NOLOGREPLAY) && !(new_flags & MS_RDONLY)) { if (btrfs_test_opt(info, NOLOGREPLAY) && !(new_flags & MS_RDONLY)) {
btrfs_err(root->fs_info, btrfs_err(info,
"nologreplay must be used with ro mount option"); "nologreplay must be used with ro mount option");
ret = -EINVAL; ret = -EINVAL;
} }
out: out:
if (btrfs_fs_compat_ro(root->fs_info, FREE_SPACE_TREE) && if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE) &&
!btrfs_test_opt(info, FREE_SPACE_TREE) && !btrfs_test_opt(info, FREE_SPACE_TREE) &&
!btrfs_test_opt(info, CLEAR_CACHE)) { !btrfs_test_opt(info, CLEAR_CACHE)) {
btrfs_err(root->fs_info, "cannot disable free space tree"); btrfs_err(info, "cannot disable free space tree");
ret = -EINVAL; ret = -EINVAL;
} }
if (!ret && btrfs_test_opt(info, SPACE_CACHE)) if (!ret && btrfs_test_opt(info, SPACE_CACHE))
btrfs_info(root->fs_info, "disk space caching is enabled"); btrfs_info(info, "disk space caching is enabled");
if (!ret && btrfs_test_opt(info, FREE_SPACE_TREE)) if (!ret && btrfs_test_opt(info, FREE_SPACE_TREE))
btrfs_info(root->fs_info, "using free space tree"); btrfs_info(info, "using free space tree");
kfree(orig); kfree(orig);
return ret; return ret;
} }
@ -1223,7 +1213,6 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
{ {
struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb); struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb);
struct btrfs_root *root = info->tree_root;
char *compress_type; char *compress_type;
if (btrfs_test_opt(info, DEGRADED)) if (btrfs_test_opt(info, DEGRADED))
@ -1265,7 +1254,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
seq_puts(seq, ",flushoncommit"); seq_puts(seq, ",flushoncommit");
if (btrfs_test_opt(info, DISCARD)) if (btrfs_test_opt(info, DISCARD))
seq_puts(seq, ",discard"); seq_puts(seq, ",discard");
if (!(root->fs_info->sb->s_flags & MS_POSIXACL)) if (!(info->sb->s_flags & MS_POSIXACL))
seq_puts(seq, ",noacl"); seq_puts(seq, ",noacl");
if (btrfs_test_opt(info, SPACE_CACHE)) if (btrfs_test_opt(info, SPACE_CACHE))
seq_puts(seq, ",space_cache"); seq_puts(seq, ",space_cache");
@ -1788,7 +1777,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
if (ret) if (ret)
goto restore; goto restore;
} else { } else {
if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) { if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
btrfs_err(fs_info, btrfs_err(fs_info,
"Remounting read-write after error is not allowed"); "Remounting read-write after error is not allowed");
ret = -EINVAL; ret = -EINVAL;
@ -2246,9 +2235,10 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
static int btrfs_freeze(struct super_block *sb) static int btrfs_freeze(struct super_block *sb)
{ {
struct btrfs_trans_handle *trans; struct btrfs_trans_handle *trans;
struct btrfs_root *root = btrfs_sb(sb)->tree_root; struct btrfs_fs_info *fs_info = btrfs_sb(sb);
struct btrfs_root *root = fs_info->tree_root;
root->fs_info->fs_frozen = 1; fs_info->fs_frozen = 1;
/* /*
* We don't need a barrier here, we'll wait for any transaction that * We don't need a barrier here, we'll wait for any transaction that
* could be in progress on other threads (and do delayed iputs that * could be in progress on other threads (and do delayed iputs that
@ -2267,9 +2257,7 @@ static int btrfs_freeze(struct super_block *sb)
static int btrfs_unfreeze(struct super_block *sb) static int btrfs_unfreeze(struct super_block *sb)
{ {
struct btrfs_root *root = btrfs_sb(sb)->tree_root; btrfs_sb(sb)->fs_frozen = 0;
root->fs_info->fs_frozen = 0;
return 0; return 0;
} }
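
btrfs_freeze() and btrfs_unfreeze() above stop going through tree_root entirely: btrfs_sb(sb) already yields the fs_info, so the root local disappears. Judging only from how it is used in these hunks, btrfs_sb() acts as a thin accessor over the VFS-private pointer; a simplified, assumption-laden sketch of that pattern follows, where example_sb() and the struct layouts are stand-ins rather than the real definitions:

#include <stdio.h>

struct btrfs_fs_info { int fs_frozen; };
struct super_block   { void *s_fs_info; };

/* Hypothetical stand-in for the real btrfs_sb() accessor. */
static struct btrfs_fs_info *example_sb(struct super_block *sb)
{
	return sb->s_fs_info;
}

static int example_freeze(struct super_block *sb)
{
	struct btrfs_fs_info *fs_info = example_sb(sb);

	fs_info->fs_frozen = 1;   /* was: root->fs_info->fs_frozen = 1 */
	return 0;
}

int main(void)
{
	struct btrfs_fs_info fi = { 0 };
	struct super_block sb = { .s_fs_info = &fi };

	example_freeze(&sb);
	printf("frozen=%d\n", fi.fs_frozen);
	return 0;
}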


@ -314,9 +314,11 @@ static int record_root_in_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct btrfs_root *root,
int force) int force)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
if ((test_bit(BTRFS_ROOT_REF_COWS, &root->state) && if ((test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
root->last_trans < trans->transid) || force) { root->last_trans < trans->transid) || force) {
WARN_ON(root == root->fs_info->extent_root); WARN_ON(root == fs_info->extent_root);
WARN_ON(root->commit_root != root->node); WARN_ON(root->commit_root != root->node);
/* /*
@@ -331,15 +333,15 @@ static int record_root_in_trans(struct btrfs_trans_handle *trans,
*/ */
smp_wmb(); smp_wmb();
spin_lock(&root->fs_info->fs_roots_radix_lock); spin_lock(&fs_info->fs_roots_radix_lock);
if (root->last_trans == trans->transid && !force) { if (root->last_trans == trans->transid && !force) {
spin_unlock(&root->fs_info->fs_roots_radix_lock); spin_unlock(&fs_info->fs_roots_radix_lock);
return 0; return 0;
} }
radix_tree_tag_set(&root->fs_info->fs_roots_radix, radix_tree_tag_set(&fs_info->fs_roots_radix,
(unsigned long)root->root_key.objectid, (unsigned long)root->root_key.objectid,
BTRFS_ROOT_TRANS_TAG); BTRFS_ROOT_TRANS_TAG);
spin_unlock(&root->fs_info->fs_roots_radix_lock); spin_unlock(&fs_info->fs_roots_radix_lock);
root->last_trans = trans->transid; root->last_trans = trans->transid;
/* this is pretty tricky. We don't want to /* this is pretty tricky. We don't want to
@@ -372,6 +374,7 @@ static int record_root_in_trans(struct btrfs_trans_handle *trans,
void btrfs_add_dropped_root(struct btrfs_trans_handle *trans, void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root) struct btrfs_root *root)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_transaction *cur_trans = trans->transaction; struct btrfs_transaction *cur_trans = trans->transaction;
/* Add ourselves to the transaction dropped list */ /* Add ourselves to the transaction dropped list */
@@ -380,16 +383,18 @@ void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
spin_unlock(&cur_trans->dropped_roots_lock); spin_unlock(&cur_trans->dropped_roots_lock);
/* Make sure we don't try to update the root at commit time */ /* Make sure we don't try to update the root at commit time */
spin_lock(&root->fs_info->fs_roots_radix_lock); spin_lock(&fs_info->fs_roots_radix_lock);
radix_tree_tag_clear(&root->fs_info->fs_roots_radix, radix_tree_tag_clear(&fs_info->fs_roots_radix,
(unsigned long)root->root_key.objectid, (unsigned long)root->root_key.objectid,
BTRFS_ROOT_TRANS_TAG); BTRFS_ROOT_TRANS_TAG);
spin_unlock(&root->fs_info->fs_roots_radix_lock); spin_unlock(&fs_info->fs_roots_radix_lock);
} }
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root) struct btrfs_root *root)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state)) if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
return 0; return 0;
@@ -402,9 +407,9 @@ int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
!test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state)) !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
return 0; return 0;
mutex_lock(&root->fs_info->reloc_mutex); mutex_lock(&fs_info->reloc_mutex);
record_root_in_trans(trans, root, 0); record_root_in_trans(trans, root, 0);
mutex_unlock(&root->fs_info->reloc_mutex); mutex_unlock(&fs_info->reloc_mutex);
return 0; return 0;
} }
@@ -422,33 +427,36 @@ static inline int is_transaction_blocked(struct btrfs_transaction *trans)
*/ */
static void wait_current_trans(struct btrfs_root *root) static void wait_current_trans(struct btrfs_root *root)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_transaction *cur_trans; struct btrfs_transaction *cur_trans;
spin_lock(&root->fs_info->trans_lock); spin_lock(&fs_info->trans_lock);
cur_trans = root->fs_info->running_transaction; cur_trans = fs_info->running_transaction;
if (cur_trans && is_transaction_blocked(cur_trans)) { if (cur_trans && is_transaction_blocked(cur_trans)) {
atomic_inc(&cur_trans->use_count); atomic_inc(&cur_trans->use_count);
spin_unlock(&root->fs_info->trans_lock); spin_unlock(&fs_info->trans_lock);
wait_event(root->fs_info->transaction_wait, wait_event(fs_info->transaction_wait,
cur_trans->state >= TRANS_STATE_UNBLOCKED || cur_trans->state >= TRANS_STATE_UNBLOCKED ||
cur_trans->aborted); cur_trans->aborted);
btrfs_put_transaction(cur_trans); btrfs_put_transaction(cur_trans);
} else { } else {
spin_unlock(&root->fs_info->trans_lock); spin_unlock(&fs_info->trans_lock);
} }
} }
static int may_wait_transaction(struct btrfs_root *root, int type) static int may_wait_transaction(struct btrfs_root *root, int type)
{ {
if (test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) struct btrfs_fs_info *fs_info = root->fs_info;
if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
return 0; return 0;
if (type == TRANS_USERSPACE) if (type == TRANS_USERSPACE)
return 1; return 1;
if (type == TRANS_START && if (type == TRANS_START &&
!atomic_read(&root->fs_info->open_ioctl_trans)) !atomic_read(&fs_info->open_ioctl_trans))
return 1; return 1;
return 0; return 0;
@@ -456,7 +464,9 @@ static int may_wait_transaction(struct btrfs_root *root, int type)
static inline bool need_reserve_reloc_root(struct btrfs_root *root) static inline bool need_reserve_reloc_root(struct btrfs_root *root)
{ {
if (!root->fs_info->reloc_ctl || struct btrfs_fs_info *fs_info = root->fs_info;
if (!fs_info->reloc_ctl ||
!test_bit(BTRFS_ROOT_REF_COWS, &root->state) || !test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID || root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
root->reloc_root) root->reloc_root)
@@ -469,6 +479,8 @@ static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, unsigned int num_items, start_transaction(struct btrfs_root *root, unsigned int num_items,
unsigned int type, enum btrfs_reserve_flush_enum flush) unsigned int type, enum btrfs_reserve_flush_enum flush)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *h; struct btrfs_trans_handle *h;
struct btrfs_transaction *cur_trans; struct btrfs_transaction *cur_trans;
u64 num_bytes = 0; u64 num_bytes = 0;
@@ -479,7 +491,7 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
/* Send isn't supposed to start transactions. */ /* Send isn't supposed to start transactions. */
ASSERT(current->journal_info != BTRFS_SEND_TRANS_STUB); ASSERT(current->journal_info != BTRFS_SEND_TRANS_STUB);
if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
return ERR_PTR(-EROFS); return ERR_PTR(-EROFS);
if (current->journal_info) { if (current->journal_info) {
@@ -496,24 +508,22 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
* Do the reservation before we join the transaction so we can do all * Do the reservation before we join the transaction so we can do all
* the appropriate flushing if need be. * the appropriate flushing if need be.
*/ */
if (num_items > 0 && root != root->fs_info->chunk_root) { if (num_items > 0 && root != fs_info->chunk_root) {
qgroup_reserved = num_items * root->fs_info->nodesize; qgroup_reserved = num_items * fs_info->nodesize;
ret = btrfs_qgroup_reserve_meta(root, qgroup_reserved); ret = btrfs_qgroup_reserve_meta(root, qgroup_reserved);
if (ret) if (ret)
return ERR_PTR(ret); return ERR_PTR(ret);
num_bytes = btrfs_calc_trans_metadata_size(root->fs_info, num_bytes = btrfs_calc_trans_metadata_size(fs_info, num_items);
num_items);
/* /*
* Do the reservation for the relocation root creation * Do the reservation for the relocation root creation
*/ */
if (need_reserve_reloc_root(root)) { if (need_reserve_reloc_root(root)) {
num_bytes += root->fs_info->nodesize; num_bytes += fs_info->nodesize;
reloc_reserved = true; reloc_reserved = true;
} }
ret = btrfs_block_rsv_add(root, ret = btrfs_block_rsv_add(root, &fs_info->trans_block_rsv,
&root->fs_info->trans_block_rsv,
num_bytes, flush); num_bytes, flush);
if (ret) if (ret)
goto reserve_fail; goto reserve_fail;
@@ -536,7 +546,7 @@ again:
* transaction and commit it, so we needn't do sb_start_intwrite(). * transaction and commit it, so we needn't do sb_start_intwrite().
*/ */
if (type & __TRANS_FREEZABLE) if (type & __TRANS_FREEZABLE)
sb_start_intwrite(root->fs_info->sb); sb_start_intwrite(fs_info->sb);
if (may_wait_transaction(root, type)) if (may_wait_transaction(root, type))
wait_current_trans(root); wait_current_trans(root);
@@ -553,7 +563,7 @@ again:
if (ret < 0) if (ret < 0)
goto join_fail; goto join_fail;
cur_trans = root->fs_info->running_transaction; cur_trans = fs_info->running_transaction;
h->transid = cur_trans->transid; h->transid = cur_trans->transid;
h->transaction = cur_trans; h->transaction = cur_trans;
@@ -575,9 +585,9 @@ again:
} }
if (num_bytes) { if (num_bytes) {
trace_btrfs_space_reservation(root->fs_info, "transaction", trace_btrfs_space_reservation(fs_info, "transaction",
h->transid, num_bytes, 1); h->transid, num_bytes, 1);
h->block_rsv = &root->fs_info->trans_block_rsv; h->block_rsv = &fs_info->trans_block_rsv;
h->bytes_reserved = num_bytes; h->bytes_reserved = num_bytes;
h->reloc_reserved = reloc_reserved; h->reloc_reserved = reloc_reserved;
} }
@@ -591,11 +601,11 @@ got_it:
join_fail: join_fail:
if (type & __TRANS_FREEZABLE) if (type & __TRANS_FREEZABLE)
sb_end_intwrite(root->fs_info->sb); sb_end_intwrite(fs_info->sb);
kmem_cache_free(btrfs_trans_handle_cachep, h); kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail: alloc_fail:
if (num_bytes) if (num_bytes)
btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv, btrfs_block_rsv_release(root, &fs_info->trans_block_rsv,
num_bytes); num_bytes);
reserve_fail: reserve_fail:
btrfs_qgroup_free_meta(root, qgroup_reserved); btrfs_qgroup_free_meta(root, qgroup_reserved);
@@ -613,6 +623,7 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
unsigned int num_items, unsigned int num_items,
int min_factor) int min_factor)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *trans; struct btrfs_trans_handle *trans;
u64 num_bytes; u64 num_bytes;
int ret; int ret;
@@ -625,19 +636,17 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
if (IS_ERR(trans)) if (IS_ERR(trans))
return trans; return trans;
num_bytes = btrfs_calc_trans_metadata_size(root->fs_info, num_items); num_bytes = btrfs_calc_trans_metadata_size(fs_info, num_items);
ret = btrfs_cond_migrate_bytes(root->fs_info, ret = btrfs_cond_migrate_bytes(fs_info, &fs_info->trans_block_rsv,
&root->fs_info->trans_block_rsv, num_bytes, min_factor);
num_bytes,
min_factor);
if (ret) { if (ret) {
btrfs_end_transaction(trans, root); btrfs_end_transaction(trans, root);
return ERR_PTR(ret); return ERR_PTR(ret);
} }
trans->block_rsv = &root->fs_info->trans_block_rsv; trans->block_rsv = &fs_info->trans_block_rsv;
trans->bytes_reserved = num_bytes; trans->bytes_reserved = num_bytes;
trace_btrfs_space_reservation(root->fs_info, "transaction", trace_btrfs_space_reservation(fs_info, "transaction",
trans->transid, num_bytes, 1); trans->transid, num_bytes, 1);
return trans; return trans;
@@ -717,16 +726,17 @@ static noinline void wait_for_commit(struct btrfs_root *root,
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid) int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_transaction *cur_trans = NULL, *t; struct btrfs_transaction *cur_trans = NULL, *t;
int ret = 0; int ret = 0;
if (transid) { if (transid) {
if (transid <= root->fs_info->last_trans_committed) if (transid <= fs_info->last_trans_committed)
goto out; goto out;
/* find specified transaction */ /* find specified transaction */
spin_lock(&root->fs_info->trans_lock); spin_lock(&fs_info->trans_lock);
list_for_each_entry(t, &root->fs_info->trans_list, list) { list_for_each_entry(t, &fs_info->trans_list, list) {
if (t->transid == transid) { if (t->transid == transid) {
cur_trans = t; cur_trans = t;
atomic_inc(&cur_trans->use_count); atomic_inc(&cur_trans->use_count);
@@ -738,21 +748,21 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
break; break;
} }
} }
spin_unlock(&root->fs_info->trans_lock); spin_unlock(&fs_info->trans_lock);
/* /*
* The specified transaction doesn't exist, or we * The specified transaction doesn't exist, or we
* raced with btrfs_commit_transaction * raced with btrfs_commit_transaction
*/ */
if (!cur_trans) { if (!cur_trans) {
if (transid > root->fs_info->last_trans_committed) if (transid > fs_info->last_trans_committed)
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
} }
} else { } else {
/* find newest transaction that is committing | committed */ /* find newest transaction that is committing | committed */
spin_lock(&root->fs_info->trans_lock); spin_lock(&fs_info->trans_lock);
list_for_each_entry_reverse(t, &root->fs_info->trans_list, list_for_each_entry_reverse(t, &fs_info->trans_list,
list) { list) {
if (t->state >= TRANS_STATE_COMMIT_START) { if (t->state >= TRANS_STATE_COMMIT_START) {
if (t->state == TRANS_STATE_COMPLETED) if (t->state == TRANS_STATE_COMPLETED)
@@ -762,7 +772,7 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
break; break;
} }
} }
spin_unlock(&root->fs_info->trans_lock); spin_unlock(&fs_info->trans_lock);
if (!cur_trans) if (!cur_trans)
goto out; /* nothing committing|committed */ goto out; /* nothing committing|committed */
} }
@@ -775,18 +785,22 @@ out:
void btrfs_throttle(struct btrfs_root *root) void btrfs_throttle(struct btrfs_root *root)
{ {
if (!atomic_read(&root->fs_info->open_ioctl_trans)) struct btrfs_fs_info *fs_info = root->fs_info;
if (!atomic_read(&fs_info->open_ioctl_trans))
wait_current_trans(root); wait_current_trans(root);
} }
static int should_end_transaction(struct btrfs_trans_handle *trans, static int should_end_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root) struct btrfs_root *root)
{ {
if (root->fs_info->global_block_rsv.space_info->full && struct btrfs_fs_info *fs_info = root->fs_info;
if (fs_info->global_block_rsv.space_info->full &&
btrfs_check_space_for_delayed_refs(trans, root)) btrfs_check_space_for_delayed_refs(trans, root))
return 1; return 1;
return !!btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5); return !!btrfs_block_rsv_check(root, &fs_info->global_block_rsv, 5);
} }
int btrfs_should_end_transaction(struct btrfs_trans_handle *trans, int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
@@ -858,7 +872,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
btrfs_trans_release_chunk_metadata(trans); btrfs_trans_release_chunk_metadata(trans);
if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) && if (lock && !atomic_read(&info->open_ioctl_trans) &&
should_end_transaction(trans, root) && should_end_transaction(trans, root) &&
ACCESS_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) { ACCESS_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) {
spin_lock(&info->trans_lock); spin_lock(&info->trans_lock);
@@ -875,7 +889,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
} }
if (trans->type & __TRANS_FREEZABLE) if (trans->type & __TRANS_FREEZABLE)
sb_end_intwrite(root->fs_info->sb); sb_end_intwrite(info->sb);
WARN_ON(cur_trans != info->running_transaction); WARN_ON(cur_trans != info->running_transaction);
WARN_ON(atomic_read(&cur_trans->num_writers) < 1); WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
@@ -897,7 +911,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
btrfs_run_delayed_iputs(root); btrfs_run_delayed_iputs(root);
if (trans->aborted || if (trans->aborted ||
test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) { test_bit(BTRFS_FS_STATE_ERROR, &info->fs_state)) {
wake_up_process(info->transaction_kthread); wake_up_process(info->transaction_kthread);
err = -EIO; err = -EIO;
} }
@@ -933,7 +947,8 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
{ {
int err = 0; int err = 0;
int werr = 0; int werr = 0;
struct address_space *mapping = root->fs_info->btree_inode->i_mapping; struct btrfs_fs_info *fs_info = root->fs_info;
struct address_space *mapping = fs_info->btree_inode->i_mapping;
struct extent_state *cached_state = NULL; struct extent_state *cached_state = NULL;
u64 start = 0; u64 start = 0;
u64 end; u64 end;
@@ -987,7 +1002,8 @@ int btrfs_wait_marked_extents(struct btrfs_root *root,
{ {
int err = 0; int err = 0;
int werr = 0; int werr = 0;
struct address_space *mapping = root->fs_info->btree_inode->i_mapping; struct btrfs_fs_info *fs_info = root->fs_info;
struct address_space *mapping = fs_info->btree_inode->i_mapping;
struct extent_state *cached_state = NULL; struct extent_state *cached_state = NULL;
u64 start = 0; u64 start = 0;
u64 end; u64 end;
@@ -1022,17 +1038,14 @@ int btrfs_wait_marked_extents(struct btrfs_root *root,
if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) { if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
if ((mark & EXTENT_DIRTY) && if ((mark & EXTENT_DIRTY) &&
test_and_clear_bit(BTRFS_FS_LOG1_ERR, test_and_clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags))
&root->fs_info->flags))
errors = true; errors = true;
if ((mark & EXTENT_NEW) && if ((mark & EXTENT_NEW) &&
test_and_clear_bit(BTRFS_FS_LOG2_ERR, test_and_clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags))
&root->fs_info->flags))
errors = true; errors = true;
} else { } else {
if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags))
&root->fs_info->flags))
errors = true; errors = true;
} }
@@ -1095,7 +1108,8 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
int ret; int ret;
u64 old_root_bytenr; u64 old_root_bytenr;
u64 old_root_used; u64 old_root_used;
struct btrfs_root *tree_root = root->fs_info->tree_root; struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *tree_root = fs_info->tree_root;
old_root_used = btrfs_root_used(&root->root_item); old_root_used = btrfs_root_used(&root->root_item);
@@ -1148,13 +1162,13 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
if (ret) if (ret)
return ret; return ret;
ret = btrfs_run_dev_stats(trans, root->fs_info); ret = btrfs_run_dev_stats(trans, fs_info);
if (ret) if (ret)
return ret; return ret;
ret = btrfs_run_dev_replace(trans, root->fs_info); ret = btrfs_run_dev_replace(trans, fs_info);
if (ret) if (ret)
return ret; return ret;
ret = btrfs_run_qgroups(trans, root->fs_info); ret = btrfs_run_qgroups(trans, fs_info);
if (ret) if (ret)
return ret; return ret;
@@ -1210,10 +1224,12 @@ again:
*/ */
void btrfs_add_dead_root(struct btrfs_root *root) void btrfs_add_dead_root(struct btrfs_root *root)
{ {
spin_lock(&root->fs_info->trans_lock); struct btrfs_fs_info *fs_info = root->fs_info;
spin_lock(&fs_info->trans_lock);
if (list_empty(&root->root_list)) if (list_empty(&root->root_list))
list_add_tail(&root->root_list, &root->fs_info->dead_roots); list_add_tail(&root->root_list, &fs_info->dead_roots);
spin_unlock(&root->fs_info->trans_lock); spin_unlock(&fs_info->trans_lock);
} }
/* /*
@@ -1462,7 +1478,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
rsv = trans->block_rsv; rsv = trans->block_rsv;
trans->block_rsv = &pending->block_rsv; trans->block_rsv = &pending->block_rsv;
trans->bytes_reserved = trans->block_rsv->reserved; trans->bytes_reserved = trans->block_rsv->reserved;
trace_btrfs_space_reservation(root->fs_info, "transaction", trace_btrfs_space_reservation(fs_info, "transaction",
trans->transid, trans->transid,
trans->bytes_reserved, 1); trans->bytes_reserved, 1);
dentry = pending->dentry; dentry = pending->dentry;
@@ -1582,7 +1598,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
} }
key.offset = (u64)-1; key.offset = (u64)-1;
pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key); pending->snap = btrfs_read_fs_root_no_name(fs_info, &key);
if (IS_ERR(pending->snap)) { if (IS_ERR(pending->snap)) {
ret = PTR_ERR(pending->snap); ret = PTR_ERR(pending->snap);
btrfs_abort_transaction(trans, ret); btrfs_abort_transaction(trans, ret);
@@ -1692,23 +1708,24 @@ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
static void update_super_roots(struct btrfs_root *root) static void update_super_roots(struct btrfs_root *root)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root_item *root_item; struct btrfs_root_item *root_item;
struct btrfs_super_block *super; struct btrfs_super_block *super;
super = root->fs_info->super_copy; super = fs_info->super_copy;
root_item = &root->fs_info->chunk_root->root_item; root_item = &fs_info->chunk_root->root_item;
super->chunk_root = root_item->bytenr; super->chunk_root = root_item->bytenr;
super->chunk_root_generation = root_item->generation; super->chunk_root_generation = root_item->generation;
super->chunk_root_level = root_item->level; super->chunk_root_level = root_item->level;
root_item = &root->fs_info->tree_root->root_item; root_item = &fs_info->tree_root->root_item;
super->root = root_item->bytenr; super->root = root_item->bytenr;
super->generation = root_item->generation; super->generation = root_item->generation;
super->root_level = root_item->level; super->root_level = root_item->level;
if (btrfs_test_opt(root->fs_info, SPACE_CACHE)) if (btrfs_test_opt(fs_info, SPACE_CACHE))
super->cache_generation = root_item->generation; super->cache_generation = root_item->generation;
if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &root->fs_info->flags)) if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
super->uuid_tree_generation = root_item->generation; super->uuid_tree_generation = root_item->generation;
} }
@@ -1794,6 +1811,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct btrfs_root *root,
int wait_for_unblock) int wait_for_unblock)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_async_commit *ac; struct btrfs_async_commit *ac;
struct btrfs_transaction *cur_trans; struct btrfs_transaction *cur_trans;
@@ -1821,7 +1839,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
* async commit thread will be the one to unlock it. * async commit thread will be the one to unlock it.
*/ */
if (ac->newtrans->type & __TRANS_FREEZABLE) if (ac->newtrans->type & __TRANS_FREEZABLE)
__sb_writers_release(root->fs_info->sb, SB_FREEZE_FS); __sb_writers_release(fs_info->sb, SB_FREEZE_FS);
schedule_work(&ac->work); schedule_work(&ac->work);
@@ -1842,6 +1860,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
static void cleanup_transaction(struct btrfs_trans_handle *trans, static void cleanup_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root, int err) struct btrfs_root *root, int err)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_transaction *cur_trans = trans->transaction; struct btrfs_transaction *cur_trans = trans->transaction;
DEFINE_WAIT(wait); DEFINE_WAIT(wait);
@@ -1849,7 +1868,7 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
btrfs_abort_transaction(trans, err); btrfs_abort_transaction(trans, err);
spin_lock(&root->fs_info->trans_lock); spin_lock(&fs_info->trans_lock);
/* /*
* If the transaction is removed from the list, it means this * If the transaction is removed from the list, it means this
@@ -1859,25 +1878,25 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
BUG_ON(list_empty(&cur_trans->list)); BUG_ON(list_empty(&cur_trans->list));
list_del_init(&cur_trans->list); list_del_init(&cur_trans->list);
if (cur_trans == root->fs_info->running_transaction) { if (cur_trans == fs_info->running_transaction) {
cur_trans->state = TRANS_STATE_COMMIT_DOING; cur_trans->state = TRANS_STATE_COMMIT_DOING;
spin_unlock(&root->fs_info->trans_lock); spin_unlock(&fs_info->trans_lock);
wait_event(cur_trans->writer_wait, wait_event(cur_trans->writer_wait,
atomic_read(&cur_trans->num_writers) == 1); atomic_read(&cur_trans->num_writers) == 1);
spin_lock(&root->fs_info->trans_lock); spin_lock(&fs_info->trans_lock);
} }
spin_unlock(&root->fs_info->trans_lock); spin_unlock(&fs_info->trans_lock);
btrfs_cleanup_one_transaction(trans->transaction, root); btrfs_cleanup_one_transaction(trans->transaction, root);
spin_lock(&root->fs_info->trans_lock); spin_lock(&fs_info->trans_lock);
if (cur_trans == root->fs_info->running_transaction) if (cur_trans == fs_info->running_transaction)
root->fs_info->running_transaction = NULL; fs_info->running_transaction = NULL;
spin_unlock(&root->fs_info->trans_lock); spin_unlock(&fs_info->trans_lock);
if (trans->type & __TRANS_FREEZABLE) if (trans->type & __TRANS_FREEZABLE)
sb_end_intwrite(root->fs_info->sb); sb_end_intwrite(fs_info->sb);
btrfs_put_transaction(cur_trans); btrfs_put_transaction(cur_trans);
btrfs_put_transaction(cur_trans); btrfs_put_transaction(cur_trans);
@@ -1885,7 +1904,7 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
if (current->journal_info == trans) if (current->journal_info == trans)
current->journal_info = NULL; current->journal_info = NULL;
btrfs_scrub_cancel(root->fs_info); btrfs_scrub_cancel(fs_info);
kmem_cache_free(btrfs_trans_handle_cachep, trans); kmem_cache_free(btrfs_trans_handle_cachep, trans);
} }
@@ -1913,6 +1932,7 @@ btrfs_wait_pending_ordered(struct btrfs_transaction *cur_trans)
int btrfs_commit_transaction(struct btrfs_trans_handle *trans, int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root) struct btrfs_root *root)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_transaction *cur_trans = trans->transaction; struct btrfs_transaction *cur_trans = trans->transaction;
struct btrfs_transaction *prev_trans = NULL; struct btrfs_transaction *prev_trans = NULL;
int ret; int ret;
@@ -1970,11 +1990,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
* hurt to have more than one go through, but there's no * hurt to have more than one go through, but there's no
* real advantage to it either. * real advantage to it either.
*/ */
mutex_lock(&root->fs_info->ro_block_group_mutex); mutex_lock(&fs_info->ro_block_group_mutex);
if (!test_and_set_bit(BTRFS_TRANS_DIRTY_BG_RUN, if (!test_and_set_bit(BTRFS_TRANS_DIRTY_BG_RUN,
&cur_trans->flags)) &cur_trans->flags))
run_it = 1; run_it = 1;
mutex_unlock(&root->fs_info->ro_block_group_mutex); mutex_unlock(&fs_info->ro_block_group_mutex);
if (run_it) if (run_it)
ret = btrfs_start_dirty_block_groups(trans, root); ret = btrfs_start_dirty_block_groups(trans, root);
@@ -1984,9 +2004,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
return ret; return ret;
} }
spin_lock(&root->fs_info->trans_lock); spin_lock(&fs_info->trans_lock);
if (cur_trans->state >= TRANS_STATE_COMMIT_START) { if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
spin_unlock(&root->fs_info->trans_lock); spin_unlock(&fs_info->trans_lock);
atomic_inc(&cur_trans->use_count); atomic_inc(&cur_trans->use_count);
ret = btrfs_end_transaction(trans, root); ret = btrfs_end_transaction(trans, root);
@@ -2001,14 +2021,14 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
} }
cur_trans->state = TRANS_STATE_COMMIT_START; cur_trans->state = TRANS_STATE_COMMIT_START;
wake_up(&root->fs_info->transaction_blocked_wait); wake_up(&fs_info->transaction_blocked_wait);
if (cur_trans->list.prev != &root->fs_info->trans_list) { if (cur_trans->list.prev != &fs_info->trans_list) {
prev_trans = list_entry(cur_trans->list.prev, prev_trans = list_entry(cur_trans->list.prev,
struct btrfs_transaction, list); struct btrfs_transaction, list);
if (prev_trans->state != TRANS_STATE_COMPLETED) { if (prev_trans->state != TRANS_STATE_COMPLETED) {
atomic_inc(&prev_trans->use_count); atomic_inc(&prev_trans->use_count);
spin_unlock(&root->fs_info->trans_lock); spin_unlock(&fs_info->trans_lock);
wait_for_commit(root, prev_trans); wait_for_commit(root, prev_trans);
ret = prev_trans->aborted; ret = prev_trans->aborted;
@@ -2017,15 +2037,15 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
if (ret) if (ret)
goto cleanup_transaction; goto cleanup_transaction;
} else { } else {
spin_unlock(&root->fs_info->trans_lock); spin_unlock(&fs_info->trans_lock);
} }
} else { } else {
spin_unlock(&root->fs_info->trans_lock); spin_unlock(&fs_info->trans_lock);
} }
extwriter_counter_dec(cur_trans, trans->type); extwriter_counter_dec(cur_trans, trans->type);
ret = btrfs_start_delalloc_flush(root->fs_info); ret = btrfs_start_delalloc_flush(fs_info);
if (ret) if (ret)
goto cleanup_transaction; goto cleanup_transaction;
@@ -2041,7 +2061,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
if (ret) if (ret)
goto cleanup_transaction; goto cleanup_transaction;
btrfs_wait_delalloc_flush(root->fs_info); btrfs_wait_delalloc_flush(fs_info);
btrfs_wait_pending_ordered(cur_trans); btrfs_wait_pending_ordered(cur_trans);
@@ -2051,9 +2071,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
* commit the transaction. We could have started a join before setting * commit the transaction. We could have started a join before setting
* COMMIT_DOING so make sure to wait for num_writers to == 1 again. * COMMIT_DOING so make sure to wait for num_writers to == 1 again.
*/ */
spin_lock(&root->fs_info->trans_lock); spin_lock(&fs_info->trans_lock);
cur_trans->state = TRANS_STATE_COMMIT_DOING; cur_trans->state = TRANS_STATE_COMMIT_DOING;
spin_unlock(&root->fs_info->trans_lock); spin_unlock(&fs_info->trans_lock);
wait_event(cur_trans->writer_wait, wait_event(cur_trans->writer_wait,
atomic_read(&cur_trans->num_writers) == 1); atomic_read(&cur_trans->num_writers) == 1);
@@ -2067,16 +2087,16 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
* the balancing code from coming in and moving * the balancing code from coming in and moving
* extents around in the middle of the commit * extents around in the middle of the commit
*/ */
mutex_lock(&root->fs_info->reloc_mutex); mutex_lock(&fs_info->reloc_mutex);
/* /*
* We needn't worry about the delayed items because we will * We needn't worry about the delayed items because we will
* deal with them in create_pending_snapshot(), which is the * deal with them in create_pending_snapshot(), which is the
* core function of the snapshot creation. * core function of the snapshot creation.
*/ */
ret = create_pending_snapshots(trans, root->fs_info); ret = create_pending_snapshots(trans, fs_info);
if (ret) { if (ret) {
mutex_unlock(&root->fs_info->reloc_mutex); mutex_unlock(&fs_info->reloc_mutex);
goto scrub_continue; goto scrub_continue;
} }
@@ -2092,20 +2112,20 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
*/ */
ret = btrfs_run_delayed_items(trans, root); ret = btrfs_run_delayed_items(trans, root);
if (ret) { if (ret) {
mutex_unlock(&root->fs_info->reloc_mutex); mutex_unlock(&fs_info->reloc_mutex);
goto scrub_continue; goto scrub_continue;
} }
ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
if (ret) { if (ret) {
mutex_unlock(&root->fs_info->reloc_mutex); mutex_unlock(&fs_info->reloc_mutex);
goto scrub_continue; goto scrub_continue;
} }
/* Record old roots for later qgroup accounting */ /* Record old roots for later qgroup accounting */
ret = btrfs_qgroup_prepare_account_extents(trans, root->fs_info); ret = btrfs_qgroup_prepare_account_extents(trans, fs_info);
if (ret) { if (ret) {
mutex_unlock(&root->fs_info->reloc_mutex); mutex_unlock(&fs_info->reloc_mutex);
goto scrub_continue; goto scrub_continue;
} }
@@ -2130,12 +2150,12 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
* from now until after the super is written, we avoid races * from now until after the super is written, we avoid races
* with the tree-log code. * with the tree-log code.
*/ */
mutex_lock(&root->fs_info->tree_log_mutex); mutex_lock(&fs_info->tree_log_mutex);
ret = commit_fs_roots(trans, root->fs_info); ret = commit_fs_roots(trans, fs_info);
if (ret) { if (ret) {
mutex_unlock(&root->fs_info->tree_log_mutex); mutex_unlock(&fs_info->tree_log_mutex);
mutex_unlock(&root->fs_info->reloc_mutex); mutex_unlock(&fs_info->reloc_mutex);
goto scrub_continue; goto scrub_continue;
} }
@@ -2143,28 +2163,28 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
* Since the transaction is done, we can apply the pending changes * Since the transaction is done, we can apply the pending changes
* before the next transaction. * before the next transaction.
*/ */
btrfs_apply_pending_changes(root->fs_info); btrfs_apply_pending_changes(fs_info);
/* commit_fs_roots gets rid of all the tree log roots, it is now /* commit_fs_roots gets rid of all the tree log roots, it is now
* safe to free the root of tree log roots * safe to free the root of tree log roots
*/ */
btrfs_free_log_root_tree(trans, root->fs_info); btrfs_free_log_root_tree(trans, fs_info);
/* /*
* Since fs roots are all committed, we can get a quite accurate * Since fs roots are all committed, we can get a quite accurate
* new_roots. So let's do quota accounting. * new_roots. So let's do quota accounting.
*/ */
ret = btrfs_qgroup_account_extents(trans, root->fs_info); ret = btrfs_qgroup_account_extents(trans, fs_info);
if (ret < 0) { if (ret < 0) {
mutex_unlock(&root->fs_info->tree_log_mutex); mutex_unlock(&fs_info->tree_log_mutex);
mutex_unlock(&root->fs_info->reloc_mutex); mutex_unlock(&fs_info->reloc_mutex);
goto scrub_continue; goto scrub_continue;
} }
ret = commit_cowonly_roots(trans, root); ret = commit_cowonly_roots(trans, root);
if (ret) { if (ret) {
mutex_unlock(&root->fs_info->tree_log_mutex); mutex_unlock(&fs_info->tree_log_mutex);
mutex_unlock(&root->fs_info->reloc_mutex); mutex_unlock(&fs_info->reloc_mutex);
goto scrub_continue; goto scrub_continue;
} }
@@ -2174,64 +2194,64 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
*/ */
if (unlikely(ACCESS_ONCE(cur_trans->aborted))) { if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
ret = cur_trans->aborted; ret = cur_trans->aborted;
mutex_unlock(&root->fs_info->tree_log_mutex); mutex_unlock(&fs_info->tree_log_mutex);
mutex_unlock(&root->fs_info->reloc_mutex); mutex_unlock(&fs_info->reloc_mutex);
goto scrub_continue; goto scrub_continue;
} }
btrfs_prepare_extent_commit(trans, root); btrfs_prepare_extent_commit(trans, root);
cur_trans = root->fs_info->running_transaction; cur_trans = fs_info->running_transaction;
btrfs_set_root_node(&root->fs_info->tree_root->root_item, btrfs_set_root_node(&fs_info->tree_root->root_item,
root->fs_info->tree_root->node); fs_info->tree_root->node);
list_add_tail(&root->fs_info->tree_root->dirty_list, list_add_tail(&fs_info->tree_root->dirty_list,
&cur_trans->switch_commits); &cur_trans->switch_commits);
btrfs_set_root_node(&root->fs_info->chunk_root->root_item, btrfs_set_root_node(&fs_info->chunk_root->root_item,
root->fs_info->chunk_root->node); fs_info->chunk_root->node);
list_add_tail(&root->fs_info->chunk_root->dirty_list, list_add_tail(&fs_info->chunk_root->dirty_list,
&cur_trans->switch_commits); &cur_trans->switch_commits);
switch_commit_roots(cur_trans, root->fs_info); switch_commit_roots(cur_trans, fs_info);
assert_qgroups_uptodate(trans); assert_qgroups_uptodate(trans);
ASSERT(list_empty(&cur_trans->dirty_bgs)); ASSERT(list_empty(&cur_trans->dirty_bgs));
ASSERT(list_empty(&cur_trans->io_bgs)); ASSERT(list_empty(&cur_trans->io_bgs));
update_super_roots(root); update_super_roots(root);
btrfs_set_super_log_root(root->fs_info->super_copy, 0); btrfs_set_super_log_root(fs_info->super_copy, 0);
btrfs_set_super_log_root_level(root->fs_info->super_copy, 0); btrfs_set_super_log_root_level(fs_info->super_copy, 0);
memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy, memcpy(fs_info->super_for_commit, fs_info->super_copy,
sizeof(*root->fs_info->super_copy)); sizeof(*fs_info->super_copy));
btrfs_update_commit_device_size(root->fs_info); btrfs_update_commit_device_size(fs_info);
btrfs_update_commit_device_bytes_used(root, cur_trans); btrfs_update_commit_device_bytes_used(root, cur_trans);
clear_bit(BTRFS_FS_LOG1_ERR, &root->fs_info->flags); clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
clear_bit(BTRFS_FS_LOG2_ERR, &root->fs_info->flags); clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
btrfs_trans_release_chunk_metadata(trans); btrfs_trans_release_chunk_metadata(trans);
spin_lock(&root->fs_info->trans_lock); spin_lock(&fs_info->trans_lock);
cur_trans->state = TRANS_STATE_UNBLOCKED; cur_trans->state = TRANS_STATE_UNBLOCKED;
root->fs_info->running_transaction = NULL; fs_info->running_transaction = NULL;
spin_unlock(&root->fs_info->trans_lock); spin_unlock(&fs_info->trans_lock);
mutex_unlock(&root->fs_info->reloc_mutex); mutex_unlock(&fs_info->reloc_mutex);
wake_up(&root->fs_info->transaction_wait); wake_up(&fs_info->transaction_wait);
ret = btrfs_write_and_wait_transaction(trans, root); ret = btrfs_write_and_wait_transaction(trans, root);
if (ret) { if (ret) {
btrfs_handle_fs_error(root->fs_info, ret, btrfs_handle_fs_error(fs_info, ret,
"Error while writing out transaction"); "Error while writing out transaction");
mutex_unlock(&root->fs_info->tree_log_mutex); mutex_unlock(&fs_info->tree_log_mutex);
goto scrub_continue; goto scrub_continue;
} }
ret = write_ctree_super(trans, root, 0); ret = write_ctree_super(trans, root, 0);
if (ret) { if (ret) {
mutex_unlock(&root->fs_info->tree_log_mutex); mutex_unlock(&fs_info->tree_log_mutex);
goto scrub_continue; goto scrub_continue;
} }
@@ -2239,14 +2259,14 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
* the super is written, we can safely allow the tree-loggers * the super is written, we can safely allow the tree-loggers
* to go about their business * to go about their business
*/ */
mutex_unlock(&root->fs_info->tree_log_mutex); mutex_unlock(&fs_info->tree_log_mutex);
btrfs_finish_extent_commit(trans, root); btrfs_finish_extent_commit(trans, root);
if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags)) if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
btrfs_clear_space_info_full(root->fs_info); btrfs_clear_space_info_full(fs_info);
root->fs_info->last_trans_committed = cur_trans->transid; fs_info->last_trans_committed = cur_trans->transid;
/* /*
* We needn't acquire the lock here because there is no other task * We needn't acquire the lock here because there is no other task
* which can change it. * which can change it.
@@ -2254,15 +2274,15 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
cur_trans->state = TRANS_STATE_COMPLETED; cur_trans->state = TRANS_STATE_COMPLETED;
wake_up(&cur_trans->commit_wait); wake_up(&cur_trans->commit_wait);
spin_lock(&root->fs_info->trans_lock); spin_lock(&fs_info->trans_lock);
list_del_init(&cur_trans->list); list_del_init(&cur_trans->list);
spin_unlock(&root->fs_info->trans_lock); spin_unlock(&fs_info->trans_lock);
btrfs_put_transaction(cur_trans); btrfs_put_transaction(cur_trans);
btrfs_put_transaction(cur_trans); btrfs_put_transaction(cur_trans);
if (trans->type & __TRANS_FREEZABLE) if (trans->type & __TRANS_FREEZABLE)
sb_end_intwrite(root->fs_info->sb); sb_end_intwrite(fs_info->sb);
trace_btrfs_transaction_commit(root); trace_btrfs_transaction_commit(root);
@@ -2277,9 +2297,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
* If fs has been frozen, we can not handle delayed iputs, otherwise * If fs has been frozen, we can not handle delayed iputs, otherwise
* it'll result in deadlock about SB_FREEZE_FS. * it'll result in deadlock about SB_FREEZE_FS.
*/ */
if (current != root->fs_info->transaction_kthread && if (current != fs_info->transaction_kthread &&
current != root->fs_info->cleaner_kthread && current != fs_info->cleaner_kthread && !fs_info->fs_frozen)
!root->fs_info->fs_frozen)
btrfs_run_delayed_iputs(root); btrfs_run_delayed_iputs(root);
return ret; return ret;
@@ -2290,7 +2309,7 @@ cleanup_transaction:
btrfs_trans_release_metadata(trans, root); btrfs_trans_release_metadata(trans, root);
btrfs_trans_release_chunk_metadata(trans); btrfs_trans_release_chunk_metadata(trans);
trans->block_rsv = NULL; trans->block_rsv = NULL;
btrfs_warn(root->fs_info, "Skipping commit of aborted transaction."); btrfs_warn(fs_info, "Skipping commit of aborted transaction.");
if (current->journal_info == trans) if (current->journal_info == trans)
current->journal_info = NULL; current->journal_info = NULL;
cleanup_transaction(trans, root, ret); cleanup_transaction(trans, root, ret);
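tree-log.c, which follows, gets the same treatment; the only variation across the commit is how the local variable is obtained, since some helpers hold a super_block or an inode rather than a subvolume root. A short sketch of the three initializers used (the wrapper function is hypothetical; the expressions are the ones that appear in the hunks):

/* Illustrative only -- the three ways a local fs_info is derived. */
static void example_fs_info_sources(struct btrfs_root *root,
				    struct super_block *sb,
				    struct inode *inode)
{
	/* from a root: the common case, e.g. start_log_trans() */
	struct btrfs_fs_info *a = root->fs_info;

	/* from a VFS super_block, e.g. btrfs_freeze()/btrfs_unfreeze() */
	struct btrfs_fs_info *b = btrfs_sb(sb);

	/* from an inode via its super_block, e.g. copy_items() */
	struct btrfs_fs_info *c = btrfs_sb(inode->i_sb);

	(void)a; (void)b; (void)c;	/* silence unused-variable warnings */
}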

View File

@@ -142,12 +142,13 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct btrfs_root *root,
struct btrfs_log_ctx *ctx) struct btrfs_log_ctx *ctx)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
int ret = 0; int ret = 0;
mutex_lock(&root->log_mutex); mutex_lock(&root->log_mutex);
if (root->log_root) { if (root->log_root) {
if (btrfs_need_log_full_commit(root->fs_info, trans)) { if (btrfs_need_log_full_commit(fs_info, trans)) {
ret = -EAGAIN; ret = -EAGAIN;
goto out; goto out;
} }
@@ -159,10 +160,10 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state); set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
} }
} else { } else {
mutex_lock(&root->fs_info->tree_log_mutex); mutex_lock(&fs_info->tree_log_mutex);
if (!root->fs_info->log_root_tree) if (!fs_info->log_root_tree)
ret = btrfs_init_log_root_tree(trans, root->fs_info); ret = btrfs_init_log_root_tree(trans, fs_info);
mutex_unlock(&root->fs_info->tree_log_mutex); mutex_unlock(&fs_info->tree_log_mutex);
if (ret) if (ret)
goto out; goto out;
@@ -292,20 +293,21 @@ static int process_one_buffer(struct btrfs_root *log,
struct extent_buffer *eb, struct extent_buffer *eb,
struct walk_control *wc, u64 gen) struct walk_control *wc, u64 gen)
{ {
struct btrfs_fs_info *fs_info = log->fs_info;
int ret = 0; int ret = 0;
/* /*
* If this fs is mixed then we need to be able to process the leaves to * If this fs is mixed then we need to be able to process the leaves to
* pin down any logged extents, so we have to read the block. * pin down any logged extents, so we have to read the block.
*/ */
if (btrfs_fs_incompat(log->fs_info, MIXED_GROUPS)) { if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
ret = btrfs_read_buffer(eb, gen); ret = btrfs_read_buffer(eb, gen);
if (ret) if (ret)
return ret; return ret;
} }
if (wc->pin) if (wc->pin)
ret = btrfs_pin_extent_for_log_replay(log->fs_info->extent_root, ret = btrfs_pin_extent_for_log_replay(fs_info->extent_root,
eb->start, eb->len); eb->start, eb->len);
if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) { if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
@@ -582,6 +584,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
struct extent_buffer *eb, int slot, struct extent_buffer *eb, int slot,
struct btrfs_key *key) struct btrfs_key *key)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
int found_type; int found_type;
u64 extent_end; u64 extent_end;
u64 start = key->offset; u64 start = key->offset;
@@ -609,7 +612,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
size = btrfs_file_extent_inline_len(eb, slot, item); size = btrfs_file_extent_inline_len(eb, slot, item);
nbytes = btrfs_file_extent_ram_bytes(eb, item); nbytes = btrfs_file_extent_ram_bytes(eb, item);
extent_end = ALIGN(start + size, extent_end = ALIGN(start + size,
root->fs_info->sectorsize); fs_info->sectorsize);
} else { } else {
ret = 0; ret = 0;
goto out; goto out;
@@ -690,7 +693,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
* as the owner of the file extent changed from log tree * as the owner of the file extent changed from log tree
* (doesn't affect qgroup) to fs/file tree(affects qgroup) * (doesn't affect qgroup) to fs/file tree(affects qgroup)
*/ */
ret = btrfs_qgroup_trace_extent(trans, root->fs_info, ret = btrfs_qgroup_trace_extent(trans, fs_info,
btrfs_file_extent_disk_bytenr(eb, item), btrfs_file_extent_disk_bytenr(eb, item),
btrfs_file_extent_disk_num_bytes(eb, item), btrfs_file_extent_disk_num_bytes(eb, item),
GFP_NOFS); GFP_NOFS);
@@ -797,14 +800,12 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
struct btrfs_ordered_sum, struct btrfs_ordered_sum,
list); list);
if (!ret) if (!ret)
ret = btrfs_del_csums(trans, ret = btrfs_del_csums(trans, fs_info,
root->fs_info,
sums->bytenr, sums->bytenr,
sums->len); sums->len);
if (!ret) if (!ret)
ret = btrfs_csum_file_blocks(trans, ret = btrfs_csum_file_blocks(trans,
root->fs_info->csum_root, fs_info->csum_root, sums);
sums);
list_del(&sums->list); list_del(&sums->list);
kfree(sums); kfree(sums);
} }
@@ -2408,6 +2409,7 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_path *path, int *level, struct btrfs_path *path, int *level,
struct walk_control *wc) struct walk_control *wc)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
u64 root_owner; u64 root_owner;
u64 bytenr; u64 bytenr;
u64 ptr_gen; u64 ptr_gen;
@@ -2433,7 +2435,7 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
bytenr = btrfs_node_blockptr(cur, path->slots[*level]); bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]); ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
blocksize = root->fs_info->nodesize; blocksize = fs_info->nodesize;
parent = path->nodes[*level]; parent = path->nodes[*level];
root_owner = btrfs_header_owner(parent); root_owner = btrfs_header_owner(parent);
@@ -2460,8 +2462,7 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
if (trans) { if (trans) {
btrfs_tree_lock(next); btrfs_tree_lock(next);
btrfs_set_lock_blocking(next); btrfs_set_lock_blocking(next);
clean_tree_block(trans, root->fs_info, clean_tree_block(trans, fs_info, next);
next);
btrfs_wait_tree_block_writeback(next); btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next); btrfs_tree_unlock(next);
} }
@@ -2506,6 +2507,7 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_path *path, int *level, struct btrfs_path *path, int *level,
struct walk_control *wc) struct walk_control *wc)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
u64 root_owner; u64 root_owner;
int i; int i;
int slot; int slot;
@@ -2539,8 +2541,7 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
if (trans) { if (trans) {
btrfs_tree_lock(next); btrfs_tree_lock(next);
btrfs_set_lock_blocking(next); btrfs_set_lock_blocking(next);
clean_tree_block(trans, root->fs_info, clean_tree_block(trans, fs_info, next);
next);
btrfs_wait_tree_block_writeback(next); btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next); btrfs_tree_unlock(next);
} }
@@ -2642,14 +2643,15 @@ out:
static int update_log_root(struct btrfs_trans_handle *trans, static int update_log_root(struct btrfs_trans_handle *trans,
struct btrfs_root *log) struct btrfs_root *log)
{ {
struct btrfs_fs_info *fs_info = log->fs_info;
int ret; int ret;
if (log->log_transid == 1) { if (log->log_transid == 1) {
/* insert root item on the first sync */ /* insert root item on the first sync */
ret = btrfs_insert_root(trans, log->fs_info->log_root_tree, ret = btrfs_insert_root(trans, fs_info->log_root_tree,
&log->root_key, &log->root_item); &log->root_key, &log->root_item);
} else { } else {
ret = btrfs_update_root(trans, log->fs_info->log_root_tree, ret = btrfs_update_root(trans, fs_info->log_root_tree,
&log->root_key, &log->root_item); &log->root_key, &log->root_item);
} }
return ret; return ret;
@@ -2743,8 +2745,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
int index2; int index2;
int mark; int mark;
int ret; int ret;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *log = root->log_root; struct btrfs_root *log = root->log_root;
struct btrfs_root *log_root_tree = root->fs_info->log_root_tree; struct btrfs_root *log_root_tree = fs_info->log_root_tree;
int log_transid = 0; int log_transid = 0;
struct btrfs_log_ctx root_log_ctx; struct btrfs_log_ctx root_log_ctx;
struct blk_plug plug; struct blk_plug plug;
@@ -2772,7 +2775,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
while (1) { while (1) {
int batch = atomic_read(&root->log_batch); int batch = atomic_read(&root->log_batch);
/* when we're on an ssd, just kick the log commit out */ /* when we're on an ssd, just kick the log commit out */
if (!btrfs_test_opt(root->fs_info, SSD) && if (!btrfs_test_opt(fs_info, SSD) &&
test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) { test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
mutex_unlock(&root->log_mutex); mutex_unlock(&root->log_mutex);
schedule_timeout_uninterruptible(1); schedule_timeout_uninterruptible(1);
@@ -2784,7 +2787,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
} }
/* bail out if we need to do a full commit */ /* bail out if we need to do a full commit */
if (btrfs_need_log_full_commit(root->fs_info, trans)) { if (btrfs_need_log_full_commit(fs_info, trans)) {
ret = -EAGAIN; ret = -EAGAIN;
btrfs_free_logged_extents(log, log_transid); btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&root->log_mutex); mutex_unlock(&root->log_mutex);
@@ -2805,7 +2808,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
blk_finish_plug(&plug); blk_finish_plug(&plug);
btrfs_abort_transaction(trans, ret); btrfs_abort_transaction(trans, ret);
btrfs_free_logged_extents(log, log_transid); btrfs_free_logged_extents(log, log_transid);
btrfs_set_log_full_commit(root->fs_info, trans); btrfs_set_log_full_commit(fs_info, trans);
mutex_unlock(&root->log_mutex); mutex_unlock(&root->log_mutex);
goto out; goto out;
} }
@@ -2850,7 +2853,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
list_del_init(&root_log_ctx.list); list_del_init(&root_log_ctx.list);
blk_finish_plug(&plug); blk_finish_plug(&plug);
btrfs_set_log_full_commit(root->fs_info, trans); btrfs_set_log_full_commit(fs_info, trans);
if (ret != -ENOSPC) { if (ret != -ENOSPC) {
btrfs_abort_transaction(trans, ret); btrfs_abort_transaction(trans, ret);
@@ -2899,7 +2902,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
* now that we've moved on to the tree of log tree roots, * now that we've moved on to the tree of log tree roots,
* check the full commit flag again * check the full commit flag again
*/ */
if (btrfs_need_log_full_commit(root->fs_info, trans)) { if (btrfs_need_log_full_commit(fs_info, trans)) {
blk_finish_plug(&plug); blk_finish_plug(&plug);
btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark); btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
btrfs_free_logged_extents(log, log_transid); btrfs_free_logged_extents(log, log_transid);
@@ -2913,7 +2916,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
EXTENT_DIRTY | EXTENT_NEW); EXTENT_DIRTY | EXTENT_NEW);
blk_finish_plug(&plug); blk_finish_plug(&plug);
if (ret) { if (ret) {
btrfs_set_log_full_commit(root->fs_info, trans); btrfs_set_log_full_commit(fs_info, trans);
btrfs_abort_transaction(trans, ret); btrfs_abort_transaction(trans, ret);
btrfs_free_logged_extents(log, log_transid); btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex); mutex_unlock(&log_root_tree->log_mutex);
@@ -2925,17 +2928,17 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
&log_root_tree->dirty_log_pages, &log_root_tree->dirty_log_pages,
EXTENT_NEW | EXTENT_DIRTY); EXTENT_NEW | EXTENT_DIRTY);
if (ret) { if (ret) {
btrfs_set_log_full_commit(root->fs_info, trans); btrfs_set_log_full_commit(fs_info, trans);
btrfs_free_logged_extents(log, log_transid); btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex); mutex_unlock(&log_root_tree->log_mutex);
goto out_wake_log_root; goto out_wake_log_root;
} }
btrfs_wait_logged_extents(trans, log, log_transid); btrfs_wait_logged_extents(trans, log, log_transid);
btrfs_set_super_log_root(root->fs_info->super_for_commit, btrfs_set_super_log_root(fs_info->super_for_commit,
log_root_tree->node->start); log_root_tree->node->start);
btrfs_set_super_log_root_level(root->fs_info->super_for_commit, btrfs_set_super_log_root_level(fs_info->super_for_commit,
btrfs_header_level(log_root_tree->node)); btrfs_header_level(log_root_tree->node));
log_root_tree->log_transid++; log_root_tree->log_transid++;
mutex_unlock(&log_root_tree->log_mutex); mutex_unlock(&log_root_tree->log_mutex);
@@ -2947,9 +2950,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
* the running transaction open, so a full commit can't hop * the running transaction open, so a full commit can't hop
* in and cause problems either. * in and cause problems either.
*/ */
ret = write_ctree_super(trans, root->fs_info->tree_root, 1); ret = write_ctree_super(trans, fs_info->tree_root, 1);
if (ret) { if (ret) {
btrfs_set_log_full_commit(root->fs_info, trans); btrfs_set_log_full_commit(fs_info, trans);
btrfs_abort_transaction(trans, ret); btrfs_abort_transaction(trans, ret);
goto out_wake_log_root; goto out_wake_log_root;
} }
@@ -3183,6 +3186,7 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
const char *name, int name_len, const char *name, int name_len,
struct inode *inode, u64 dirid) struct inode *inode, u64 dirid)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *log; struct btrfs_root *log;
u64 index; u64 index;
int ret; int ret;
@@ -3200,7 +3204,7 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
dirid, &index); dirid, &index);
mutex_unlock(&BTRFS_I(inode)->log_mutex); mutex_unlock(&BTRFS_I(inode)->log_mutex);
if (ret == -ENOSPC) { if (ret == -ENOSPC) {
btrfs_set_log_full_commit(root->fs_info, trans); btrfs_set_log_full_commit(fs_info, trans);
ret = 0; ret = 0;
} else if (ret < 0 && ret != -ENOENT) } else if (ret < 0 && ret != -ENOENT)
btrfs_abort_transaction(trans, ret); btrfs_abort_transaction(trans, ret);
@@ -3607,6 +3611,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
int start_slot, int nr, int inode_only, int start_slot, int nr, int inode_only,
u64 logged_isize) u64 logged_isize)
{ {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
unsigned long src_offset; unsigned long src_offset;
unsigned long dst_offset; unsigned long dst_offset;
struct btrfs_root *log = BTRFS_I(inode)->root->log_root; struct btrfs_root *log = BTRFS_I(inode)->root->log_root;
@@ -3717,7 +3722,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
} }
ret = btrfs_lookup_csums_range( ret = btrfs_lookup_csums_range(
log->fs_info->csum_root, fs_info->csum_root,
ds + cs, ds + cs + cl - 1, ds + cs, ds + cs + cl - 1,
&ordered_sums, 0); &ordered_sums, 0);
if (ret) { if (ret) {
@@ -3790,7 +3795,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
src_path->slots[0], src_path->slots[0],
extent); extent);
*last_extent = ALIGN(key.offset + len, *last_extent = ALIGN(key.offset + len,
log->fs_info->sectorsize); fs_info->sectorsize);
} else { } else {
len = btrfs_file_extent_num_bytes(src, extent); len = btrfs_file_extent_num_bytes(src, extent);
*last_extent = key.offset + len; *last_extent = key.offset + len;
@@ -3854,7 +3859,7 @@ fill_holes:
BTRFS_FILE_EXTENT_INLINE) { BTRFS_FILE_EXTENT_INLINE) {
len = btrfs_file_extent_inline_len(src, i, extent); len = btrfs_file_extent_inline_len(src, i, extent);
extent_end = ALIGN(key.offset + len, extent_end = ALIGN(key.offset + len,
log->fs_info->sectorsize); fs_info->sectorsize);
} else { } else {
len = btrfs_file_extent_num_bytes(src, extent); len = btrfs_file_extent_num_bytes(src, extent);
extent_end = key.offset + len; extent_end = key.offset + len;
@@ -3904,6 +3909,7 @@ static int wait_ordered_extents(struct btrfs_trans_handle *trans,
const struct list_head *logged_list, const struct list_head *logged_list,
bool *ordered_io_error) bool *ordered_io_error)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_ordered_extent *ordered; struct btrfs_ordered_extent *ordered;
struct btrfs_root *log = root->log_root; struct btrfs_root *log = root->log_root;
u64 mod_start = em->mod_start; u64 mod_start = em->mod_start;
@@ -4020,7 +4026,7 @@ static int wait_ordered_extents(struct btrfs_trans_handle *trans,
} }
/* block start is already adjusted for the file extent offset. */ /* block start is already adjusted for the file extent offset. */
ret = btrfs_lookup_csums_range(log->fs_info->csum_root, ret = btrfs_lookup_csums_range(fs_info->csum_root,
em->block_start + csum_offset, em->block_start + csum_offset,
em->block_start + csum_offset + em->block_start + csum_offset +
csum_len - 1, &ordered_sums, 0); csum_len - 1, &ordered_sums, 0);
@@ -4363,6 +4369,7 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
struct inode *inode, struct inode *inode,
struct btrfs_path *path) struct btrfs_path *path)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
int ret; int ret;
struct btrfs_key key; struct btrfs_key key;
u64 hole_start; u64 hole_start;
@ -4372,7 +4379,7 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
const u64 ino = btrfs_ino(inode); const u64 ino = btrfs_ino(inode);
const u64 i_size = i_size_read(inode); const u64 i_size = i_size_read(inode);
if (!btrfs_fs_incompat(root->fs_info, NO_HOLES)) if (!btrfs_fs_incompat(fs_info, NO_HOLES))
return 0; return 0;
key.objectid = ino; key.objectid = ino;
@ -4429,7 +4436,7 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
if (hole_size == 0) if (hole_size == 0)
return 0; return 0;
hole_size = ALIGN(hole_size, root->fs_info->sectorsize); hole_size = ALIGN(hole_size, fs_info->sectorsize);
ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0, ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0,
hole_size, 0, hole_size, 0, 0, 0); hole_size, 0, hole_size, 0, 0, 0);
return ret; return ret;
@ -4587,6 +4594,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
const loff_t end, const loff_t end,
struct btrfs_log_ctx *ctx) struct btrfs_log_ctx *ctx)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path; struct btrfs_path *path;
struct btrfs_path *dst_path; struct btrfs_path *dst_path;
struct btrfs_key min_key; struct btrfs_key min_key;
@ -4639,7 +4647,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
* fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items). * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
*/ */
if (S_ISDIR(inode->i_mode) || if (S_ISDIR(inode->i_mode) ||
BTRFS_I(inode)->generation > root->fs_info->last_trans_committed) BTRFS_I(inode)->generation > fs_info->last_trans_committed)
ret = btrfs_commit_inode_delayed_items(trans, inode); ret = btrfs_commit_inode_delayed_items(trans, inode);
else else
ret = btrfs_commit_inode_delayed_inode(inode); ret = btrfs_commit_inode_delayed_inode(inode);
@ -4776,7 +4784,7 @@ again:
inode_key.objectid = other_ino; inode_key.objectid = other_ino;
inode_key.type = BTRFS_INODE_ITEM_KEY; inode_key.type = BTRFS_INODE_ITEM_KEY;
inode_key.offset = 0; inode_key.offset = 0;
other_inode = btrfs_iget(root->fs_info->sb, other_inode = btrfs_iget(fs_info->sb,
&inode_key, root, &inode_key, root,
NULL); NULL);
/* /*
@ -5140,6 +5148,7 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
struct inode *start_inode, struct inode *start_inode,
struct btrfs_log_ctx *ctx) struct btrfs_log_ctx *ctx)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *log = root->log_root; struct btrfs_root *log = root->log_root;
struct btrfs_path *path; struct btrfs_path *path;
LIST_HEAD(dir_list); LIST_HEAD(dir_list);
@ -5207,8 +5216,7 @@ process_leaf:
if (di_key.type == BTRFS_ROOT_ITEM_KEY) if (di_key.type == BTRFS_ROOT_ITEM_KEY)
continue; continue;
di_inode = btrfs_iget(root->fs_info->sb, &di_key, di_inode = btrfs_iget(fs_info->sb, &di_key, root, NULL);
root, NULL);
if (IS_ERR(di_inode)) { if (IS_ERR(di_inode)) {
ret = PTR_ERR(di_inode); ret = PTR_ERR(di_inode);
goto next_dir_inode; goto next_dir_inode;
@ -5270,6 +5278,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
struct inode *inode, struct inode *inode,
struct btrfs_log_ctx *ctx) struct btrfs_log_ctx *ctx)
{ {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int ret; int ret;
struct btrfs_path *path; struct btrfs_path *path;
struct btrfs_key key; struct btrfs_key key;
@ -5334,7 +5343,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
cur_offset = item_size; cur_offset = item_size;
} }
dir_inode = btrfs_iget(root->fs_info->sb, &inode_key, dir_inode = btrfs_iget(fs_info->sb, &inode_key,
root, NULL); root, NULL);
/* If parent inode was deleted, skip it. */ /* If parent inode was deleted, skip it. */
if (IS_ERR(dir_inode)) if (IS_ERR(dir_inode))
@ -5376,17 +5385,18 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
int exists_only, int exists_only,
struct btrfs_log_ctx *ctx) struct btrfs_log_ctx *ctx)
{ {
struct btrfs_fs_info *fs_info = root->fs_info;
int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL; int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
struct super_block *sb; struct super_block *sb;
struct dentry *old_parent = NULL; struct dentry *old_parent = NULL;
int ret = 0; int ret = 0;
u64 last_committed = root->fs_info->last_trans_committed; u64 last_committed = fs_info->last_trans_committed;
bool log_dentries = false; bool log_dentries = false;
struct inode *orig_inode = inode; struct inode *orig_inode = inode;
sb = inode->i_sb; sb = inode->i_sb;
if (btrfs_test_opt(root->fs_info, NOTREELOG)) { if (btrfs_test_opt(fs_info, NOTREELOG)) {
ret = 1; ret = 1;
goto end_no_trans; goto end_no_trans;
} }
@ -5395,8 +5405,8 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
* The prev transaction commit doesn't complete, we need do * The prev transaction commit doesn't complete, we need do
* full commit by ourselves. * full commit by ourselves.
*/ */
if (root->fs_info->last_trans_log_full_commit > if (fs_info->last_trans_log_full_commit >
root->fs_info->last_trans_committed) { fs_info->last_trans_committed) {
ret = 1; ret = 1;
goto end_no_trans; goto end_no_trans;
} }
@ -5517,7 +5527,7 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
end_trans: end_trans:
dput(old_parent); dput(old_parent);
if (ret < 0) { if (ret < 0) {
btrfs_set_log_full_commit(root->fs_info, trans); btrfs_set_log_full_commit(fs_info, trans);
ret = 1; ret = 1;
} }
@ -5788,6 +5798,7 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans,
struct inode *inode, struct inode *old_dir, struct inode *inode, struct inode *old_dir,
struct dentry *parent) struct dentry *parent)
{ {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root * root = BTRFS_I(inode)->root; struct btrfs_root * root = BTRFS_I(inode)->root;
/* /*
@ -5802,9 +5813,9 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans,
* from hasn't been logged, we don't need to log it * from hasn't been logged, we don't need to log it
*/ */
if (BTRFS_I(inode)->logged_trans <= if (BTRFS_I(inode)->logged_trans <=
root->fs_info->last_trans_committed && fs_info->last_trans_committed &&
(!old_dir || BTRFS_I(old_dir)->logged_trans <= (!old_dir || BTRFS_I(old_dir)->logged_trans <=
root->fs_info->last_trans_committed)) fs_info->last_trans_committed))
return 0; return 0;
return btrfs_log_inode_parent(trans, root, inode, parent, 0, return btrfs_log_inode_parent(trans, root, inode, parent, 0,

View File

@ -187,8 +187,8 @@ int btrfs_uuid_tree_rem(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, uuid_root, &key, path, -1, 1); ret = btrfs_search_slot(trans, uuid_root, &key, path, -1, 1);
if (ret < 0) { if (ret < 0) {
btrfs_warn(uuid_root->fs_info, btrfs_warn(fs_info, "error %d while searching for uuid item!",
"error %d while searching for uuid item!", ret); ret);
goto out; goto out;
} }
if (ret > 0) { if (ret > 0) {
@ -201,8 +201,7 @@ int btrfs_uuid_tree_rem(struct btrfs_trans_handle *trans,
offset = btrfs_item_ptr_offset(eb, slot); offset = btrfs_item_ptr_offset(eb, slot);
item_size = btrfs_item_size_nr(eb, slot); item_size = btrfs_item_size_nr(eb, slot);
if (!IS_ALIGNED(item_size, sizeof(u64))) { if (!IS_ALIGNED(item_size, sizeof(u64))) {
btrfs_warn(uuid_root->fs_info, btrfs_warn(fs_info, "uuid item with illegal size %lu!",
"uuid item with illegal size %lu!",
(unsigned long)item_size); (unsigned long)item_size);
ret = -ENOENT; ret = -ENOENT;
goto out; goto out;

File diff suppressed because it is too large
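
The hunks above all apply the same mechanical transformation: a function that dereferences root->fs_info (or reaches the fs_info through an inode's superblock) more than once gains a local fs_info pointer, initialized once at the top and used at every call site below. The following is a minimal, self-contained sketch of that pattern only; the struct fields and the two example_align_* functions are hypothetical stand-ins for illustration, not the real btrfs definitions.

```c
/*
 * Sketch of the convenience-variable pattern, assuming simplified
 * stand-in types.  Only the fields needed for the illustration are
 * declared; none of this is real btrfs code.
 */
#include <stdint.h>

struct btrfs_fs_info {
	uint32_t sectorsize;
	uint64_t last_trans_committed;
};

struct btrfs_root {
	struct btrfs_fs_info *fs_info;
};

/* Before: every use spells out the full root->fs_info chain. */
static uint64_t example_align_before(struct btrfs_root *root, uint64_t len)
{
	if (len % root->fs_info->sectorsize)
		len += root->fs_info->sectorsize -
		       len % root->fs_info->sectorsize;
	return len;
}

/* After: one dereference at the top, shorter and clearer lines below. */
static uint64_t example_align_after(struct btrfs_root *root, uint64_t len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (len % fs_info->sectorsize)
		len += fs_info->sectorsize - len % fs_info->sectorsize;
	return len;
}
```

The resulting call sites (btrfs_num_copies(fs_info, ...), btrfs_warn(fs_info, ...), ALIGN(..., fs_info->sectorsize)) fit on one line and read the same way in every function, which is the point of the cleanup.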