f2fs: fix overflow of size calculation
We have a potential overflow issue when calculating the size of an object: when we left-shift an index by PAGE_CACHE_SHIFT bits and the index type has only 32 bits of space on a 32-bit architecture, the left shift overflows, e.g.:

	pgoff_t index = 0xFFFFFFFF;
	loff_t size = index << PAGE_CACHE_SHIFT;

	size: 0xFFFFF000

So cast the index to a 64-bit type before shifting to avoid this issue.

Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
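For illustration only (not part of the patch): a minimal userspace sketch of the same arithmetic, assuming 4 KB pages (PAGE_CACHE_SHIFT == 12) and modelling pgoff_t as a 32-bit unsigned type, shows the truncation and how the cast avoids it.

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_CACHE_SHIFT 12	/* assumed 4 KB page size */

int main(void)
{
	uint32_t index = 0xFFFFFFFF;	/* stands in for pgoff_t on a 32-bit arch */

	/* Shift is evaluated in 32-bit arithmetic, then widened: top bits are lost. */
	uint64_t truncated = index << PAGE_CACHE_SHIFT;

	/* Cast first, so the shift is evaluated in 64-bit arithmetic. */
	uint64_t correct = (uint64_t)index << PAGE_CACHE_SHIFT;

	printf("truncated: 0x%llx\n", (unsigned long long)truncated); /* 0xfffff000 */
	printf("correct  : 0x%llx\n", (unsigned long long)correct);   /* 0xffffffff000 */
	return 0;
}
```

The patch itself casts to loff_t rather than uint64_t, but the principle is the same: widen before shifting, not after.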
commit 9edcdabf36
parent 100136acfb
@@ -447,9 +447,9 @@ repeat:
 		lock_page(page);
 	}
 got_it:
-	if (new_i_size &&
-		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
-		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
+	if (new_i_size && i_size_read(inode) <
+			((loff_t)(index + 1) << PAGE_CACHE_SHIFT)) {
+		i_size_write(inode, ((loff_t)(index + 1) << PAGE_CACHE_SHIFT));
 		/* Only the directory inode sets new_i_size */
 		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
 	}
@@ -489,8 +489,9 @@ alloc:
 	/* update i_size */
 	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
 							dn->ofs_in_node;
-	if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
-		i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));
+	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT))
+		i_size_write(dn->inode,
+				((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT));
 
 	/* direct IO doesn't use extent cache to maximize the performance */
 	f2fs_drop_largest_extent(dn->inode, fofs);
@@ -198,9 +198,9 @@ get_cache:
 
 	si->page_mem = 0;
 	npages = NODE_MAPPING(sbi)->nrpages;
-	si->page_mem += npages << PAGE_CACHE_SHIFT;
+	si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT;
 	npages = META_MAPPING(sbi)->nrpages;
-	si->page_mem += npages << PAGE_CACHE_SHIFT;
+	si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT;
 }
 
 static int stat_show(struct seq_file *s, void *v)
@@ -333,13 +333,13 @@ static int stat_show(struct seq_file *s, void *v)
 
 		/* memory footprint */
 		update_mem_info(si->sbi);
-		seq_printf(s, "\nMemory: %u KB\n",
+		seq_printf(s, "\nMemory: %llu KB\n",
 			(si->base_mem + si->cache_mem + si->page_mem) >> 10);
-		seq_printf(s, " - static: %u KB\n",
+		seq_printf(s, " - static: %llu KB\n",
 				si->base_mem >> 10);
-		seq_printf(s, " - cached: %u KB\n",
+		seq_printf(s, " - cached: %llu KB\n",
 				si->cache_mem >> 10);
-		seq_printf(s, " - paged : %u KB\n",
+		seq_printf(s, " - paged : %llu KB\n",
 				si->page_mem >> 10);
 	}
 	mutex_unlock(&f2fs_stat_mutex);
@@ -1844,7 +1844,7 @@ struct f2fs_stat_info {
 	unsigned int segment_count[2];
 	unsigned int block_count[2];
 	unsigned int inplace_count;
-	unsigned base_mem, cache_mem, page_mem;
+	unsigned long long base_mem, cache_mem, page_mem;
 };
 
 static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
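A side note on the two debugfs hunks above (a sketch, not from the patch): widening base_mem/cache_mem/page_mem to unsigned long long only helps if the shift is also done in 64 bits and the seq_printf() format matches the new type, which is why %u becomes %llu in stat_show(). A rough userspace analogue, with plain printf standing in for seq_printf and a made-up page count:

```c
#include <stdio.h>

#define PAGE_CACHE_SHIFT 12	/* assumed 4 KB page size */

int main(void)
{
	unsigned long nrpages = 0x00200000UL;	/* hypothetical 2M cached pages */
	unsigned long long page_mem = 0;

	/*
	 * On a 32-bit build 'unsigned long' is 32 bits, so an uncast
	 * nrpages << PAGE_CACHE_SHIFT would wrap to 0 here; casting first
	 * keeps the full 8 GB value.
	 */
	page_mem += (unsigned long long)nrpages << PAGE_CACHE_SHIFT;

	/* The format specifier must match the widened type: %llu, not %u. */
	printf("Memory: %llu KB\n", page_mem >> 10);
	return 0;
}
```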
@@ -74,7 +74,8 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
 		goto mapped;
 
 	/* page is wholly or partially inside EOF */
-	if (((page->index + 1) << PAGE_CACHE_SHIFT) > i_size_read(inode)) {
+	if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) >
+						i_size_read(inode)) {
 		unsigned offset;
 		offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
 		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
@@ -343,7 +344,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
 
 	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);
 
-	for (; data_ofs < isize; data_ofs = pgofs << PAGE_CACHE_SHIFT) {
+	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
 		set_new_dnode(&dn, inode, NULL, NULL, 0);
 		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
 		if (err && err != -ENOENT) {
@@ -802,8 +803,8 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
 
 			f2fs_balance_fs(sbi);
 
-			blk_start = pg_start << PAGE_CACHE_SHIFT;
-			blk_end = pg_end << PAGE_CACHE_SHIFT;
+			blk_start = (loff_t)pg_start << PAGE_CACHE_SHIFT;
+			blk_end = (loff_t)pg_end << PAGE_CACHE_SHIFT;
 			truncate_inode_pages_range(mapping, blk_start,
 							blk_end - 1);
 
@@ -994,7 +995,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 			return ret;
 
 		new_size = max_t(loff_t, new_size,
-					pg_start << PAGE_CACHE_SHIFT);
+					(loff_t)pg_start << PAGE_CACHE_SHIFT);
 	}
 
 	for (index = pg_start; index < pg_end; index++) {
@@ -1030,7 +1031,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 			f2fs_unlock_op(sbi);
 
 			new_size = max_t(loff_t, new_size,
-					(index + 1) << PAGE_CACHE_SHIFT);
+				(loff_t)(index + 1) << PAGE_CACHE_SHIFT);
 		}
 
 	if (off_end) {
@@ -1192,9 +1193,10 @@ noalloc:
 		if (pg_start == pg_end)
 			new_size = offset + len;
 		else if (index == pg_start && off_start)
-			new_size = (index + 1) << PAGE_CACHE_SHIFT;
+			new_size = (loff_t)(index + 1) << PAGE_CACHE_SHIFT;
 		else if (index == pg_end)
-			new_size = (index << PAGE_CACHE_SHIFT) + off_end;
+			new_size = ((loff_t)index << PAGE_CACHE_SHIFT) +
+								off_end;
 		else
 			new_size += PAGE_CACHE_SIZE;
 	}
@@ -570,7 +570,7 @@ out:
 
 	/* truncate meta pages to be used by the recovery */
 	truncate_inode_pages_range(META_MAPPING(sbi),
-			MAIN_BLKADDR(sbi) << PAGE_CACHE_SHIFT, -1);
+			(loff_t)MAIN_BLKADDR(sbi) << PAGE_CACHE_SHIFT, -1);
 
 	if (err) {
 		truncate_inode_pages_final(NODE_MAPPING(sbi));