/*
 *  linux/fs/affs/file.c
 *
 *  (c) 1996  Hans-Joachim Widmaier - Rewritten
 *
 *  (C) 1993  Ray Burr - Modified for Amiga FFS filesystem.
 *
 *  (C) 1992  Eric Youngdale Modified for ISO 9660 filesystem.
 *
 *  (C) 1991  Linus Torvalds - minix filesystem
 *
 *  affs regular file handling primitives
 */

#include "affs.h"

#if PAGE_SIZE < 4096
#error PAGE_SIZE must be at least 4096
#endif

static int affs_grow_extcache(struct inode *inode, u32 lc_idx);
static struct buffer_head *affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext);
static inline struct buffer_head *affs_get_extblock(struct inode *inode, u32 ext);
static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);
static int affs_file_open(struct inode *inode, struct file *filp);
static int affs_file_release(struct inode *inode, struct file *filp);

const struct file_operations affs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .aio_read       = generic_file_aio_read,
        .write          = do_sync_write,
        .aio_write      = generic_file_aio_write,
        .mmap           = generic_file_mmap,
        .open           = affs_file_open,
        .release        = affs_file_release,
        .fsync          = affs_file_fsync,
        .splice_read    = generic_file_splice_read,
};

const struct inode_operations affs_file_inode_operations = {
        .setattr        = affs_notify_change,
};

static int
affs_file_open(struct inode *inode, struct file *filp)
{
        pr_debug("AFFS: open(%lu,%d)\n",
                 inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));
        atomic_inc(&AFFS_I(inode)->i_opencnt);
        return 0;
}

static int
affs_file_release(struct inode *inode, struct file *filp)
{
        pr_debug("AFFS: release(%lu, %d)\n",
                 inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));

        if (atomic_dec_and_test(&AFFS_I(inode)->i_opencnt)) {
                mutex_lock(&inode->i_mutex);
                if (inode->i_size != AFFS_I(inode)->mmu_private)
                        affs_truncate(inode);
                affs_free_prealloc(inode);
                mutex_unlock(&inode->i_mutex);
        }

        return 0;
}
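
/*
 * An AFFS file is a chain of extension blocks, each holding up to
 * s_hashsize data block pointers.  To avoid rewalking that chain from the
 * file header on every lookup, the in-core inode keeps two small caches:
 * a linear cache (i_lc) holding every 2^i_lc_shift-th extension key, and
 * an associative cache (i_ac) of recently used (ext index, key) pairs.
 * affs_grow_extcache() extends and, if necessary, rescales the linear
 * cache so that it covers extension block lc_idx.
 */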
static int
affs_grow_extcache(struct inode *inode, u32 lc_idx)
{
        struct super_block *sb = inode->i_sb;
        struct buffer_head *bh;
        u32 lc_max;
        int i, j, key;

        if (!AFFS_I(inode)->i_lc) {
                char *ptr = (char *)get_zeroed_page(GFP_NOFS);
                if (!ptr)
                        return -ENOMEM;
                AFFS_I(inode)->i_lc = (u32 *)ptr;
                AFFS_I(inode)->i_ac = (struct affs_ext_key *)(ptr + AFFS_CACHE_SIZE / 2);
        }

        lc_max = AFFS_LC_SIZE << AFFS_I(inode)->i_lc_shift;

        if (AFFS_I(inode)->i_extcnt > lc_max) {
                u32 lc_shift, lc_mask, tmp, off;

                /* need to recalculate linear cache, start from old size */
                lc_shift = AFFS_I(inode)->i_lc_shift;
                tmp = (AFFS_I(inode)->i_extcnt / AFFS_LC_SIZE) >> lc_shift;
                for (; tmp; tmp >>= 1)
                        lc_shift++;
                lc_mask = (1 << lc_shift) - 1;

                /* fix idx and old size to new shift */
                lc_idx >>= (lc_shift - AFFS_I(inode)->i_lc_shift);
                AFFS_I(inode)->i_lc_size >>= (lc_shift - AFFS_I(inode)->i_lc_shift);

                /* first shrink old cache to make more space */
                off = 1 << (lc_shift - AFFS_I(inode)->i_lc_shift);
                for (i = 1, j = off; j < AFFS_LC_SIZE; i++, j += off)
                        AFFS_I(inode)->i_ac[i] = AFFS_I(inode)->i_ac[j];

                AFFS_I(inode)->i_lc_shift = lc_shift;
                AFFS_I(inode)->i_lc_mask = lc_mask;
        }

        /* fill cache to the needed index */
        i = AFFS_I(inode)->i_lc_size;
        AFFS_I(inode)->i_lc_size = lc_idx + 1;
        for (; i <= lc_idx; i++) {
                if (!i) {
                        AFFS_I(inode)->i_lc[0] = inode->i_ino;
                        continue;
                }
                key = AFFS_I(inode)->i_lc[i - 1];
                j = AFFS_I(inode)->i_lc_mask + 1;
                // unlock cache
                for (; j > 0; j--) {
                        bh = affs_bread(sb, key);
                        if (!bh)
                                goto err;
                        key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
                        affs_brelse(bh);
                }
                // lock cache
                AFFS_I(inode)->i_lc[i] = key;
        }

        return 0;

err:
        // lock cache
        return -EIO;
}
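
/*
 * Allocate a new extension block after 'bh', initialise its header and
 * tail, chain it to 'bh' via the extension pointer and bump i_extcnt.
 * Returns the new (dirty) buffer or an ERR_PTR on failure.
 */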
static struct buffer_head *
affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext)
{
        struct super_block *sb = inode->i_sb;
        struct buffer_head *new_bh;
        u32 blocknr, tmp;

        blocknr = affs_alloc_block(inode, bh->b_blocknr);
        if (!blocknr)
                return ERR_PTR(-ENOSPC);

        new_bh = affs_getzeroblk(sb, blocknr);
        if (!new_bh) {
                affs_free_block(sb, blocknr);
                return ERR_PTR(-EIO);
        }

        AFFS_HEAD(new_bh)->ptype = cpu_to_be32(T_LIST);
        AFFS_HEAD(new_bh)->key = cpu_to_be32(blocknr);
        AFFS_TAIL(sb, new_bh)->stype = cpu_to_be32(ST_FILE);
        AFFS_TAIL(sb, new_bh)->parent = cpu_to_be32(inode->i_ino);
        affs_fix_checksum(sb, new_bh);
        mark_buffer_dirty_inode(new_bh, inode);

        tmp = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
        if (tmp)
                affs_warning(sb, "alloc_ext", "previous extension set (%x)", tmp);
        AFFS_TAIL(sb, bh)->extension = cpu_to_be32(blocknr);
        affs_adjust_checksum(bh, blocknr - tmp);
        mark_buffer_dirty_inode(bh, inode);

        AFFS_I(inode)->i_extcnt++;
        mark_inode_dirty(inode);

        return new_bh;
}

static inline struct buffer_head *
affs_get_extblock(struct inode *inode, u32 ext)
{
        /* inline the simplest case: same extended block as last time */
        struct buffer_head *bh = AFFS_I(inode)->i_ext_bh;
        if (ext == AFFS_I(inode)->i_ext_last)
                get_bh(bh);
        else
                /* we have to do more (not inlined) */
                bh = affs_get_extblock_slow(inode, ext);

        return bh;
}
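
/*
 * Slow path of affs_get_extblock(): locate extension block 'ext' by
 * following the chain from the currently cached block, by consulting the
 * linear/associative caches, or by allocating a new block at the end of
 * the chain; the result replaces the cached i_ext_bh/i_ext_last pair.
 */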
static struct buffer_head *
affs_get_extblock_slow(struct inode *inode, u32 ext)
{
        struct super_block *sb = inode->i_sb;
        struct buffer_head *bh;
        u32 ext_key;
        u32 lc_idx, lc_off, ac_idx;
        u32 tmp, idx;

        if (ext == AFFS_I(inode)->i_ext_last + 1) {
                /* read the next extended block from the current one */
                bh = AFFS_I(inode)->i_ext_bh;
                ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
                if (ext < AFFS_I(inode)->i_extcnt)
                        goto read_ext;
                if (ext > AFFS_I(inode)->i_extcnt)
                        BUG();
                bh = affs_alloc_extblock(inode, bh, ext);
                if (IS_ERR(bh))
                        return bh;
                goto store_ext;
        }

        if (ext == 0) {
                /* we seek back to the file header block */
                ext_key = inode->i_ino;
                goto read_ext;
        }

        if (ext >= AFFS_I(inode)->i_extcnt) {
                struct buffer_head *prev_bh;

                /* allocate a new extended block */
                if (ext > AFFS_I(inode)->i_extcnt)
                        BUG();

                /* get previous extended block */
                prev_bh = affs_get_extblock(inode, ext - 1);
                if (IS_ERR(prev_bh))
                        return prev_bh;
                bh = affs_alloc_extblock(inode, prev_bh, ext);
                affs_brelse(prev_bh);
                if (IS_ERR(bh))
                        return bh;
                goto store_ext;
        }

again:
        /* check if there is an extended cache and whether it's large enough */
        lc_idx = ext >> AFFS_I(inode)->i_lc_shift;
        lc_off = ext & AFFS_I(inode)->i_lc_mask;

        if (lc_idx >= AFFS_I(inode)->i_lc_size) {
                int err;

                err = affs_grow_extcache(inode, lc_idx);
                if (err)
                        return ERR_PTR(err);
                goto again;
        }

        /* every n'th key we find in the linear cache */
        if (!lc_off) {
                ext_key = AFFS_I(inode)->i_lc[lc_idx];
                goto read_ext;
        }

        /* maybe it's still in the associative cache */
        ac_idx = (ext - lc_idx - 1) & AFFS_AC_MASK;
        if (AFFS_I(inode)->i_ac[ac_idx].ext == ext) {
                ext_key = AFFS_I(inode)->i_ac[ac_idx].key;
                goto read_ext;
        }

        /* try to find one of the previous extended blocks */
        tmp = ext;
        idx = ac_idx;
        while (--tmp, --lc_off > 0) {
                idx = (idx - 1) & AFFS_AC_MASK;
                if (AFFS_I(inode)->i_ac[idx].ext == tmp) {
                        ext_key = AFFS_I(inode)->i_ac[idx].key;
                        goto find_ext;
                }
        }

        /* fall back to the linear cache */
        ext_key = AFFS_I(inode)->i_lc[lc_idx];
find_ext:
        /* read all extended blocks until we find the one we need */
        //unlock cache
        do {
                bh = affs_bread(sb, ext_key);
                if (!bh)
                        goto err_bread;
                ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
                affs_brelse(bh);
                tmp++;
        } while (tmp < ext);
        //lock cache

        /* store it in the associative cache */
        // recalculate ac_idx?
        AFFS_I(inode)->i_ac[ac_idx].ext = ext;
        AFFS_I(inode)->i_ac[ac_idx].key = ext_key;

read_ext:
        /* finally read the right extended block */
        //unlock cache
        bh = affs_bread(sb, ext_key);
        if (!bh)
                goto err_bread;
        //lock cache

store_ext:
        /* release old cached extended block and store the new one */
        affs_brelse(AFFS_I(inode)->i_ext_bh);
        AFFS_I(inode)->i_ext_last = ext;
        AFFS_I(inode)->i_ext_bh = bh;
        get_bh(bh);

        return bh;

err_bread:
        affs_brelse(bh);
        return ERR_PTR(-EIO);
}
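
/*
 * get_block callback for the generic buffer/page code: look up (or, with
 * 'create' set, allocate) the data block pointer for 'block' in the right
 * extension block and map bh_result to it.
 */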
static int
affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
{
        struct super_block *sb = inode->i_sb;
        struct buffer_head *ext_bh;
        u32 ext;

        pr_debug("AFFS: get_block(%u, %lu)\n", (u32)inode->i_ino, (unsigned long)block);

        BUG_ON(block > (sector_t)0x7fffffffUL);

        if (block >= AFFS_I(inode)->i_blkcnt) {
                if (block > AFFS_I(inode)->i_blkcnt || !create)
                        goto err_big;
        } else
                create = 0;

        //lock cache
        affs_lock_ext(inode);

        ext = (u32)block / AFFS_SB(sb)->s_hashsize;
        block -= ext * AFFS_SB(sb)->s_hashsize;
        ext_bh = affs_get_extblock(inode, ext);
        if (IS_ERR(ext_bh))
                goto err_ext;
        map_bh(bh_result, sb, (sector_t)be32_to_cpu(AFFS_BLOCK(sb, ext_bh, block)));

        if (create) {
                u32 blocknr = affs_alloc_block(inode, ext_bh->b_blocknr);
                if (!blocknr)
                        goto err_alloc;
                set_buffer_new(bh_result);
                AFFS_I(inode)->mmu_private += AFFS_SB(sb)->s_data_blksize;
                AFFS_I(inode)->i_blkcnt++;

                /* store new block */
                if (bh_result->b_blocknr)
                        affs_warning(sb, "get_block", "block already set (%x)", bh_result->b_blocknr);
                AFFS_BLOCK(sb, ext_bh, block) = cpu_to_be32(blocknr);
                AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(block + 1);
                affs_adjust_checksum(ext_bh, blocknr - bh_result->b_blocknr + 1);
                bh_result->b_blocknr = blocknr;

                if (!block) {
                        /* insert first block into header block */
                        u32 tmp = be32_to_cpu(AFFS_HEAD(ext_bh)->first_data);
                        if (tmp)
                                affs_warning(sb, "get_block", "first block already set (%d)", tmp);
                        AFFS_HEAD(ext_bh)->first_data = cpu_to_be32(blocknr);
                        affs_adjust_checksum(ext_bh, blocknr - tmp);
                }
        }

        affs_brelse(ext_bh);
        //unlock cache
        affs_unlock_ext(inode);
        return 0;

err_big:
        affs_error(inode->i_sb, "get_block", "strange block request %d", block);
        return -EIO;
err_ext:
        // unlock cache
        affs_unlock_ext(inode);
        return PTR_ERR(ext_bh);
err_alloc:
        brelse(ext_bh);
        clear_buffer_mapped(bh_result);
        bh_result->b_bdev = NULL;
        // unlock cache
        affs_unlock_ext(inode);
        return -ENOSPC;
}

static int affs_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, affs_get_block, wbc);
}

static int affs_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, affs_get_block);
}

static void affs_write_failed(struct address_space *mapping, loff_t to)
{
        struct inode *inode = mapping->host;

        if (to > inode->i_size) {
                truncate_pagecache(inode, inode->i_size);
                affs_truncate(inode);
        }
}

static int affs_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        int ret;

        *pagep = NULL;
        ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
                                affs_get_block,
                                &AFFS_I(mapping->host)->mmu_private);
        if (unlikely(ret))
                affs_write_failed(mapping, pos + len);

        return ret;
}

static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, affs_get_block);
}

const struct address_space_operations affs_aops = {
        .readpage = affs_readpage,
        .writepage = affs_writepage,
        .write_begin = affs_write_begin,
        .write_end = generic_write_end,
        .bmap = _affs_bmap
};
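
/*
 * Helpers that translate a logical file block through affs_get_block()
 * and return the corresponding buffer_head from the block device.
 */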
static inline struct buffer_head *
affs_bread_ino(struct inode *inode, int block, int create)
{
        struct buffer_head *bh, tmp_bh;
        int err;

        tmp_bh.b_state = 0;
        err = affs_get_block(inode, block, &tmp_bh, create);
        if (!err) {
                bh = affs_bread(inode->i_sb, tmp_bh.b_blocknr);
                if (bh) {
                        bh->b_state |= tmp_bh.b_state;
                        return bh;
                }
                err = -EIO;
        }
        return ERR_PTR(err);
}

static inline struct buffer_head *
affs_getzeroblk_ino(struct inode *inode, int block)
{
        struct buffer_head *bh, tmp_bh;
        int err;

        tmp_bh.b_state = 0;
        err = affs_get_block(inode, block, &tmp_bh, 1);
        if (!err) {
                bh = affs_getzeroblk(inode->i_sb, tmp_bh.b_blocknr);
                if (bh) {
                        bh->b_state |= tmp_bh.b_state;
                        return bh;
                }
                err = -EIO;
        }
        return ERR_PTR(err);
}

static inline struct buffer_head *
affs_getemptyblk_ino(struct inode *inode, int block)
{
        struct buffer_head *bh, tmp_bh;
        int err;

        tmp_bh.b_state = 0;
        err = affs_get_block(inode, block, &tmp_bh, 1);
        if (!err) {
                bh = affs_getemptyblk(inode->i_sb, tmp_bh.b_blocknr);
                if (bh) {
                        bh->b_state |= tmp_bh.b_state;
                        return bh;
                }
                err = -EIO;
        }
        return ERR_PTR(err);
}
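
/*
 * Read part of a page on an OFS volume.  OFS data blocks carry their own
 * header, so the payload of each block has to be copied into the page
 * individually instead of mapping the page block by block.
 */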
static int
affs_do_readpage_ofs(struct file *file, struct page *page, unsigned from, unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct super_block *sb = inode->i_sb;
        struct buffer_head *bh;
        char *data;
        u32 bidx, boff, bsize;
        u32 tmp;

        pr_debug("AFFS: read_page(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to);
        BUG_ON(from > to || to > PAGE_CACHE_SIZE);
        kmap(page);
        data = page_address(page);
        bsize = AFFS_SB(sb)->s_data_blksize;
        tmp = (page->index << PAGE_CACHE_SHIFT) + from;
        bidx = tmp / bsize;
        boff = tmp % bsize;

        while (from < to) {
                bh = affs_bread_ino(inode, bidx, 0);
                if (IS_ERR(bh))
                        return PTR_ERR(bh);
                tmp = min(bsize - boff, to - from);
                BUG_ON(from + tmp > to || tmp > bsize);
                memcpy(data + from, AFFS_DATA(bh) + boff, tmp);
                affs_brelse(bh);
                bidx++;
                from += tmp;
                boff = 0;
        }
        flush_dcache_page(page);
        kunmap(page);
        return 0;
}
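
/*
 * Grow an OFS file to 'newsize': zero-fill the tail of the last partial
 * data block and append freshly initialised, zeroed data blocks until the
 * new size is covered.
 */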
static int
affs_extent_file_ofs(struct inode *inode, u32 newsize)
{
        struct super_block *sb = inode->i_sb;
        struct buffer_head *bh, *prev_bh;
        u32 bidx, boff;
        u32 size, bsize;
        u32 tmp;

        pr_debug("AFFS: extent_file(%u, %d)\n", (u32)inode->i_ino, newsize);
        bsize = AFFS_SB(sb)->s_data_blksize;
        bh = NULL;
        size = AFFS_I(inode)->mmu_private;
        bidx = size / bsize;
        boff = size % bsize;
        if (boff) {
                bh = affs_bread_ino(inode, bidx, 0);
                if (IS_ERR(bh))
                        return PTR_ERR(bh);
                tmp = min(bsize - boff, newsize - size);
                BUG_ON(boff + tmp > bsize || tmp > bsize);
                memset(AFFS_DATA(bh) + boff, 0, tmp);
                be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
                affs_fix_checksum(sb, bh);
                mark_buffer_dirty_inode(bh, inode);
                size += tmp;
                bidx++;
        } else if (bidx) {
                bh = affs_bread_ino(inode, bidx - 1, 0);
                if (IS_ERR(bh))
                        return PTR_ERR(bh);
        }

        while (size < newsize) {
                prev_bh = bh;
                bh = affs_getzeroblk_ino(inode, bidx);
                if (IS_ERR(bh))
                        goto out;
                tmp = min(bsize, newsize - size);
                BUG_ON(tmp > bsize);
                AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
                AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
                AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
                AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
                affs_fix_checksum(sb, bh);
                bh->b_state &= ~(1UL << BH_New);
                mark_buffer_dirty_inode(bh, inode);
                if (prev_bh) {
                        u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
                        if (tmp)
                                affs_warning(sb, "extent_file_ofs", "next block already set for %d (%d)", bidx, tmp);
                        AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
                        affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
                        mark_buffer_dirty_inode(prev_bh, inode);
                        affs_brelse(prev_bh);
                }
                size += bsize;
                bidx++;
        }
        affs_brelse(bh);
        inode->i_size = AFFS_I(inode)->mmu_private = newsize;
        return 0;

out:
        inode->i_size = AFFS_I(inode)->mmu_private = newsize;
        return PTR_ERR(bh);
}

static int
affs_readpage_ofs(struct file *file, struct page *page)
{
        struct inode *inode = page->mapping->host;
        u32 to;
        int err;

        pr_debug("AFFS: read_page(%u, %ld)\n", (u32)inode->i_ino, page->index);
        to = PAGE_CACHE_SIZE;
        if (((page->index + 1) << PAGE_CACHE_SHIFT) > inode->i_size) {
                to = inode->i_size & ~PAGE_CACHE_MASK;
                memset(page_address(page) + to, 0, PAGE_CACHE_SIZE - to);
        }

        err = affs_do_readpage_ofs(file, page, 0, to);
        if (!err)
                SetPageUptodate(page);
        unlock_page(page);
        return err;
}
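
/*
 * write_begin for the OFS address space: extend the file first if the
 * write starts beyond the currently allocated size, then make sure the
 * page is read in so that write_end_ofs can copy into the data blocks.
 */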
static int affs_write_begin_ofs(struct file *file, struct address_space *mapping,
                                loff_t pos, unsigned len, unsigned flags,
                                struct page **pagep, void **fsdata)
{
        struct inode *inode = mapping->host;
        struct page *page;
        pgoff_t index;
        int err = 0;

        pr_debug("AFFS: write_begin(%u, %llu, %llu)\n", (u32)inode->i_ino, (unsigned long long)pos, (unsigned long long)pos + len);
        if (pos > AFFS_I(inode)->mmu_private) {
                /* XXX: this probably leaves a too-big i_size in case of
                 * failure. Should really be updating i_size at write_end time
                 */
                err = affs_extent_file_ofs(inode, pos);
                if (err)
                        return err;
        }

        index = pos >> PAGE_CACHE_SHIFT;
        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
                return -ENOMEM;
        *pagep = page;

        if (PageUptodate(page))
                return 0;

        /* XXX: inefficient but safe in the face of short writes */
        err = affs_do_readpage_ofs(file, page, 0, PAGE_CACHE_SIZE);
        if (err) {
                unlock_page(page);
                page_cache_release(page);
        }
        return err;
}
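
/*
 * Copy the written range of the page into the OFS data blocks, allocating
 * and chaining new blocks as needed, and update i_size/mmu_private.
 */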
static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
                                loff_t pos, unsigned len, unsigned copied,
                                struct page *page, void *fsdata)
{
        struct inode *inode = mapping->host;
        struct super_block *sb = inode->i_sb;
        struct buffer_head *bh, *prev_bh;
        char *data;
        u32 bidx, boff, bsize;
        unsigned from, to;
        u32 tmp;
        int written;

        from = pos & (PAGE_CACHE_SIZE - 1);
        to = from + len;
        /*
         * XXX: not sure if this can handle short copies (copied < len), but
         * we don't have to, because the page should always be uptodate here,
         * due to write_begin.
         */
        pr_debug("AFFS: write_end(%u, %llu, %llu)\n", (u32)inode->i_ino, (unsigned long long)pos, (unsigned long long)pos + len);

        bsize = AFFS_SB(sb)->s_data_blksize;
        data = page_address(page);

        bh = NULL;
        written = 0;
        tmp = (page->index << PAGE_CACHE_SHIFT) + from;
        bidx = tmp / bsize;
        boff = tmp % bsize;
        if (boff) {
                bh = affs_bread_ino(inode, bidx, 0);
                if (IS_ERR(bh))
                        return PTR_ERR(bh);
                tmp = min(bsize - boff, to - from);
                BUG_ON(boff + tmp > bsize || tmp > bsize);
                memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
                be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
                affs_fix_checksum(sb, bh);
                mark_buffer_dirty_inode(bh, inode);
                written += tmp;
                from += tmp;
                bidx++;
        } else if (bidx) {
                bh = affs_bread_ino(inode, bidx - 1, 0);
                if (IS_ERR(bh))
                        return PTR_ERR(bh);
        }
        while (from + bsize <= to) {
                prev_bh = bh;
                bh = affs_getemptyblk_ino(inode, bidx);
                if (IS_ERR(bh))
                        goto out;
                memcpy(AFFS_DATA(bh), data + from, bsize);
                if (buffer_new(bh)) {
                        AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
                        AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
                        AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
                        AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize);
                        AFFS_DATA_HEAD(bh)->next = 0;
                        bh->b_state &= ~(1UL << BH_New);
                        if (prev_bh) {
                                u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
                                if (tmp)
                                        affs_warning(sb, "commit_write_ofs", "next block already set for %d (%d)", bidx, tmp);
                                AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
                                affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
                                mark_buffer_dirty_inode(prev_bh, inode);
                        }
                }
                affs_brelse(prev_bh);
                affs_fix_checksum(sb, bh);
                mark_buffer_dirty_inode(bh, inode);
                written += bsize;
                from += bsize;
                bidx++;
        }
        if (from < to) {
                prev_bh = bh;
                bh = affs_bread_ino(inode, bidx, 1);
                if (IS_ERR(bh))
                        goto out;
                tmp = min(bsize, to - from);
                BUG_ON(tmp > bsize);
                memcpy(AFFS_DATA(bh), data + from, tmp);
                if (buffer_new(bh)) {
                        AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
                        AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
                        AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
                        AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
                        AFFS_DATA_HEAD(bh)->next = 0;
                        bh->b_state &= ~(1UL << BH_New);
                        if (prev_bh) {
                                u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
                                if (tmp)
                                        affs_warning(sb, "commit_write_ofs", "next block already set for %d (%d)", bidx, tmp);
                                AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
                                affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
                                mark_buffer_dirty_inode(prev_bh, inode);
                        }
                } else if (be32_to_cpu(AFFS_DATA_HEAD(bh)->size) < tmp)
                        AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
                affs_brelse(prev_bh);
                affs_fix_checksum(sb, bh);
                mark_buffer_dirty_inode(bh, inode);
                written += tmp;
                from += tmp;
                bidx++;
        }
        SetPageUptodate(page);

done:
        affs_brelse(bh);
        tmp = (page->index << PAGE_CACHE_SHIFT) + from;
        if (tmp > inode->i_size)
                inode->i_size = AFFS_I(inode)->mmu_private = tmp;

        unlock_page(page);
        page_cache_release(page);

        return written;

out:
        bh = prev_bh;
        if (!written)
                written = PTR_ERR(bh);
        goto done;
}

const struct address_space_operations affs_aops_ofs = {
        .readpage = affs_readpage_ofs,
        //.writepage = affs_writepage_ofs,
        .write_begin = affs_write_begin_ofs,
        .write_end = affs_write_end_ofs
};

/* Free any preallocated blocks. */

void
affs_free_prealloc(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;

        pr_debug("AFFS: free_prealloc(ino=%lu)\n", inode->i_ino);

        while (AFFS_I(inode)->i_pa_cnt) {
                AFFS_I(inode)->i_pa_cnt--;
                affs_free_block(sb, ++AFFS_I(inode)->i_lastalloc);
        }
}

/* Truncate (or enlarge) a file to the requested size. */

void
affs_truncate(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;
        u32 ext, ext_key;
        u32 last_blk, blkcnt, blk;
        u32 size;
        struct buffer_head *ext_bh;
        int i;

        pr_debug("AFFS: truncate(inode=%d, oldsize=%u, newsize=%u)\n",
                 (u32)inode->i_ino, (u32)AFFS_I(inode)->mmu_private, (u32)inode->i_size);

        last_blk = 0;
        ext = 0;
        if (inode->i_size) {
                last_blk = ((u32)inode->i_size - 1) / AFFS_SB(sb)->s_data_blksize;
                ext = last_blk / AFFS_SB(sb)->s_hashsize;
        }

        if (inode->i_size > AFFS_I(inode)->mmu_private) {
                struct address_space *mapping = inode->i_mapping;
                struct page *page;
                void *fsdata;
                loff_t size = inode->i_size;
                int res;

                res = mapping->a_ops->write_begin(NULL, mapping, size, 0, 0, &page, &fsdata);
                if (!res)
                        res = mapping->a_ops->write_end(NULL, mapping, size, 0, 0, page, fsdata);
                else
                        inode->i_size = AFFS_I(inode)->mmu_private;
                mark_inode_dirty(inode);
                return;
        } else if (inode->i_size == AFFS_I(inode)->mmu_private)
                return;

        // lock cache
        ext_bh = affs_get_extblock(inode, ext);
        if (IS_ERR(ext_bh)) {
                affs_warning(sb, "truncate", "unexpected read error for ext block %u (%d)",
                             ext, PTR_ERR(ext_bh));
                return;
        }
        if (AFFS_I(inode)->i_lc) {
                /* clear linear cache */
                i = (ext + 1) >> AFFS_I(inode)->i_lc_shift;
                if (AFFS_I(inode)->i_lc_size > i) {
                        AFFS_I(inode)->i_lc_size = i;
                        for (; i < AFFS_LC_SIZE; i++)
                                AFFS_I(inode)->i_lc[i] = 0;
                }
                /* clear associative cache */
                for (i = 0; i < AFFS_AC_SIZE; i++)
                        if (AFFS_I(inode)->i_ac[i].ext >= ext)
                                AFFS_I(inode)->i_ac[i].ext = 0;
        }
        ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);

        blkcnt = AFFS_I(inode)->i_blkcnt;
        i = 0;
        blk = last_blk;
        if (inode->i_size) {
                i = last_blk % AFFS_SB(sb)->s_hashsize + 1;
                blk++;
        } else
                AFFS_HEAD(ext_bh)->first_data = 0;
        AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(i);

        size = AFFS_SB(sb)->s_hashsize;
        if (size > blkcnt - blk + i)
                size = blkcnt - blk + i;
        for (; i < size; i++, blk++) {
                affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
                AFFS_BLOCK(sb, ext_bh, i) = 0;
        }
        AFFS_TAIL(sb, ext_bh)->extension = 0;
        affs_fix_checksum(sb, ext_bh);
        mark_buffer_dirty_inode(ext_bh, inode);
        affs_brelse(ext_bh);

        if (inode->i_size) {
                AFFS_I(inode)->i_blkcnt = last_blk + 1;
                AFFS_I(inode)->i_extcnt = ext + 1;
                if (AFFS_SB(sb)->s_flags & SF_OFS) {
                        struct buffer_head *bh = affs_bread_ino(inode, last_blk, 0);
                        u32 tmp;
                        if (IS_ERR(bh)) {
                                affs_warning(sb, "truncate", "unexpected read error for last block %u (%d)",
                                             ext, PTR_ERR(bh));
                                return;
                        }
                        tmp = be32_to_cpu(AFFS_DATA_HEAD(bh)->next);
                        AFFS_DATA_HEAD(bh)->next = 0;
                        affs_adjust_checksum(bh, -tmp);
                        affs_brelse(bh);
                }
        } else {
                AFFS_I(inode)->i_blkcnt = 0;
                AFFS_I(inode)->i_extcnt = 1;
        }
        AFFS_I(inode)->mmu_private = inode->i_size;
        // unlock cache

        while (ext_key) {
                ext_bh = affs_bread(sb, ext_key);
                size = AFFS_SB(sb)->s_hashsize;
                if (size > blkcnt - blk)
                        size = blkcnt - blk;
                for (i = 0; i < size; i++, blk++)
                        affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
                affs_free_block(sb, ext_key);
                ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);
                affs_brelse(ext_bh);
        }
        affs_free_prealloc(inode);
}
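
/* Flush dirty pages for the range, then write the inode and flush the block device. */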
int affs_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
        struct inode *inode = filp->f_mapping->host;
        int ret, err;

        err = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (err)
                return err;

        mutex_lock(&inode->i_mutex);
        ret = write_inode_now(inode, 0);
        err = sync_blockdev(inode->i_sb->s_bdev);
        if (!ret)
                ret = err;
        mutex_unlock(&inode->i_mutex);
        return ret;
}