/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 * Copyright © 2004 Thomas Gleixner <tglx@linutronix.de>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 * Modified debugged and enhanced by Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/crc32.h>
#include <linux/mtd/rawnand.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/writeback.h>

#include "nodelist.h"

/* For testing write failures */
#undef BREAKME
#undef BREAKMEHEADER

#ifdef BREAKME
static unsigned char *brokenbuf;
#endif

#define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
#define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )
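
/* Illustration (values are hypothetical, not from the original source):
 * with c->wbuf_pagesize == 0x200 (512 bytes), PAGE_DIV(0x10234) evaluates
 * to 0x10200 and PAGE_MOD(0x10234) to 0x34, i.e. the page-aligned base of
 * the write-buffer page and the offset within that page. */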

/* max. erase failures before we mark a block bad */
#define MAX_ERASE_FAILURES 2

struct jffs2_inodirty {
	uint32_t ino;
	struct jffs2_inodirty *next;
};

static struct jffs2_inodirty inodirty_nomem;

static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inodirty *this = c->wbuf_inodes;

	/* If a malloc failed, consider _everything_ dirty */
	if (this == &inodirty_nomem)
		return 1;

	/* If ino == 0, _any_ non-GC writes mean 'yes' */
	if (this && !ino)
		return 1;

	/* Look to see if the inode in question is pending in the wbuf */
	while (this) {
		if (this->ino == ino)
			return 1;
		this = this->next;
	}
	return 0;
}

static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
{
	struct jffs2_inodirty *this;

	this = c->wbuf_inodes;

	if (this != &inodirty_nomem) {
		while (this) {
			struct jffs2_inodirty *next = this->next;
			kfree(this);
			this = next;
		}
	}
	c->wbuf_inodes = NULL;
}

static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inodirty *new;

	/* Schedule delayed write-buffer write-out */
	jffs2_dirty_trigger(c);

	if (jffs2_wbuf_pending_for_ino(c, ino))
		return;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		jffs2_dbg(1, "No memory to allocate inodirty. Fallback to all considered dirty\n");
		jffs2_clear_wbuf_ino_list(c);
		c->wbuf_inodes = &inodirty_nomem;
		return;
	}
	new->ino = ino;
	new->next = c->wbuf_inodes;
	c->wbuf_inodes = new;
	return;
}

static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
{
	struct list_head *this, *next;
	static int n;

	if (list_empty(&c->erasable_pending_wbuf_list))
		return;

	list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
		struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);

		jffs2_dbg(1, "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n",
			  jeb->offset);
		list_del(this);
		if ((jiffies + (n++)) & 127) {
			/* Most of the time, we just erase it immediately. Otherwise we
			   spend ages scanning it on mount, etc. */
			jffs2_dbg(1, "...and adding to erase_pending_list\n");
			list_add_tail(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_garbage_collect_trigger(c);
		} else {
			/* Sometimes, however, we leave it elsewhere so it doesn't get
			   immediately reused, and we spread the load a bit. */
			jffs2_dbg(1, "...and adding to erasable_list\n");
			list_add_tail(&jeb->list, &c->erasable_list);
		}
	}
}

#define REFILE_NOTEMPTY 0
#define REFILE_ANYWAY 1

static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
{
	jffs2_dbg(1, "About to refile bad block at %08x\n", jeb->offset);

	/* File the existing block on the bad_used_list.... */
	if (c->nextblock == jeb)
		c->nextblock = NULL;
	else /* Not sure this should ever happen... need more coffee */
		list_del(&jeb->list);
	if (jeb->first_node) {
		jffs2_dbg(1, "Refiling block at %08x to bad_used_list\n",
			  jeb->offset);
		list_add(&jeb->list, &c->bad_used_list);
	} else {
		BUG_ON(allow_empty == REFILE_NOTEMPTY);
		/* It has to have had some nodes or we couldn't be here */
		jffs2_dbg(1, "Refiling block at %08x to erase_pending_list\n",
			  jeb->offset);
		list_add(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;
		jffs2_garbage_collect_trigger(c);
	}

	if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) {
		uint32_t oldfree = jeb->free_size;

		jffs2_link_node_ref(c, jeb,
				    (jeb->offset + c->sector_size - oldfree) | REF_OBSOLETE,
				    oldfree, NULL);
		/* convert to wasted */
		c->wasted_size += oldfree;
		jeb->wasted_size += oldfree;
		c->dirty_size -= oldfree;
		jeb->dirty_size -= oldfree;
	}

	jffs2_dbg_dump_block_lists_nolock(c);
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
}

static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c,
							    struct jffs2_inode_info *f,
							    struct jffs2_raw_node_ref *raw,
							    union jffs2_node_union *node)
{
	struct jffs2_node_frag *frag;
	struct jffs2_full_dirent *fd;

	dbg_noderef("incore_replace_raw: node at %p is {%04x,%04x}\n",
		    node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype));

	BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 &&
	       je16_to_cpu(node->u.magic) != 0);

	switch (je16_to_cpu(node->u.nodetype)) {
	case JFFS2_NODETYPE_INODE:
		if (f->metadata && f->metadata->raw == raw) {
			dbg_noderef("Will replace ->raw in f->metadata at %p\n", f->metadata);
			return &f->metadata->raw;
		}
		frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset));
		BUG_ON(!frag);
		/* Find a frag which refers to the full_dnode we want to modify */
		while (!frag->node || frag->node->raw != raw) {
			frag = frag_next(frag);
			BUG_ON(!frag);
		}
		dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node);
		return &frag->node->raw;

	case JFFS2_NODETYPE_DIRENT:
		for (fd = f->dents; fd; fd = fd->next) {
			if (fd->raw == raw) {
				dbg_noderef("Will replace ->raw in full_dirent at %p\n", fd);
				return &fd->raw;
			}
		}
		BUG();

	default:
		dbg_noderef("Don't care about replacing raw for nodetype %x\n",
			    je16_to_cpu(node->u.nodetype));
		break;
	}
	return NULL;
}

#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
static int jffs2_verify_write(struct jffs2_sb_info *c, unsigned char *buf,
			      uint32_t ofs)
{
	int ret;
	size_t retlen;
	char *eccstr;

	ret = mtd_read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify);
	if (ret && ret != -EUCLEAN && ret != -EBADMSG) {
		pr_warn("%s(): Read back of page at %08x failed: %d\n",
			__func__, c->wbuf_ofs, ret);
		return ret;
	} else if (retlen != c->wbuf_pagesize) {
		pr_warn("%s(): Read back of page at %08x gave short read: %zd not %d\n",
			__func__, ofs, retlen, c->wbuf_pagesize);
		return -EIO;
	}

	if (!memcmp(buf, c->wbuf_verify, c->wbuf_pagesize))
		return 0;

	if (ret == -EUCLEAN)
		eccstr = "corrected";
	else if (ret == -EBADMSG)
		eccstr = "correction failed";
	else
		eccstr = "OK or unused";

	pr_warn("Write verify error (ECC %s) at %08x. Wrote:\n",
		eccstr, c->wbuf_ofs);
	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
		       c->wbuf, c->wbuf_pagesize, 0);

	pr_warn("Read back:\n");
	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
		       c->wbuf_verify, c->wbuf_pagesize, 0);

	return -EIO;
}
#else
#define jffs2_verify_write(c,b,o) (0)
#endif

/* Recover from failure to write wbuf. Recover the nodes up to the
 * wbuf, not the one which we were starting to try to write. */
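/* Overview of the recovery path implemented below: refile the failing
 * block, find the first non-obsolete node at or after wbuf_ofs, read back
 * any data that already made it to flash, reserve space in a fresh block,
 * rewrite the surviving data there and move the raw_node_refs across. */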
static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
{
	struct jffs2_eraseblock *jeb, *new_jeb;
	struct jffs2_raw_node_ref *raw, *next, *first_raw = NULL;
	size_t retlen;
	int ret;
	int nr_refile = 0;
	unsigned char *buf;
	uint32_t start, end, ofs, len;

	jeb = &c->blocks[c->wbuf_ofs / c->sector_size];

	spin_lock(&c->erase_completion_lock);
	if (c->wbuf_ofs % c->mtd->erasesize)
		jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
	else
		jffs2_block_refile(c, jeb, REFILE_ANYWAY);
	spin_unlock(&c->erase_completion_lock);

	BUG_ON(!ref_obsolete(jeb->last_node));

	/* Find the first node to be recovered, by skipping over every
	   node which ends before the wbuf starts, or which is obsolete. */
	for (next = raw = jeb->first_node; next; raw = next) {
		next = ref_next(raw);

		if (ref_obsolete(raw) ||
		    (next && ref_offset(next) <= c->wbuf_ofs)) {
			dbg_noderef("Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
				    ref_offset(raw), ref_flags(raw),
				    (ref_offset(raw) + ref_totlen(c, jeb, raw)),
				    c->wbuf_ofs);
			continue;
		}
		dbg_noderef("First node to be recovered is at 0x%08x(%d)-0x%08x\n",
			    ref_offset(raw), ref_flags(raw),
			    (ref_offset(raw) + ref_totlen(c, jeb, raw)));

		first_raw = raw;
		break;
	}

	if (!first_raw) {
		/* All nodes were obsolete. Nothing to recover. */
		jffs2_dbg(1, "No non-obsolete nodes to be recovered. Just filing block bad\n");
		c->wbuf_len = 0;
		return;
	}

	start = ref_offset(first_raw);
	end = ref_offset(jeb->last_node);
	nr_refile = 1;

	/* Count the number of refs which need to be copied */
	while ((raw = ref_next(raw)) != jeb->last_node)
		nr_refile++;

	dbg_noderef("wbuf recover %08x-%08x (%d bytes in %d nodes)\n",
		    start, end, end - start, nr_refile);

	buf = NULL;
	if (start < c->wbuf_ofs) {
		/* First affected node was already partially written.
		 * Attempt to reread the old data into our buffer. */

		buf = kmalloc(end - start, GFP_KERNEL);
		if (!buf) {
			pr_crit("Malloc failure in wbuf recovery. Data loss ensues.\n");

			goto read_failed;
		}

		/* Do the read... */
		ret = mtd_read(c->mtd, start, c->wbuf_ofs - start, &retlen,
			       buf);

		/* ECC recovered ? */
		if ((ret == -EUCLEAN || ret == -EBADMSG) &&
		    (retlen == c->wbuf_ofs - start))
			ret = 0;

		if (ret || retlen != c->wbuf_ofs - start) {
			pr_crit("Old data are already lost in wbuf recovery. Data loss ensues.\n");

			kfree(buf);
			buf = NULL;
		read_failed:
			first_raw = ref_next(first_raw);
			nr_refile--;
			while (first_raw && ref_obsolete(first_raw)) {
				first_raw = ref_next(first_raw);
				nr_refile--;
			}

			/* If this was the only node to be recovered, give up */
			if (!first_raw) {
				c->wbuf_len = 0;
				return;
			}

			/* It wasn't. Go on and try to recover nodes complete in the wbuf */
			start = ref_offset(first_raw);
			dbg_noderef("wbuf now recover %08x-%08x (%d bytes in %d nodes)\n",
				    start, end, end - start, nr_refile);

		} else {
			/* Read succeeded. Copy the remaining data from the wbuf */
			memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
		}
	}
	/* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
	   Either 'buf' contains the data, or we find it in the wbuf */

	/* ... and get an allocation of space from a shiny new block instead */
	ret = jffs2_reserve_space_gc(c, end - start, &len, JFFS2_SUMMARY_NOSUM_SIZE);
	if (ret) {
		pr_warn("Failed to allocate space for wbuf recovery. Data loss ensues.\n");
		kfree(buf);
		return;
	}

	/* The summary is not recovered, so it must be disabled for this erase block */
	jffs2_sum_disable_collecting(c->summary);

	ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile);
	if (ret) {
		pr_warn("Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
		kfree(buf);
		return;
	}

	ofs = write_ofs(c);

	if (end - start >= c->wbuf_pagesize) {
		/* Need to do another write immediately, but it's possible
		   that this is just because the wbuf itself is completely
		   full, and there's nothing earlier read back from the
		   flash. Hence 'buf' isn't necessarily what we're writing
		   from. */
		unsigned char *rewrite_buf = buf ?: c->wbuf;
		uint32_t towrite = (end - start) - ((end - start) % c->wbuf_pagesize);

		jffs2_dbg(1, "Write 0x%x bytes at 0x%08x in wbuf recover\n",
			  towrite, ofs);

#ifdef BREAKMEHEADER
		static int breakme;
		if (breakme++ == 20) {
			pr_notice("Faking write error at 0x%08x\n", ofs);
			breakme = 0;
			mtd_write(c->mtd, ofs, towrite, &retlen, brokenbuf);
			ret = -EIO;
		} else
#endif
			ret = mtd_write(c->mtd, ofs, towrite, &retlen,
					rewrite_buf);

		if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) {
			/* Argh. We tried. Really we did. */
			pr_crit("Recovery of wbuf failed due to a second write error\n");
			kfree(buf);

			if (retlen)
				jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL);

			return;
		}
		pr_notice("Recovery of wbuf succeeded to %08x\n", ofs);

		c->wbuf_len = (end - start) - towrite;
		c->wbuf_ofs = ofs + towrite;
		memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
		/* Don't muck about with c->wbuf_inodes. False positives are harmless. */
	} else {
		/* OK, now we're left with the dregs in whichever buffer we're using */
		if (buf) {
			memcpy(c->wbuf, buf, end - start);
		} else {
			memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
		}
		c->wbuf_ofs = ofs;
		c->wbuf_len = end - start;
	}

	/* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
	new_jeb = &c->blocks[ofs / c->sector_size];

	spin_lock(&c->erase_completion_lock);
	for (raw = first_raw; raw != jeb->last_node; raw = ref_next(raw)) {
		uint32_t rawlen = ref_totlen(c, jeb, raw);
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref *new_ref;
		struct jffs2_raw_node_ref **adjust_ref = NULL;
		struct jffs2_inode_info *f = NULL;

		jffs2_dbg(1, "Refiling block of %08x at %08x(%d) to %08x\n",
			  rawlen, ref_offset(raw), ref_flags(raw), ofs);

		ic = jffs2_raw_ref_to_ic(raw);

		/* Ick. This XATTR mess should be fixed shortly... */
		if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) {
			struct jffs2_xattr_datum *xd = (void *)ic;
			BUG_ON(xd->node != raw);
			adjust_ref = &xd->node;
			raw->next_in_ino = NULL;
			ic = NULL;
		} else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) {
			struct jffs2_xattr_ref *xr = (void *)ic;
			BUG_ON(xr->node != raw);
			adjust_ref = &xr->node;
			raw->next_in_ino = NULL;
			ic = NULL;
		} else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) {
			struct jffs2_raw_node_ref **p = &ic->nodes;

			/* Remove the old node from the per-inode list */
			while (*p && *p != (void *)ic) {
				if (*p == raw) {
					(*p) = (raw->next_in_ino);
					raw->next_in_ino = NULL;
					break;
				}
				p = &((*p)->next_in_ino);
			}

			if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) {
				/* If it's an in-core inode, then we have to adjust any
				   full_dirent or full_dnode structure to point to the
				   new version instead of the old */
				f = jffs2_gc_fetch_inode(c, ic->ino, !ic->pino_nlink);
				if (IS_ERR(f)) {
					/* Should never happen; it _must_ be present */
					JFFS2_ERROR("Failed to iget() ino #%u, err %ld\n",
						    ic->ino, PTR_ERR(f));
					BUG();
				}
				/* We don't lock f->sem. There's a number of ways we could
				   end up in here with it already being locked, and nobody's
				   going to modify it on us anyway because we hold the
				   alloc_sem. We're only changing one ->raw pointer too,
				   which we can get away with without upsetting readers. */
				adjust_ref = jffs2_incore_replace_raw(c, f, raw,
								      (void *)(buf ?: c->wbuf) + (ref_offset(raw) - start));
			} else if (unlikely(ic->state != INO_STATE_PRESENT &&
					    ic->state != INO_STATE_CHECKEDABSENT &&
					    ic->state != INO_STATE_GC)) {
				JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state);
				BUG();
			}
		}

		new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic);

		if (adjust_ref) {
			BUG_ON(*adjust_ref != raw);
			*adjust_ref = new_ref;
		}
		if (f)
			jffs2_gc_release_inode(c, f);

		if (!ref_obsolete(raw)) {
			jeb->dirty_size += rawlen;
			jeb->used_size -= rawlen;
			c->dirty_size += rawlen;
			c->used_size -= rawlen;
			raw->flash_offset = ref_offset(raw) | REF_OBSOLETE;
			BUG_ON(raw->next_in_ino);
		}
		ofs += rawlen;
	}

	kfree(buf);

	/* Fix up the original jeb now it's on the bad_list */
	if (first_raw == jeb->first_node) {
		jffs2_dbg(1, "Failing block at %08x is now empty. Moving to erase_pending_list\n",
			  jeb->offset);
		list_move(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;
		jffs2_garbage_collect_trigger(c);
	}

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);

	spin_unlock(&c->erase_completion_lock);

	jffs2_dbg(1, "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n",
		  c->wbuf_ofs, c->wbuf_len);

}

/* Meaning of pad argument:
   0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
   1: Pad, do not adjust nextblock free_size
   2: Pad, adjust nextblock free_size
*/
#define NOPAD		0
#define PAD_NOACCOUNT	1
#define PAD_ACCOUNTING	2
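
/* As used below: __jffs2_flush_wbuf() is called with NOPAD when the buffer
 * is exactly full (see jffs2_flash_writev()), with PAD_NOACCOUNT from
 * jffs2_flush_wbuf_pad() and when jffs2_flash_writev() switches to a new
 * block, and with PAD_ACCOUNTING from jffs2_flush_wbuf_gc(). */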

static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
{
	struct jffs2_eraseblock *wbuf_jeb;
	int ret;
	size_t retlen;

	/* Nothing to do if not write-buffering the flash. In particular, we shouldn't
	   del_timer() the timer we never initialised. */
	if (!jffs2_is_writebuffered(c))
		return 0;

	if (!mutex_is_locked(&c->alloc_sem)) {
		pr_crit("jffs2_flush_wbuf() called with alloc_sem not locked!\n");
		BUG();
	}

	if (!c->wbuf_len)	/* already checked c->wbuf above */
		return 0;

	wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
	if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1))
		return -ENOMEM;

	/* claim remaining space on the page
	   this happens, if we have a change to a new block,
	   or if fsync forces us to flush the writebuffer.
	   if we have a switch to next page, we will not have
	   enough remaining space for this.
	*/
	if (pad) {
		c->wbuf_len = PAD(c->wbuf_len);

		/* Pad with JFFS2_DIRTY_BITMASK initially. this helps out ECC'd NOR
		   with 8 byte page size */
		memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);

		if (c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
			struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
			padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
			padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
			padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
			padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode) - 4));
		}
	}
	/* else jffs2_flash_writev has actually filled in the rest of the
	   buffer for us, and will deal with the node refs etc. later. */
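
	/* Worked example (hypothetical numbers): with a 512-byte wbuf_pagesize
	 * and a wbuf_len of 300 after PAD(), the remaining 212 bytes are zeroed
	 * and, since a 12-byte node header still fits, described by a
	 * JFFS2_NODETYPE_PADDING node with totlen 212 so the scanner can skip
	 * them cleanly on the next mount. */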

#ifdef BREAKME
	static int breakme;
	if (breakme++ == 20) {
		pr_notice("Faking write error at 0x%08x\n", c->wbuf_ofs);
		breakme = 0;
		mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
			  brokenbuf);
		ret = -EIO;
	} else
#endif

		ret = mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize,
				&retlen, c->wbuf);

	if (ret) {
		pr_warn("jffs2_flush_wbuf(): Write failed with %d\n", ret);
		goto wfail;
	} else if (retlen != c->wbuf_pagesize) {
		pr_warn("jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
			retlen, c->wbuf_pagesize);
		ret = -EIO;
		goto wfail;
	} else if ((ret = jffs2_verify_write(c, c->wbuf, c->wbuf_ofs))) {
	wfail:
		jffs2_wbuf_recover(c);

		return ret;
	}

	/* Adjust free size of the block if we padded. */
	if (pad) {
		uint32_t waste = c->wbuf_pagesize - c->wbuf_len;

		jffs2_dbg(1, "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
			  (wbuf_jeb == c->nextblock) ? "next" : "",
			  wbuf_jeb->offset);

		/* wbuf_pagesize - wbuf_len is the amount of space that's to be
		   padded. If there is less free space in the block than that,
		   something screwed up */
		if (wbuf_jeb->free_size < waste) {
			pr_crit("jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
				c->wbuf_ofs, c->wbuf_len, waste);
			pr_crit("jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
				wbuf_jeb->offset, wbuf_jeb->free_size);
			BUG();
		}

		spin_lock(&c->erase_completion_lock);

		jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL);
		/* FIXME: that made it count as dirty. Convert to wasted */
		wbuf_jeb->dirty_size -= waste;
		c->dirty_size -= waste;
		wbuf_jeb->wasted_size += waste;
		c->wasted_size += waste;
	} else
		spin_lock(&c->erase_completion_lock);

	/* Stick any now-obsoleted blocks on the erase_pending_list */
	jffs2_refile_wbuf_blocks(c);
	jffs2_clear_wbuf_ino_list(c);
	spin_unlock(&c->erase_completion_lock);

	memset(c->wbuf, 0xff, c->wbuf_pagesize);
	/* adjust write buffer offset, else we get a non contiguous write bug */
	c->wbuf_ofs += c->wbuf_pagesize;
	c->wbuf_len = 0;
	return 0;
}

/* Trigger garbage collection to flush the write-buffer.
   If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
   outstanding. If ino arg non-zero, do it only if a write for the
   given inode is outstanding. */
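/* (For reference, delayed_wbuf_sync() further down in this file calls this
   with ino == 0, i.e. flush if anything at all is pending.) */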
int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
{
	uint32_t old_wbuf_ofs;
	uint32_t old_wbuf_len;
	int ret = 0;

	jffs2_dbg(1, "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino);

	if (!c->wbuf)
		return 0;

	mutex_lock(&c->alloc_sem);
	if (!jffs2_wbuf_pending_for_ino(c, ino)) {
		jffs2_dbg(1, "Ino #%d not pending in wbuf. Returning\n", ino);
		mutex_unlock(&c->alloc_sem);
		return 0;
	}

	old_wbuf_ofs = c->wbuf_ofs;
	old_wbuf_len = c->wbuf_len;

	if (c->unchecked_size) {
		/* GC won't make any progress for a while */
		jffs2_dbg(1, "%s(): padding. Not finished checking\n",
			  __func__);
		down_write(&c->wbuf_sem);
		ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		/* retry flushing wbuf in case jffs2_wbuf_recover
		   left some data in the wbuf */
		if (ret)
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		up_write(&c->wbuf_sem);
	} else while (old_wbuf_len &&
		      old_wbuf_ofs == c->wbuf_ofs) {

		mutex_unlock(&c->alloc_sem);

		jffs2_dbg(1, "%s(): calls gc pass\n", __func__);

		ret = jffs2_garbage_collect_pass(c);
		if (ret) {
			/* GC failed. Flush it with padding instead */
			mutex_lock(&c->alloc_sem);
			down_write(&c->wbuf_sem);
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			/* retry flushing wbuf in case jffs2_wbuf_recover
			   left some data in the wbuf */
			if (ret)
				ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			up_write(&c->wbuf_sem);
			break;
		}
		mutex_lock(&c->alloc_sem);
	}

	jffs2_dbg(1, "%s(): ends...\n", __func__);

	mutex_unlock(&c->alloc_sem);
	return ret;
}

/* Pad write-buffer to end and write it, wasting space. */
int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
{
	int ret;

	if (!c->wbuf)
		return 0;

	down_write(&c->wbuf_sem);
	ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
	/* retry - maybe wbuf recover left some data in wbuf. */
	if (ret)
		ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
	up_write(&c->wbuf_sem);

	return ret;
}

static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf,
			      size_t len)
{
	if (len && !c->wbuf_len && (len >= c->wbuf_pagesize))
		return 0;

	if (len > (c->wbuf_pagesize - c->wbuf_len))
		len = c->wbuf_pagesize - c->wbuf_len;
	memcpy(c->wbuf + c->wbuf_len, buf, len);
	c->wbuf_len += (uint32_t) len;
	return len;
}
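
/* Note (derived from the code above): the return value is the number of
 * bytes absorbed into the write buffer. It deliberately returns 0 without
 * copying when the buffer is empty and the caller still has at least one
 * full page, so jffs2_flash_writev() can push those pages to flash
 * directly instead of staging them here. */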

int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
		       unsigned long count, loff_t to, size_t *retlen,
		       uint32_t ino)
{
	struct jffs2_eraseblock *jeb;
	size_t wbuf_retlen, donelen = 0;
	uint32_t outvec_to = to;
	int ret, invec;

	/* If not writebuffered flash, don't bother */
	if (!jffs2_is_writebuffered(c))
		return jffs2_flash_direct_writev(c, invecs, count, to, retlen);

	down_write(&c->wbuf_sem);

	/* If wbuf_ofs is not initialized, set it to target address */
	if (c->wbuf_ofs == 0xFFFFFFFF) {
		c->wbuf_ofs = PAGE_DIV(to);
		c->wbuf_len = PAGE_MOD(to);
		memset(c->wbuf, 0xff, c->wbuf_pagesize);
	}

	/*
	 * Sanity checks on target address. It's permitted to write
	 * at PAD(c->wbuf_len + c->wbuf_ofs), and it's permitted to
	 * write at the beginning of a new erase block. Anything else,
	 * and you die. New block starts at xxx000c (0-b = block
	 * header)
	 */
	if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
		/* It's a write to a new block */
		if (c->wbuf_len) {
			jffs2_dbg(1, "%s(): to 0x%lx causes flush of wbuf at 0x%08x\n",
				  __func__, (unsigned long)to, c->wbuf_ofs);
			ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
			if (ret)
				goto outerr;
		}
		/* set pointer to new block */
		c->wbuf_ofs = PAGE_DIV(to);
		c->wbuf_len = PAGE_MOD(to);
	}

	if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
		/* We're not writing immediately after the writebuffer. Bad. */
		pr_crit("%s(): Non-contiguous write to %08lx\n",
			__func__, (unsigned long)to);
		if (c->wbuf_len)
			pr_crit("wbuf was previously %08x-%08x\n",
				c->wbuf_ofs, c->wbuf_ofs + c->wbuf_len);
		BUG();
	}

	/* adjust alignment offset */
	if (c->wbuf_len != PAGE_MOD(to)) {
		c->wbuf_len = PAGE_MOD(to);
		/* take care of alignment to next page */
		if (!c->wbuf_len) {
			c->wbuf_len = c->wbuf_pagesize;
			ret = __jffs2_flush_wbuf(c, NOPAD);
			if (ret)
				goto outerr;
		}
	}
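
	/* Worked example of the loop below (hypothetical numbers): with a
	 * 512-byte wbuf_pagesize and 100 bytes already buffered, a single
	 * 2000-byte vector is handled as 412 bytes into the wbuf (which is
	 * then flushed), 1536 bytes (three whole pages) written straight to
	 * flash by mtd_write(), and the trailing 52 bytes left sitting in
	 * the wbuf. */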

	for (invec = 0; invec < count; invec++) {
		int vlen = invecs[invec].iov_len;
		uint8_t *v = invecs[invec].iov_base;

		wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);

		if (c->wbuf_len == c->wbuf_pagesize) {
			ret = __jffs2_flush_wbuf(c, NOPAD);
			if (ret)
				goto outerr;
		}
		vlen -= wbuf_retlen;
		outvec_to += wbuf_retlen;
		donelen += wbuf_retlen;
		v += wbuf_retlen;

		if (vlen >= c->wbuf_pagesize) {
			ret = mtd_write(c->mtd, outvec_to, PAGE_DIV(vlen),
					&wbuf_retlen, v);
			if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen))
				goto outfile;

			vlen -= wbuf_retlen;
			outvec_to += wbuf_retlen;
			c->wbuf_ofs = outvec_to;
			donelen += wbuf_retlen;
			v += wbuf_retlen;
		}

		wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
		if (c->wbuf_len == c->wbuf_pagesize) {
			ret = __jffs2_flush_wbuf(c, NOPAD);
			if (ret)
				goto outerr;
		}

		outvec_to += wbuf_retlen;
		donelen += wbuf_retlen;
	}

	/*
	 * If there's a remainder in the wbuf and it's a non-GC write,
	 * remember that the wbuf affects this ino
	 */
	*retlen = donelen;

	if (jffs2_sum_active()) {
		int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
		if (res)
			return res;
	}

	if (c->wbuf_len && ino)
		jffs2_wbuf_dirties_inode(c, ino);

	ret = 0;
	up_write(&c->wbuf_sem);
	return ret;

outfile:
	/*
	 * At this point we have no problem, c->wbuf is empty. However
	 * refile nextblock to avoid writing again to same address.
	 */

	spin_lock(&c->erase_completion_lock);

	jeb = &c->blocks[outvec_to / c->sector_size];
	jffs2_block_refile(c, jeb, REFILE_ANYWAY);

	spin_unlock(&c->erase_completion_lock);

outerr:
	*retlen = 0;
	up_write(&c->wbuf_sem);
	return ret;
}

/*
 * This is the entry for flash write.
 * Check if we work on NAND FLASH; if so, build a kvec and write it via writev
 */
int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
		      size_t *retlen, const u_char *buf)
{
	struct kvec vecs[1];

	if (!jffs2_is_writebuffered(c))
		return jffs2_flash_direct_write(c, ofs, len, retlen, buf);

	vecs[0].iov_base = (unsigned char *) buf;
	vecs[0].iov_len = len;
	return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0);
}

/*
	Handle readback from writebuffer and ECC failure return
*/
int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
{
	loff_t orbf = 0, owbf = 0, lwbf = 0;
	int ret;

	if (!jffs2_is_writebuffered(c))
		return mtd_read(c->mtd, ofs, len, retlen, buf);

	/* Read flash */
	down_read(&c->wbuf_sem);
	ret = mtd_read(c->mtd, ofs, len, retlen, buf);

	if ((ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len)) {
		if (ret == -EBADMSG)
			pr_warn("mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
				len, ofs);
		/*
		 * We have the raw data without ECC correction in the buffer,
		 * maybe we are lucky and all data or parts are correct. We
		 * check the node. If data are corrupted the node check will
		 * sort it out. We keep this block, it will fail on write or
		 * erase and then we mark it bad. Or should we do that now?
		 * But we should give it a chance. Maybe we had a system crash
		 * or power loss before the ecc write or an erase was completed.
		 * So we return success. :)
		 */
		ret = 0;
	}

	/* if no writebuffer available or write buffer empty, return */
	if (!c->wbuf_pagesize || !c->wbuf_len)
		goto exit;

	/* if we read in a different block, return */
	if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs))
		goto exit;
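
	/* Worked example (hypothetical numbers): if the wbuf currently covers
	 * 0x42000..0x420ff (wbuf_ofs 0x42000, wbuf_len 0x100) and the caller
	 * read 0x200 bytes from 0x42080, then owbf = 0x80, lwbf = 0x80 and
	 * the first 0x80 bytes of the result are overlaid from c->wbuf,
	 * since the copy in the write buffer is newer than what is on flash. */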

	if (ofs >= c->wbuf_ofs) {
		owbf = (ofs - c->wbuf_ofs);	/* offset in write buffer */
		if (owbf > c->wbuf_len)		/* is read beyond write buffer ? */
			goto exit;
		lwbf = c->wbuf_len - owbf;	/* number of bytes to copy */
		if (lwbf > len)
			lwbf = len;
	} else {
		orbf = (c->wbuf_ofs - ofs);	/* offset in read buffer */
		if (orbf > len)			/* is write beyond write buffer ? */
			goto exit;
		lwbf = len - orbf;		/* number of bytes to copy */
		if (lwbf > c->wbuf_len)
			lwbf = c->wbuf_len;
	}
	if (lwbf > 0)
		memcpy(buf + orbf, c->wbuf + owbf, lwbf);

exit:
	up_read(&c->wbuf_sem);
	return ret;
}

#define NR_OOB_SCAN_PAGES 4

/* For historical reasons we use only 8 bytes for OOB clean marker */
#define OOB_CM_SIZE 8

static const struct jffs2_unknown_node oob_cleanmarker =
{
	.magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
	.nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
	.totlen = constant_cpu_to_je32(8)
};
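
/* For illustration only (assuming the usual little-endian node layout and
 * JFFS2_NODETYPE_CLEANMARKER == 0x2003): the first 8 bytes compared against
 * the OOB area would be 85 19 03 20 08 00 00 00, i.e. magic 0x1985,
 * nodetype 0x2003 and totlen 8. */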

/*
 * Check if the out of band area is empty. This function knows about the
 * clean marker and if it is present in OOB, treats the OOB as empty anyway.
 */
int jffs2_check_oob_empty(struct jffs2_sb_info *c,
			  struct jffs2_eraseblock *jeb, int mode)
{
	int i, ret;
	int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
	struct mtd_oob_ops ops;

	ops.mode = MTD_OPS_AUTO_OOB;
	ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail;
	ops.oobbuf = c->oobbuf;
	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
	ops.datbuf = NULL;

	ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
	if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
		pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
		if (!ret || mtd_is_bitflip(ret))
			ret = -EIO;
		return ret;
	}

	for (i = 0; i < ops.ooblen; i++) {
		if (mode && i < cmlen)
			/* Yeah, we know about the cleanmarker */
			continue;

		if (ops.oobbuf[i] != 0xFF) {
			jffs2_dbg(2, "Found %02x at %x in OOB for "
				  "%08x\n", ops.oobbuf[i], i, jeb->offset);
			return 1;
		}
	}

	return 0;
}

/*
 * Check for a valid cleanmarker.
 * Returns: 0 if a valid cleanmarker was found
 *	    1 if no cleanmarker was found
 *	    negative error code if an error occurred
 */
int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
				 struct jffs2_eraseblock *jeb)
{
	struct mtd_oob_ops ops;
	int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);

	ops.mode = MTD_OPS_AUTO_OOB;
	ops.ooblen = cmlen;
	ops.oobbuf = c->oobbuf;
	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
	ops.datbuf = NULL;

	ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
	if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
		pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
		if (!ret || mtd_is_bitflip(ret))
			ret = -EIO;
		return ret;
	}

	return !!memcmp(&oob_cleanmarker, c->oobbuf, cmlen);
}

int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
				 struct jffs2_eraseblock *jeb)
{
	int ret;
	struct mtd_oob_ops ops;
	int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);

	ops.mode = MTD_OPS_AUTO_OOB;
	ops.ooblen = cmlen;
	ops.oobbuf = (uint8_t *)&oob_cleanmarker;
	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
	ops.datbuf = NULL;

	ret = mtd_write_oob(c->mtd, jeb->offset, &ops);
	if (ret || ops.oobretlen != ops.ooblen) {
		pr_err("cannot write OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
		if (!ret)
			ret = -EIO;
		return ret;
	}

	return 0;
}

/*
 * On NAND we try to mark this block bad. If the block was erased more
 * than MAX_ERASE_FAILURES we mark it finally bad.
 * Don't care about failures. This block remains on the erase-pending
 * or badblock list as long as nobody manipulates the flash with
 * a bootloader or something like that.
 */
int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
{
	int ret;

	/* if the count is < max, we try to write the counter to the 2nd page oob area */
	if (++jeb->bad_count < MAX_ERASE_FAILURES)
		return 0;

	pr_warn("marking eraseblock at %08x as bad\n", bad_offset);
	ret = mtd_block_markbad(c->mtd, bad_offset);

	if (ret) {
		jffs2_dbg(1, "%s(): Write failed for block at %08x: error %d\n",
			  __func__, jeb->offset, ret);
		return ret;
	}
	return 1;
}

static struct jffs2_sb_info *work_to_sb(struct work_struct *work)
{
	struct delayed_work *dwork;

	dwork = to_delayed_work(work);
	return container_of(dwork, struct jffs2_sb_info, wbuf_dwork);
}

static void delayed_wbuf_sync(struct work_struct *work)
{
	struct jffs2_sb_info *c = work_to_sb(work);
	struct super_block *sb = OFNI_BS_2SFFJ(c);

	if (!sb_rdonly(sb)) {
		jffs2_dbg(1, "%s()\n", __func__);
		jffs2_flush_wbuf_gc(c, 0);
	}
}

void jffs2_dirty_trigger(struct jffs2_sb_info *c)
{
	struct super_block *sb = OFNI_BS_2SFFJ(c);
	unsigned long delay;

	if (sb_rdonly(sb))
		return;

	delay = msecs_to_jiffies(dirty_writeback_interval * 10);
	if (queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay))
		jffs2_dbg(1, "%s()\n", __func__);
}
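
/* Note on the delay above: dirty_writeback_interval is the VM writeback
 * knob expressed in centiseconds, so the factor of 10 converts it to
 * milliseconds. With the usual default of 500 (5 seconds), the delayed
 * wbuf flush is scheduled roughly 5 seconds after the first dirtying
 * write. */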

int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
{
	if (!c->mtd->oobsize)
		return 0;

	/* Cleanmarker is out-of-band, so inline size zero */
	c->cleanmarker_size = 0;

	if (c->mtd->oobavail == 0) {
		pr_err("inconsistent device description\n");
		return -EINVAL;
	}

	jffs2_dbg(1, "using OOB on NAND\n");

	c->oobavail = c->mtd->oobavail;

	/* Initialise write buffer */
	init_rwsem(&c->wbuf_sem);
	INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
	c->wbuf_pagesize = c->mtd->writesize;
	c->wbuf_ofs = 0xFFFFFFFF;

	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

	c->oobbuf = kmalloc_array(NR_OOB_SCAN_PAGES, c->oobavail, GFP_KERNEL);
	if (!c->oobbuf) {
		kfree(c->wbuf);
		return -ENOMEM;
	}

#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
	c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf_verify) {
		kfree(c->oobbuf);
		kfree(c->wbuf);
		return -ENOMEM;
	}
#endif

	return 0;
}

void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
{
#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
	kfree(c->wbuf_verify);
#endif
	kfree(c->wbuf);
	kfree(c->oobbuf);
}

int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
	c->cleanmarker_size = 0;		/* No cleanmarkers needed */

	/* Initialize write buffer */
	init_rwsem(&c->wbuf_sem);
	INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
	c->wbuf_pagesize = c->mtd->erasesize;

	/* Find a suitable c->sector_size
	 * - Not too many sectors
	 * - Sectors have to be at least 4K + some bytes
	 * - All known dataflashes have erase sizes of 528 or 1056
	 * - we take at least 8 eraseblocks and want to have at least 8K size
	 * - The concatenation should be a power of 2
	 */
	c->sector_size = 8 * c->mtd->erasesize;

	while (c->sector_size < 8192) {
		c->sector_size *= 2;
	}
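
	/* Illustrative arithmetic (values for common dataflash parts, not a
	 * guarantee): with a 528-byte erasesize the initial sector_size is
	 * 8 * 528 = 4224, which the loop doubles once to 8448 (16 erase
	 * blocks); with a 1056-byte erasesize it is 8448 straight away
	 * (8 erase blocks). */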

	/* It may be necessary to adjust the flash size */
	c->flash_size = c->mtd->size;

	if ((c->flash_size % c->sector_size) != 0) {
		c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
		pr_warn("flash size adjusted to %dKiB\n", c->flash_size);
	}

	c->wbuf_ofs = 0xFFFFFFFF;
	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
	c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf_verify) {
		kfree(c->wbuf);
		return -ENOMEM;
	}
#endif

	pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n",
		c->wbuf_pagesize, c->sector_size);

	return 0;
}

void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
	kfree(c->wbuf_verify);
#endif
	kfree(c->wbuf);
}

int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
	/* Cleanmarker currently occupies whole programming regions,
	 * either one or 2 for 8 Byte STMicro flashes. */
	c->cleanmarker_size = max(16u, c->mtd->writesize);

	/* Initialize write buffer */
	init_rwsem(&c->wbuf_sem);
	INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
	c->wbuf_pagesize = c->mtd->writesize;
	c->wbuf_ofs = 0xFFFFFFFF;

	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
	c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf_verify) {
		kfree(c->wbuf);
		return -ENOMEM;
	}
#endif
	return 0;
}

void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
	kfree(c->wbuf_verify);
#endif
	kfree(c->wbuf);
}

int jffs2_ubivol_setup(struct jffs2_sb_info *c) {
	c->cleanmarker_size = 0;

	if (c->mtd->writesize == 1)
		/* We do not need write-buffer */
		return 0;

	init_rwsem(&c->wbuf_sem);
	INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);

	c->wbuf_pagesize = c->mtd->writesize;
	c->wbuf_ofs = 0xFFFFFFFF;
	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

	pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n",
		c->wbuf_pagesize, c->sector_size);

	return 0;
}

void jffs2_ubivol_cleanup(struct jffs2_sb_info *c) {
	kfree(c->wbuf);
}