/*
 * MTD Oops / Panic logger
 *
 * Copyright (C) 2007 Nokia Corporation. All rights reserved.
 *
 * Author: Richard Purdie <rpurdie@openedhand.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */
# include <linux/kernel.h>
# include <linux/module.h>
# include <linux/console.h>
# include <linux/vmalloc.h>
# include <linux/workqueue.h>
# include <linux/sched.h>
# include <linux/wait.h>
2008-02-06 13:17:50 +03:00
# include <linux/delay.h>
2008-01-29 13:21:56 +03:00
# include <linux/spinlock.h>
2008-02-07 13:50:57 +03:00
# include <linux/interrupt.h>
2007-05-29 16:31:42 +04:00
# include <linux/mtd/mtd.h>
2008-07-26 12:22:45 +04:00
# define MTDOOPS_KERNMSG_MAGIC 0x5d005d00
2007-05-29 16:31:42 +04:00
# define OOPS_PAGE_SIZE 4096
2008-04-19 00:44:11 +04:00
static struct mtdoops_context {
2007-05-29 16:31:42 +04:00
int mtd_index ;
2008-01-29 14:27:11 +03:00
struct work_struct work_erase ;
struct work_struct work_write ;
2007-05-29 16:31:42 +04:00
struct mtd_info * mtd ;
int oops_pages ;
int nextpage ;
int nextcount ;
2009-02-16 19:21:35 +03:00
char * name ;
2007-05-29 16:31:42 +04:00
void * oops_buf ;
2008-01-29 13:21:56 +03:00
/* writecount and disabling ready are spin lock protected */
spinlock_t writecount_lock ;
2007-05-29 16:31:42 +04:00
int ready ;
int writecount ;
} oops_cxt ;
/*
 * Erase-completion callback.  mtdoops_erase_block() stashed the address
 * of its wait queue in ->priv; wake the sleeper up.
 */
static void mtdoops_erase_callback(struct erase_info *done)
{
	wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;

	wake_up(wait_q);
}
static int mtdoops_erase_block ( struct mtd_info * mtd , int offset )
{
struct erase_info erase ;
DECLARE_WAITQUEUE ( wait , current ) ;
wait_queue_head_t wait_q ;
int ret ;
init_waitqueue_head ( & wait_q ) ;
erase . mtd = mtd ;
erase . callback = mtdoops_erase_callback ;
erase . addr = offset ;
2008-01-29 13:25:55 +03:00
erase . len = mtd - > erasesize ;
2007-05-29 16:31:42 +04:00
erase . priv = ( u_long ) & wait_q ;
set_current_state ( TASK_INTERRUPTIBLE ) ;
add_wait_queue ( & wait_q , & wait ) ;
ret = mtd - > erase ( mtd , & erase ) ;
if ( ret ) {
set_current_state ( TASK_RUNNING ) ;
remove_wait_queue ( & wait_q , & wait ) ;
2008-12-10 16:37:21 +03:00
printk ( KERN_WARNING " mtdoops: erase of region [0x%llx, 0x%llx] "
2007-05-29 16:31:42 +04:00
" on \" %s \" failed \n " ,
2008-12-10 16:37:21 +03:00
( unsigned long long ) erase . addr , ( unsigned long long ) erase . len , mtd - > name ) ;
2007-05-29 16:31:42 +04:00
return ret ;
}
schedule ( ) ; /* Wait for erase to finish. */
remove_wait_queue ( & wait_q , & wait ) ;
return 0 ;
}
2008-01-29 14:27:11 +03:00
static void mtdoops_inc_counter ( struct mtdoops_context * cxt )
2007-05-29 16:31:42 +04:00
{
struct mtd_info * mtd = cxt - > mtd ;
size_t retlen ;
u32 count ;
int ret ;
cxt - > nextpage + + ;
2008-07-26 12:17:41 +04:00
if ( cxt - > nextpage > = cxt - > oops_pages )
2007-05-29 16:31:42 +04:00
cxt - > nextpage = 0 ;
cxt - > nextcount + + ;
if ( cxt - > nextcount = = 0xffffffff )
cxt - > nextcount = 0 ;
ret = mtd - > read ( mtd , cxt - > nextpage * OOPS_PAGE_SIZE , 4 ,
& retlen , ( u_char * ) & count ) ;
2008-01-29 14:27:09 +03:00
if ( ( retlen ! = 4 ) | | ( ( ret < 0 ) & & ( ret ! = - EUCLEAN ) ) ) {
2007-08-11 01:01:31 +04:00
printk ( KERN_ERR " mtdoops: Read failure at %d (%td of 4 read) "
2007-05-29 16:31:42 +04:00
" , err %d. \n " , cxt - > nextpage * OOPS_PAGE_SIZE ,
retlen , ret ) ;
2008-01-29 14:27:11 +03:00
schedule_work ( & cxt - > work_erase ) ;
return ;
2007-05-29 16:31:42 +04:00
}
/* See if we need to erase the next block */
2008-01-29 14:27:11 +03:00
if ( count ! = 0xffffffff ) {
schedule_work ( & cxt - > work_erase ) ;
return ;
}
2007-05-29 16:31:42 +04:00
printk ( KERN_DEBUG " mtdoops: Ready %d, %d (no erase) \n " ,
cxt - > nextpage , cxt - > nextcount ) ;
cxt - > ready = 1 ;
}
2008-01-29 14:27:11 +03:00
/* Scheduled work - when we can't proceed without erasing a block */
static void mtdoops_workfunc_erase ( struct work_struct * work )
2007-05-29 16:31:42 +04:00
{
2008-01-29 14:27:11 +03:00
struct mtdoops_context * cxt =
container_of ( work , struct mtdoops_context , work_erase ) ;
2007-05-29 16:31:42 +04:00
struct mtd_info * mtd = cxt - > mtd ;
int i = 0 , j , ret , mod ;
/* We were unregistered */
if ( ! mtd )
return ;
mod = ( cxt - > nextpage * OOPS_PAGE_SIZE ) % mtd - > erasesize ;
if ( mod ! = 0 ) {
cxt - > nextpage = cxt - > nextpage + ( ( mtd - > erasesize - mod ) / OOPS_PAGE_SIZE ) ;
2008-07-26 12:17:41 +04:00
if ( cxt - > nextpage > = cxt - > oops_pages )
2007-05-29 16:31:42 +04:00
cxt - > nextpage = 0 ;
}
2008-01-29 14:27:09 +03:00
while ( mtd - > block_isbad ) {
ret = mtd - > block_isbad ( mtd , cxt - > nextpage * OOPS_PAGE_SIZE ) ;
if ( ! ret )
break ;
if ( ret < 0 ) {
printk ( KERN_ERR " mtdoops: block_isbad failed, aborting. \n " ) ;
return ;
}
2007-05-29 16:31:42 +04:00
badblock :
printk ( KERN_WARNING " mtdoops: Bad block at %08x \n " ,
cxt - > nextpage * OOPS_PAGE_SIZE ) ;
i + + ;
cxt - > nextpage = cxt - > nextpage + ( mtd - > erasesize / OOPS_PAGE_SIZE ) ;
2008-07-26 12:17:41 +04:00
if ( cxt - > nextpage > = cxt - > oops_pages )
2007-05-29 16:31:42 +04:00
cxt - > nextpage = 0 ;
if ( i = = ( cxt - > oops_pages / ( mtd - > erasesize / OOPS_PAGE_SIZE ) ) ) {
printk ( KERN_ERR " mtdoops: All blocks bad! \n " ) ;
return ;
}
}
for ( j = 0 , ret = - 1 ; ( j < 3 ) & & ( ret < 0 ) ; j + + )
ret = mtdoops_erase_block ( mtd , cxt - > nextpage * OOPS_PAGE_SIZE ) ;
2008-01-29 14:27:09 +03:00
if ( ret > = 0 ) {
printk ( KERN_DEBUG " mtdoops: Ready %d, %d \n " , cxt - > nextpage , cxt - > nextcount ) ;
cxt - > ready = 1 ;
return ;
2007-05-29 16:31:42 +04:00
}
2008-01-29 14:27:09 +03:00
if ( mtd - > block_markbad & & ( ret = = - EIO ) ) {
ret = mtd - > block_markbad ( mtd , cxt - > nextpage * OOPS_PAGE_SIZE ) ;
if ( ret < 0 ) {
printk ( KERN_ERR " mtdoops: block_markbad failed, aborting. \n " ) ;
return ;
}
}
goto badblock ;
2007-05-29 16:31:42 +04:00
}
2008-02-06 13:17:50 +03:00
static void mtdoops_write ( struct mtdoops_context * cxt , int panic )
2007-05-29 16:31:42 +04:00
{
2008-01-29 14:27:11 +03:00
struct mtd_info * mtd = cxt - > mtd ;
size_t retlen ;
int ret ;
2007-05-29 16:31:42 +04:00
2008-01-29 14:27:11 +03:00
if ( cxt - > writecount < OOPS_PAGE_SIZE )
memset ( cxt - > oops_buf + cxt - > writecount , 0xff ,
OOPS_PAGE_SIZE - cxt - > writecount ) ;
2008-02-06 13:17:50 +03:00
if ( panic )
ret = mtd - > panic_write ( mtd , cxt - > nextpage * OOPS_PAGE_SIZE ,
OOPS_PAGE_SIZE , & retlen , cxt - > oops_buf ) ;
else
ret = mtd - > write ( mtd , cxt - > nextpage * OOPS_PAGE_SIZE ,
2008-01-29 14:27:11 +03:00
OOPS_PAGE_SIZE , & retlen , cxt - > oops_buf ) ;
cxt - > writecount = 0 ;
if ( ( retlen ! = OOPS_PAGE_SIZE ) | | ( ret < 0 ) )
printk ( KERN_ERR " mtdoops: Write failure at %d (%td of %d written), err %d. \n " ,
cxt - > nextpage * OOPS_PAGE_SIZE , retlen , OOPS_PAGE_SIZE , ret ) ;
mtdoops_inc_counter ( cxt ) ;
2008-02-06 13:17:50 +03:00
}
/* Scheduled work - the normal (non-panic) path for flushing a record */
static void mtdoops_workfunc_write(struct work_struct *work)
{
	struct mtdoops_context *cxt =
			container_of(work, struct mtdoops_context, work_write);

	mtdoops_write(cxt, 0);
}
2007-05-29 16:31:42 +04:00
2008-01-29 14:27:11 +03:00
static void find_next_position ( struct mtdoops_context * cxt )
2007-05-29 16:31:42 +04:00
{
struct mtd_info * mtd = cxt - > mtd ;
2008-01-29 14:27:09 +03:00
int ret , page , maxpos = 0 ;
2008-07-26 12:22:45 +04:00
u32 count [ 2 ] , maxcount = 0xffffffff ;
2007-05-29 16:31:42 +04:00
size_t retlen ;
for ( page = 0 ; page < cxt - > oops_pages ; page + + ) {
2008-07-26 12:22:45 +04:00
ret = mtd - > read ( mtd , page * OOPS_PAGE_SIZE , 8 , & retlen , ( u_char * ) & count [ 0 ] ) ;
if ( ( retlen ! = 8 ) | | ( ( ret < 0 ) & & ( ret ! = - EUCLEAN ) ) ) {
printk ( KERN_ERR " mtdoops: Read failure at %d (%td of 8 read) "
2008-01-29 14:27:09 +03:00
" , err %d. \n " , page * OOPS_PAGE_SIZE , retlen , ret ) ;
continue ;
}
2008-07-26 12:22:45 +04:00
if ( count [ 1 ] ! = MTDOOPS_KERNMSG_MAGIC )
continue ;
if ( count [ 0 ] = = 0xffffffff )
2007-05-29 16:31:42 +04:00
continue ;
if ( maxcount = = 0xffffffff ) {
2008-07-26 12:22:45 +04:00
maxcount = count [ 0 ] ;
2007-05-29 16:31:42 +04:00
maxpos = page ;
2008-07-26 12:22:45 +04:00
} else if ( ( count [ 0 ] < 0x40000000 ) & & ( maxcount > 0xc0000000 ) ) {
maxcount = count [ 0 ] ;
2007-05-29 16:31:42 +04:00
maxpos = page ;
2008-07-26 12:22:45 +04:00
} else if ( ( count [ 0 ] > maxcount ) & & ( count [ 0 ] < 0xc0000000 ) ) {
maxcount = count [ 0 ] ;
2007-05-29 16:31:42 +04:00
maxpos = page ;
2008-07-26 12:22:45 +04:00
} else if ( ( count [ 0 ] > maxcount ) & & ( count [ 0 ] > 0xc0000000 )
2007-05-29 16:31:42 +04:00
& & ( maxcount > 0x80000000 ) ) {
2008-07-26 12:22:45 +04:00
maxcount = count [ 0 ] ;
2007-05-29 16:31:42 +04:00
maxpos = page ;
}
}
if ( maxcount = = 0xffffffff ) {
cxt - > nextpage = 0 ;
cxt - > nextcount = 1 ;
2008-07-26 12:25:18 +04:00
schedule_work ( & cxt - > work_erase ) ;
2008-01-29 14:27:11 +03:00
return ;
2007-05-29 16:31:42 +04:00
}
cxt - > nextpage = maxpos ;
cxt - > nextcount = maxcount ;
2008-01-29 14:27:11 +03:00
mtdoops_inc_counter ( cxt ) ;
2007-05-29 16:31:42 +04:00
}
static void mtdoops_notify_add ( struct mtd_info * mtd )
{
struct mtdoops_context * cxt = & oops_cxt ;
2009-02-16 19:21:35 +03:00
if ( cxt - > name & & ! strcmp ( mtd - > name , cxt - > name ) )
cxt - > mtd_index = mtd - > index ;
2007-05-29 16:31:42 +04:00
if ( ( mtd - > index ! = cxt - > mtd_index ) | | cxt - > mtd_index < 0 )
return ;
if ( mtd - > size < ( mtd - > erasesize * 2 ) ) {
printk ( KERN_ERR " MTD partition %d not big enough for mtdoops \n " ,
mtd - > index ) ;
return ;
}
2008-01-29 13:25:55 +03:00
if ( mtd - > erasesize < OOPS_PAGE_SIZE ) {
printk ( KERN_ERR " Eraseblock size of MTD partition %d too small \n " ,
mtd - > index ) ;
return ;
}
2007-05-29 16:31:42 +04:00
cxt - > mtd = mtd ;
2008-12-10 16:37:21 +03:00
if ( mtd - > size > INT_MAX )
cxt - > oops_pages = INT_MAX / OOPS_PAGE_SIZE ;
else
cxt - > oops_pages = ( int ) mtd - > size / OOPS_PAGE_SIZE ;
2007-05-29 16:31:42 +04:00
2008-01-29 14:27:11 +03:00
find_next_position ( cxt ) ;
2007-05-29 16:31:42 +04:00
2008-01-29 13:25:55 +03:00
printk ( KERN_INFO " mtdoops: Attached to MTD device %d \n " , mtd - > index ) ;
2007-05-29 16:31:42 +04:00
}
static void mtdoops_notify_remove ( struct mtd_info * mtd )
{
struct mtdoops_context * cxt = & oops_cxt ;
if ( ( mtd - > index ! = cxt - > mtd_index ) | | cxt - > mtd_index < 0 )
return ;
cxt - > mtd = NULL ;
flush_scheduled_work ( ) ;
}
2007-07-10 23:33:54 +04:00
static void mtdoops_console_sync ( void )
2007-05-29 16:31:42 +04:00
{
2007-07-10 23:33:54 +04:00
struct mtdoops_context * cxt = & oops_cxt ;
2007-05-29 16:31:42 +04:00
struct mtd_info * mtd = cxt - > mtd ;
2008-01-29 13:21:56 +03:00
unsigned long flags ;
2007-05-29 16:31:42 +04:00
2008-01-29 14:27:11 +03:00
if ( ! cxt - > ready | | ! mtd | | cxt - > writecount = = 0 )
2007-05-29 16:31:42 +04:00
return ;
2008-01-29 13:21:56 +03:00
/*
* Once ready is 0 and we ' ve held the lock no further writes to the
* buffer will happen
*/
spin_lock_irqsave ( & cxt - > writecount_lock , flags ) ;
if ( ! cxt - > ready ) {
spin_unlock_irqrestore ( & cxt - > writecount_lock , flags ) ;
return ;
}
2007-07-10 23:33:54 +04:00
cxt - > ready = 0 ;
2008-01-29 13:21:56 +03:00
spin_unlock_irqrestore ( & cxt - > writecount_lock , flags ) ;
2007-07-10 23:33:54 +04:00
2008-02-06 13:17:50 +03:00
if ( mtd - > panic_write & & in_interrupt ( ) )
/* Interrupt context, we're going to panic so try and log */
mtdoops_write ( cxt , 1 ) ;
else
schedule_work ( & cxt - > work_write ) ;
2007-07-10 23:33:54 +04:00
}
static void
mtdoops_console_write ( struct console * co , const char * s , unsigned int count )
{
struct mtdoops_context * cxt = co - > data ;
struct mtd_info * mtd = cxt - > mtd ;
2008-01-29 13:21:56 +03:00
unsigned long flags ;
2007-07-10 23:33:54 +04:00
if ( ! oops_in_progress ) {
mtdoops_console_sync ( ) ;
return ;
2007-05-29 16:31:42 +04:00
}
2007-07-10 23:33:54 +04:00
if ( ! cxt - > ready | | ! mtd )
2007-05-29 16:31:42 +04:00
return ;
2008-01-29 13:21:56 +03:00
/* Locking on writecount ensures sequential writes to the buffer */
spin_lock_irqsave ( & cxt - > writecount_lock , flags ) ;
/* Check ready status didn't change whilst waiting for the lock */
2009-03-04 10:53:40 +03:00
if ( ! cxt - > ready ) {
spin_unlock_irqrestore ( & cxt - > writecount_lock , flags ) ;
2008-01-29 13:21:56 +03:00
return ;
2009-03-04 10:53:40 +03:00
}
2008-01-29 13:21:56 +03:00
2007-05-29 16:31:42 +04:00
if ( cxt - > writecount = = 0 ) {
u32 * stamp = cxt - > oops_buf ;
2008-07-26 12:22:45 +04:00
* stamp + + = cxt - > nextcount ;
* stamp = MTDOOPS_KERNMSG_MAGIC ;
cxt - > writecount = 8 ;
2007-05-29 16:31:42 +04:00
}
if ( ( count + cxt - > writecount ) > OOPS_PAGE_SIZE )
count = OOPS_PAGE_SIZE - cxt - > writecount ;
2007-11-06 13:56:02 +03:00
memcpy ( cxt - > oops_buf + cxt - > writecount , s , count ) ;
cxt - > writecount + = count ;
2008-01-29 13:21:56 +03:00
spin_unlock_irqrestore ( & cxt - > writecount_lock , flags ) ;
if ( cxt - > writecount = = OOPS_PAGE_SIZE )
mtdoops_console_sync ( ) ;
2007-05-29 16:31:42 +04:00
}
/*
 * Console setup hook.  Selects the target MTD device either by
 * partition name (via the console options string) or by console index.
 *
 * Returns 0 on success, -EBUSY if a target is already configured,
 * -EINVAL if neither a name nor an index was supplied, and -ENOMEM if
 * duplicating the name fails.
 *
 * Fix: the kstrdup() result was previously unchecked — on allocation
 * failure cxt->name stayed NULL yet 0 was returned, leaving the console
 * silently unconfigured while reporting success.
 */
static int __init mtdoops_console_setup(struct console *co, char *options)
{
	struct mtdoops_context *cxt = co->data;

	/* Only one target may ever be configured. */
	if (cxt->mtd_index != -1 || cxt->name)
		return -EBUSY;

	if (options) {
		cxt->name = kstrdup(options, GFP_KERNEL);
		if (!cxt->name)
			return -ENOMEM;
		return 0;
	}

	if (co->index == -1)
		return -EINVAL;

	cxt->mtd_index = co->index;
	return 0;
}
/* Hook into MTD device add/remove notifications. */
static struct mtd_notifier mtdoops_notifier = {
	.add	= mtdoops_notify_add,
	.remove	= mtdoops_notify_remove,
};
static struct console mtdoops_console = {
. name = " ttyMTD " ,
. write = mtdoops_console_write ,
. setup = mtdoops_console_setup ,
2007-07-10 23:33:54 +04:00
. unblank = mtdoops_console_sync ,
2007-05-29 16:31:42 +04:00
. index = - 1 ,
. data = & oops_cxt ,
} ;
/*
 * Module init: allocate the one-page staging buffer, initialize the
 * lock and work items, then register the console and the MTD notifier.
 * Returns 0 on success or -ENOMEM if the buffer cannot be allocated.
 */
static int __init mtdoops_console_init(void)
{
	struct mtdoops_context *cxt = &oops_cxt;

	cxt->mtd_index = -1;
	cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE);
	spin_lock_init(&cxt->writecount_lock);

	if (!cxt->oops_buf) {
		printk(KERN_ERR "Failed to allocate mtdoops buffer workspace\n");
		return -ENOMEM;
	}

	INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
	INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);

	register_console(&mtdoops_console);
	register_mtd_user(&mtdoops_notifier);
	return 0;
}
/*
 * Module exit: unregister in reverse order of init, then release the
 * duplicated partition name and the staging buffer.
 */
static void __exit mtdoops_console_exit(void)
{
	struct mtdoops_context *cxt = &oops_cxt;

	unregister_mtd_user(&mtdoops_notifier);
	unregister_console(&mtdoops_console);
	kfree(cxt->name);
	vfree(cxt->oops_buf);
}
/* Init early (subsys level) so oopses during later boot are captured. */
subsys_initcall(mtdoops_console_init);
module_exit(mtdoops_console_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");
MODULE_DESCRIPTION("MTD Oops/Panic console logger/driver");