/*
 * NAND Flash Controller Device Driver
 * Copyright © 2009 - 2010, Intel Corporation and its suppliers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/mtd/mtd.h>
#include <linux/module.h>

#include "denali.h"

MODULE_LICENSE("GPL");
/* We define a module parameter that allows the user to override
 * the hardware and decide what timing mode should be used.
 */
#define NAND_DEFAULT_TIMINGS	-1

static int onfi_timing_mode = NAND_DEFAULT_TIMINGS;
module_param(onfi_timing_mode, int, S_IRUGO);
MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting."
			" -1 indicates use default timings");

#define DENALI_NAND_NAME    "denali-nand"
/* We define a macro here that combines all interrupts this driver uses into
 * a single constant value, for convenience. */
#define DENALI_IRQ_ALL	(INTR_STATUS0__DMA_CMD_COMP | \
			INTR_STATUS0__ECC_TRANSACTION_DONE | \
			INTR_STATUS0__ECC_ERR | \
			INTR_STATUS0__PROGRAM_FAIL | \
			INTR_STATUS0__LOAD_COMP | \
			INTR_STATUS0__PROGRAM_COMP | \
			INTR_STATUS0__TIME_OUT | \
			INTR_STATUS0__ERASE_FAIL | \
			INTR_STATUS0__RST_COMP | \
			INTR_STATUS0__ERASE_COMP)
/* indicates whether or not the internal value for the flash bank is
 * valid or not */
#define CHIP_SELECT_INVALID	-1

#define SUPPORT_8BITECC		1

/* This macro divides two integers and rounds fractional values up
 * to the nearest integer value. */
#define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))
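/* e.g. CEIL_DIV(7, 4) == 2 and CEIL_DIV(8, 4) == 2 */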
/* this macro allows us to convert from an MTD structure to our own
 * device context (denali) structure.
 */
#define mtd_to_denali(m) container_of(m, struct denali_nand_info, mtd)

/* These constants are defined by the driver to enable common driver
 * configuration options. */
#define SPARE_ACCESS		0x41
#define MAIN_ACCESS		0x42
#define MAIN_SPARE_ACCESS	0x43

#define DENALI_READ	0
#define DENALI_WRITE	0x100

/* types of device accesses. We can issue commands and get status */
#define COMMAND_CYCLE	0
#define ADDR_CYCLE	1
#define STATUS_CYCLE	2

/* this is a helper macro that allows us to
 * format the bank into the proper bits for the controller */
#define BANK(x) ((x) << 24)
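/* e.g. BANK(2) == 0x02000000: the bank number is placed in bits 31:24 of an
 * indexed address, alongside the MODE_xx bits used throughout this driver. */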
/* List of platforms this NAND controller has been integrated into */
static const struct pci_device_id denali_pci_ids[] = {
	{ PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },
	{ PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST },
	{ /* end: all zeroes */ }
};
/* these are static lookup tables that give us easy access to
 * registers in the NAND controller.
 */
static const uint32_t intr_status_addresses[4] = {INTR_STATUS0,
						  INTR_STATUS1,
						  INTR_STATUS2,
						  INTR_STATUS3};

static const uint32_t device_reset_banks[4] = {DEVICE_RESET__BANK0,
					       DEVICE_RESET__BANK1,
					       DEVICE_RESET__BANK2,
					       DEVICE_RESET__BANK3};

static const uint32_t operation_timeout[4] = {INTR_STATUS0__TIME_OUT,
					      INTR_STATUS1__TIME_OUT,
					      INTR_STATUS2__TIME_OUT,
					      INTR_STATUS3__TIME_OUT};

static const uint32_t reset_complete[4] = {INTR_STATUS0__RST_COMP,
					   INTR_STATUS1__RST_COMP,
					   INTR_STATUS2__RST_COMP,
					   INTR_STATUS3__RST_COMP};
/* forward declarations */
static void clear_interrupts(struct denali_nand_info *denali);
static uint32_t wait_for_irq(struct denali_nand_info *denali,
							uint32_t irq_mask);
static void denali_irq_enable(struct denali_nand_info *denali,
							uint32_t int_mask);
static uint32_t read_interrupt_status(struct denali_nand_info *denali);

/* Certain operations for the denali NAND controller use
 * an indexed mode to read/write data. The operation is
 * performed by writing the address value of the command
 * to the device memory followed by the data. This function
 * abstracts this common operation.
 */
static void index_addr(struct denali_nand_info *denali,
				uint32_t address, uint32_t data)
{
	iowrite32(address, denali->flash_mem);
	iowrite32(data, denali->flash_mem + 0x10);
}

/* Perform an indexed read of the device */
static void index_addr_read_data(struct denali_nand_info *denali,
				 uint32_t address, uint32_t *pdata)
{
	iowrite32(address, denali->flash_mem);
	*pdata = ioread32(denali->flash_mem + 0x10);
}
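/* As an illustration of the indexed protocol: read_status() below issues
 * index_addr(denali, MODE_11 | BANK(bank) | COMMAND_CYCLE, 0x70), i.e. the
 * command/address word is written to flash_mem and the payload (here the
 * NAND status-read opcode 0x70) goes to flash_mem + 0x10. */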
/* We need to buffer some data for some of the NAND core routines.
 * The operations manage buffering that data. */
static void reset_buf(struct denali_nand_info *denali)
{
	denali->buf.head = denali->buf.tail = 0;
}

static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte)
{
	BUG_ON(denali->buf.tail >= sizeof(denali->buf.buf));
	denali->buf.buf[denali->buf.tail++] = byte;
}

/* reads the status of the device */
static void read_status(struct denali_nand_info *denali)
{
	uint32_t cmd = 0x0;

	/* initialize the data buffer to store status */
	reset_buf(denali);

	/* initiate a device status read */
	cmd = MODE_11 | BANK(denali->flash_bank);
	index_addr(denali, cmd | COMMAND_CYCLE, 0x70);
	iowrite32(cmd | STATUS_CYCLE, denali->flash_mem);

	/* update buffer with status value */
	write_byte_to_buf(denali, ioread32(denali->flash_mem + 0x10));
}
/* resets a specific device connected to the core */
static void reset_bank(struct denali_nand_info *denali)
{
	uint32_t irq_status = 0;
	uint32_t irq_mask = reset_complete[denali->flash_bank] |
			    operation_timeout[denali->flash_bank];
	int bank = 0;

	clear_interrupts(denali);

	bank = device_reset_banks[denali->flash_bank];
	iowrite32(bank, denali->flash_reg + DEVICE_RESET);

	irq_status = wait_for_irq(denali, irq_mask);

	if (irq_status & operation_timeout[denali->flash_bank])
		dev_err(&denali->dev->dev, "reset bank failed.\n");
}
/* Reset the flash controller */
static uint16_t denali_nand_reset(struct denali_nand_info *denali)
{
	uint32_t i;

	dev_dbg(&denali->dev->dev, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	for (i = 0; i < LLD_MAX_FLASH_BANKS; i++)
		iowrite32(reset_complete[i] | operation_timeout[i],
		denali->flash_reg + intr_status_addresses[i]);

	for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) {
		iowrite32(device_reset_banks[i],
				denali->flash_reg + DEVICE_RESET);
		while (!(ioread32(denali->flash_reg +
				intr_status_addresses[i]) &
			(reset_complete[i] | operation_timeout[i])))
			cpu_relax();
		if (ioread32(denali->flash_reg + intr_status_addresses[i]) &
			operation_timeout[i])
			dev_dbg(&denali->dev->dev,
			"NAND Reset operation timed out on bank %d\n", i);
	}

	for (i = 0; i < LLD_MAX_FLASH_BANKS; i++)
		iowrite32(reset_complete[i] | operation_timeout[i],
			denali->flash_reg + intr_status_addresses[i]);

	return PASS;
}
/* this routine calculates the ONFI timing values for a given mode and
 * programs the clocking register accordingly. The mode is determined by
 * the get_onfi_nand_para routine.
 */
static void nand_onfi_timing_set(struct denali_nand_info *denali,
								uint16_t mode)
{
	uint16_t Trea[6] = {40, 30, 25, 20, 20, 16};
	uint16_t Trp[6] = {50, 25, 17, 15, 12, 10};
	uint16_t Treh[6] = {30, 15, 15, 10, 10, 7};
	uint16_t Trc[6] = {100, 50, 35, 30, 25, 20};
	uint16_t Trhoh[6] = {0, 15, 15, 15, 15, 15};
	uint16_t Trloh[6] = {0, 0, 0, 0, 5, 5};
	uint16_t Tcea[6] = {100, 45, 30, 25, 25, 25};
	uint16_t Tadl[6] = {200, 100, 100, 100, 70, 70};
	uint16_t Trhw[6] = {200, 100, 100, 100, 100, 100};
	uint16_t Trhz[6] = {200, 100, 100, 100, 100, 100};
	uint16_t Twhr[6] = {120, 80, 80, 60, 60, 60};
	uint16_t Tcs[6] = {70, 35, 25, 25, 20, 15};

	uint16_t TclsRising = 1;
	uint16_t data_invalid_rhoh, data_invalid_rloh, data_invalid;
	uint16_t dv_window = 0;
	uint16_t en_lo, en_hi;
	uint16_t acc_clks;
	uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;

	dev_dbg(&denali->dev->dev, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	en_lo = CEIL_DIV(Trp[mode], CLK_X);
	en_hi = CEIL_DIV(Treh[mode], CLK_X);
#if ONFI_BLOOM_TIME
	if ((en_hi * CLK_X) < (Treh[mode] + 2))
		en_hi++;
#endif

	if ((en_lo + en_hi) * CLK_X < Trc[mode])
		en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X);

	if ((en_lo + en_hi) < CLK_MULTI)
		en_lo += CLK_MULTI - en_lo - en_hi;

	while (dv_window < 8) {
		data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode];

		data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode];

		data_invalid =
		    data_invalid_rhoh <
		    data_invalid_rloh ? data_invalid_rhoh : data_invalid_rloh;

		dv_window = data_invalid - Trea[mode];

		if (dv_window < 8)
			en_lo++;
	}

	acc_clks = CEIL_DIV(Trea[mode], CLK_X);

	while (((acc_clks * CLK_X) - Trea[mode]) < 3)
		acc_clks++;

	if ((data_invalid - acc_clks * CLK_X) < 2)
		dev_warn(&denali->dev->dev, "%s, Line %d: Warning!\n",
			__FILE__, __LINE__);

	addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
	re_2_we = CEIL_DIV(Trhw[mode], CLK_X);
	re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
	we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
	cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
	if (!TclsRising)
		cs_cnt = CEIL_DIV(Tcs[mode], CLK_X);
	if (cs_cnt == 0)
		cs_cnt = 1;

	if (Tcea[mode]) {
		while (((cs_cnt * CLK_X) + Trea[mode]) < Tcea[mode])
			cs_cnt++;
	}

#if MODE5_WORKAROUND
	if (mode == 5)
		acc_clks = 5;
#endif

	/* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
	if ((ioread32(denali->flash_reg + MANUFACTURER_ID) == 0) &&
		(ioread32(denali->flash_reg + DEVICE_ID) == 0x88))
		acc_clks = 6;

	iowrite32(acc_clks, denali->flash_reg + ACC_CLKS);
	iowrite32(re_2_we, denali->flash_reg + RE_2_WE);
	iowrite32(re_2_re, denali->flash_reg + RE_2_RE);
	iowrite32(we_2_re, denali->flash_reg + WE_2_RE);
	iowrite32(addr_2_data, denali->flash_reg + ADDR_2_DATA);
	iowrite32(en_lo, denali->flash_reg + RDWR_EN_LO_CNT);
	iowrite32(en_hi, denali->flash_reg + RDWR_EN_HI_CNT);
	iowrite32(cs_cnt, denali->flash_reg + CS_SETUP_CNT);
}
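/* Rough intent of the calculation above (assuming CLK_X is the controller
 * clock period in ns, as the divisions suggest): each ONFI timing parameter,
 * given in ns, is rounded up via CEIL_DIV() to a whole number of clock
 * periods before being programmed into the corresponding count register. */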
/* queries the NAND device to see what ONFI modes it supports. */
static uint16_t get_onfi_nand_para(struct denali_nand_info *denali)
{
	int i;
	/* we don't need to do a reset here because the driver has already
	 * reset all the banks before.
	 */
	if (!(ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
		ONFI_TIMING_MODE__VALUE))
		return FAIL;

	for (i = 5; i > 0; i--) {
		if (ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
			(0x01 << i))
			break;
	}

	nand_onfi_timing_set(denali, i);

	/* By now, all the ONFI devices we know support the page cache */
	/* rw feature. So here we enable the pipeline_rw_ahead feature */
	/* iowrite32(1, denali->flash_reg + CACHE_WRITE_ENABLE); */
	/* iowrite32(1, denali->flash_reg + CACHE_READ_ENABLE); */

	return PASS;
}
static void get_samsung_nand_para(struct denali_nand_info *denali,
							uint8_t device_id)
{
	if (device_id == 0xd3) { /* Samsung K9WAG08U1A */
		/* Set timing register values according to datasheet */
		iowrite32(5, denali->flash_reg + ACC_CLKS);
		iowrite32(20, denali->flash_reg + RE_2_WE);
		iowrite32(12, denali->flash_reg + WE_2_RE);
		iowrite32(14, denali->flash_reg + ADDR_2_DATA);
		iowrite32(3, denali->flash_reg + RDWR_EN_LO_CNT);
		iowrite32(2, denali->flash_reg + RDWR_EN_HI_CNT);
		iowrite32(2, denali->flash_reg + CS_SETUP_CNT);
	}
}

static void get_toshiba_nand_para(struct denali_nand_info *denali)
{
	uint32_t tmp;

	/* Workaround to fix a controller bug which reports a wrong */
	/* spare area size for some kind of Toshiba NAND device */
	if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
		(ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) {
		iowrite32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
		tmp = ioread32(denali->flash_reg + DEVICES_CONNECTED) *
			ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
		iowrite32(tmp,
				denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
#if SUPPORT_15BITECC
		iowrite32(15, denali->flash_reg + ECC_CORRECTION);
#elif SUPPORT_8BITECC
		iowrite32(8, denali->flash_reg + ECC_CORRECTION);
#endif
	}
}
static void get_hynix_nand_para(struct denali_nand_info *denali,
							uint8_t device_id)
{
	uint32_t main_size, spare_size;

	switch (device_id) {
	case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
	case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
		iowrite32(128, denali->flash_reg + PAGES_PER_BLOCK);
		iowrite32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
		iowrite32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
		main_size = 4096 *
			ioread32(denali->flash_reg + DEVICES_CONNECTED);
		spare_size = 224 *
			ioread32(denali->flash_reg + DEVICES_CONNECTED);
		iowrite32(main_size,
				denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
		iowrite32(spare_size,
				denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
		iowrite32(0, denali->flash_reg + DEVICE_WIDTH);
#if SUPPORT_15BITECC
		iowrite32(15, denali->flash_reg + ECC_CORRECTION);
#elif SUPPORT_8BITECC
		iowrite32(8, denali->flash_reg + ECC_CORRECTION);
#endif
		break;
	default:
		dev_warn(&denali->dev->dev,
			"Spectra: Unknown Hynix NAND (Device ID: 0x%x). "
			"Will use default parameter values instead.\n",
			device_id);
	}
}
/* determines how many NAND chips are connected to the controller. Note for
 * Intel CE4100 devices we don't support more than one device.
 */
static void find_valid_banks(struct denali_nand_info *denali)
{
	uint32_t id[LLD_MAX_FLASH_BANKS];
	int i;

	denali->total_used_banks = 1;
	for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) {
		index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 0), 0x90);
		index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 1), 0);
		index_addr_read_data(denali,
				(uint32_t)(MODE_11 | (i << 24) | 2), &id[i]);

		dev_dbg(&denali->dev->dev,
			"Return 1st ID for bank[%d]: %x\n", i, id[i]);

		if (i == 0) {
			if (!(id[i] & 0x0ff))
				break; /* WTF? */
		} else {
			if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
				denali->total_used_banks++;
			else
				break;
		}
	}

	if (denali->platform == INTEL_CE4100) {
		/* Platform limitations of the CE4100 device limit
		 * users to a single chip solution for NAND.
		 * Multichip support is not enabled.
		 */
		if (denali->total_used_banks != 1) {
			dev_err(&denali->dev->dev,
					"Sorry, Intel CE4100 only supports "
					"a single NAND device.\n");
			BUG();
		}
	}
	dev_dbg(&denali->dev->dev,
		"denali->total_used_banks: %d\n", denali->total_used_banks);
}
static void detect_partition_feature(struct denali_nand_info *denali)
{
	/* For the MRST platform, denali->fwblks holds the number of blocks
	 * occupied by firmware. The firmware lives in a protected partition
	 * that the MTD driver has no permission to access, so let the
	 * driver know how many blocks it must not touch.
	 */
	if (ioread32(denali->flash_reg + FEATURES) & FEATURES__PARTITION) {
		if ((ioread32(denali->flash_reg + PERM_SRC_ID_1) &
			PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID) {
			denali->fwblks =
			    ((ioread32(denali->flash_reg + MIN_MAX_BANK_1) &
			      MIN_MAX_BANK_1__MIN_VALUE) *
			     denali->blksperchip)
			    +
			    (ioread32(denali->flash_reg + MIN_BLK_ADDR_1) &
			    MIN_BLK_ADDR_1__VALUE);
		} else
			denali->fwblks = SPECTRA_START_BLOCK;
	} else
		denali->fwblks = SPECTRA_START_BLOCK;
}
static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
{
	uint16_t status = PASS;
	uint32_t id_bytes[5], addr;
	uint8_t i, maf_id, device_id;

	dev_dbg(&denali->dev->dev,
			"%s, Line %d, Function: %s\n",
			__FILE__, __LINE__, __func__);

	/* Use the read-id method to get the device ID and other params.
	 * For some NAND chips the controller can't report the correct
	 * device ID by reading from the DEVICE_ID register.
	 */
	addr = (uint32_t)MODE_11 | BANK(denali->flash_bank);
	index_addr(denali, (uint32_t)addr | 0, 0x90);
	index_addr(denali, (uint32_t)addr | 1, 0);
	for (i = 0; i < 5; i++)
		index_addr_read_data(denali, addr | 2, &id_bytes[i]);
	maf_id = id_bytes[0];
	device_id = id_bytes[1];

	if (ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
		ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
		if (FAIL == get_onfi_nand_para(denali))
			return FAIL;
	} else if (maf_id == 0xEC) { /* Samsung NAND */
		get_samsung_nand_para(denali, device_id);
	} else if (maf_id == 0x98) { /* Toshiba NAND */
		get_toshiba_nand_para(denali);
	} else if (maf_id == 0xAD) { /* Hynix NAND */
		get_hynix_nand_para(denali, device_id);
	}

	dev_info(&denali->dev->dev,
			"Dump timing register values: "
			"acc_clks: %d, re_2_we: %d, re_2_re: %d\n"
			"we_2_re: %d, addr_2_data: %d, rdwr_en_lo_cnt: %d\n"
			"rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
			ioread32(denali->flash_reg + ACC_CLKS),
			ioread32(denali->flash_reg + RE_2_WE),
			ioread32(denali->flash_reg + RE_2_RE),
			ioread32(denali->flash_reg + WE_2_RE),
			ioread32(denali->flash_reg + ADDR_2_DATA),
			ioread32(denali->flash_reg + RDWR_EN_LO_CNT),
			ioread32(denali->flash_reg + RDWR_EN_HI_CNT),
			ioread32(denali->flash_reg + CS_SETUP_CNT));

	find_valid_banks(denali);

	detect_partition_feature(denali);

	/* If the user specified to override the default timings
	 * with a specific ONFI mode, we apply those changes here.
	 */
	if (onfi_timing_mode != NAND_DEFAULT_TIMINGS)
		nand_onfi_timing_set(denali, onfi_timing_mode);

	return status;
}
static void denali_set_intr_modes(struct denali_nand_info *denali,
					uint16_t INT_ENABLE)
{
	dev_dbg(&denali->dev->dev, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	if (INT_ENABLE)
		iowrite32(1, denali->flash_reg + GLOBAL_INT_ENABLE);
	else
		iowrite32(0, denali->flash_reg + GLOBAL_INT_ENABLE);
}

/* validation function to verify that the controlling software is making
 * a valid request
 */
static inline bool is_flash_bank_valid(int flash_bank)
{
	return (flash_bank >= 0 && flash_bank < 4);
}

static void denali_irq_init(struct denali_nand_info *denali)
{
	uint32_t int_mask = 0;

	/* Disable global interrupts */
	denali_set_intr_modes(denali, false);

	int_mask = DENALI_IRQ_ALL;

	/* Clear all status bits */
	iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS0);
	iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS1);
	iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS2);
	iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS3);

	denali_irq_enable(denali, int_mask);
}
static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali)
{
	denali_set_intr_modes(denali, false);
	free_irq(irqnum, denali);
}

static void denali_irq_enable(struct denali_nand_info *denali,
							uint32_t int_mask)
{
	iowrite32(int_mask, denali->flash_reg + INTR_EN0);
	iowrite32(int_mask, denali->flash_reg + INTR_EN1);
	iowrite32(int_mask, denali->flash_reg + INTR_EN2);
	iowrite32(int_mask, denali->flash_reg + INTR_EN3);
}

/* This function only returns when an interrupt that this driver cares about
 * occurs. This is to reduce the overhead of servicing interrupts
 */
static inline uint32_t denali_irq_detected(struct denali_nand_info *denali)
{
	return read_interrupt_status(denali) & DENALI_IRQ_ALL;
}

/* Interrupts are cleared by writing a 1 to the appropriate status bit */
static inline void clear_interrupt(struct denali_nand_info *denali,
							uint32_t irq_mask)
{
	uint32_t intr_status_reg = 0;

	intr_status_reg = intr_status_addresses[denali->flash_bank];

	iowrite32(irq_mask, denali->flash_reg + intr_status_reg);
}

static void clear_interrupts(struct denali_nand_info *denali)
{
	uint32_t status = 0x0;

	spin_lock_irq(&denali->irq_lock);

	status = read_interrupt_status(denali);
	clear_interrupt(denali, status);

	denali->irq_status = 0x0;

	spin_unlock_irq(&denali->irq_lock);
}

static uint32_t read_interrupt_status(struct denali_nand_info *denali)
{
	uint32_t intr_status_reg = 0;

	intr_status_reg = intr_status_addresses[denali->flash_bank];

	return ioread32(denali->flash_reg + intr_status_reg);
}
/* This is the interrupt service routine. It handles all interrupts
 * sent to this device. Note that on CE4100, this is a shared
 * interrupt.
 */
static irqreturn_t denali_isr(int irq, void *dev_id)
{
	struct denali_nand_info *denali = dev_id;
	uint32_t irq_status = 0x0;
	irqreturn_t result = IRQ_NONE;

	spin_lock(&denali->irq_lock);

	/* check to see if a valid NAND chip has
	 * been selected.
	 */
	if (is_flash_bank_valid(denali->flash_bank)) {
		/* check to see if controller generated
		 * the interrupt, since this is a shared interrupt */
		irq_status = denali_irq_detected(denali);
		if (irq_status != 0) {
			/* handle interrupt */
			/* first acknowledge it */
			clear_interrupt(denali, irq_status);
			/* store the status in the device context for someone
			   to read */
			denali->irq_status |= irq_status;
			/* notify anyone who cares that it happened */
			complete(&denali->complete);
			/* tell the OS that we've handled this */
			result = IRQ_HANDLED;
		}
	}
	spin_unlock(&denali->irq_lock);
	return result;
}
static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
{
	unsigned long comp_res = 0;
	uint32_t intr_status = 0;
	bool retry = false;
	unsigned long timeout = msecs_to_jiffies(1000);

	do {
		comp_res =
			wait_for_completion_timeout(&denali->complete, timeout);
		spin_lock_irq(&denali->irq_lock);
		intr_status = denali->irq_status;

		if (intr_status & irq_mask) {
			denali->irq_status &= ~irq_mask;
			spin_unlock_irq(&denali->irq_lock);
			/* our interrupt was detected */
			break;
		} else {
			/* these are not the interrupts you are looking for -
			 * need to wait again */
			spin_unlock_irq(&denali->irq_lock);
			retry = true;
		}
	} while (comp_res != 0);

	if (comp_res == 0) {
		/* timeout */
		printk(KERN_ERR "timeout occurred, status = 0x%x, mask = 0x%x\n",
				intr_status, irq_mask);

		intr_status = 0;
	}
	return intr_status;
}
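/* wait_for_irq() pairs with denali_isr(): the ISR accumulates status bits
 * into denali->irq_status under irq_lock and signals the completion; this
 * function consumes only the bits named in irq_mask and leaves any other
 * pending bits in place for later callers. */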
/* This helper function sets up the registers for ECC and whether or not
 * the spare area will be transferred. */
static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
				bool transfer_spare)
{
	int ecc_en_flag = 0, transfer_spare_flag = 0;

	/* set ECC, transfer spare bits if needed */
	ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
	transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;

	/* Enable spare area/ECC per user's request. */
	iowrite32(ecc_en_flag, denali->flash_reg + ECC_ENABLE);
	iowrite32(transfer_spare_flag,
			denali->flash_reg + TRANSFER_SPARE_REG);
}
/* sends a pipeline command operation to the controller. See the Denali NAND
 * controller's user guide for more information (section 4.2.3.6).
 */
static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
							bool ecc_en,
							bool transfer_spare,
							int access_type,
							int op)
{
	int status = PASS;
	uint32_t addr = 0x0, cmd = 0x0, page_count = 1, irq_status = 0,
		 irq_mask = 0;

	if (op == DENALI_READ)
		irq_mask = INTR_STATUS0__LOAD_COMP;
	else if (op == DENALI_WRITE)
		irq_mask = 0;
	else
		BUG();

	setup_ecc_for_xfer(denali, ecc_en, transfer_spare);

	/* clear interrupts */
	clear_interrupts(denali);

	addr = BANK(denali->flash_bank) | denali->page;

	if (op == DENALI_WRITE && access_type != SPARE_ACCESS) {
		cmd = MODE_01 | addr;
		iowrite32(cmd, denali->flash_mem);
	} else if (op == DENALI_WRITE && access_type == SPARE_ACCESS) {
		/* read spare area */
		cmd = MODE_10 | addr;
		index_addr(denali, (uint32_t)cmd, access_type);

		cmd = MODE_01 | addr;
		iowrite32(cmd, denali->flash_mem);
	} else if (op == DENALI_READ) {
		/* setup page read request for access type */
		cmd = MODE_10 | addr;
		index_addr(denali, (uint32_t)cmd, access_type);

		/* page 33 of the NAND controller spec indicates we should not
		   use the pipeline commands in Spare area only mode. So we
		   don't.
		 */
		if (access_type == SPARE_ACCESS) {
			cmd = MODE_01 | addr;
			iowrite32(cmd, denali->flash_mem);
		} else {
			index_addr(denali, (uint32_t)cmd,
					0x2000 | op | page_count);

			/* wait for command to be accepted
			 * can always use status0 bit as the
			 * mask is identical for each
			 * bank. */
			irq_status = wait_for_irq(denali, irq_mask);

			if (irq_status == 0) {
				dev_err(&denali->dev->dev,
						"cmd, page, addr on timeout "
						"(0x%x, 0x%x, 0x%x)\n",
						cmd, denali->page, addr);
				status = FAIL;
			} else {
				cmd = MODE_01 | addr;
				iowrite32(cmd, denali->flash_mem);
			}
		}
	}
	return status;
}
/* helper function that simply writes a buffer to the flash */
static int write_data_to_flash_mem(struct denali_nand_info *denali,
							const uint8_t *buf,
							int len)
{
	uint32_t i = 0, *buf32;

	/* verify that the len is a multiple of 4. see comment in
	 * read_data_from_flash_mem() */
	BUG_ON((len % 4) != 0);

	/* write the data to the flash memory */
	buf32 = (uint32_t *)buf;
	for (i = 0; i < len / 4; i++)
		iowrite32(*buf32++, denali->flash_mem + 0x10);
	return i * 4; /* intent is to return the number of bytes written */
}

/* helper function that simply reads a buffer from the flash */
static int read_data_from_flash_mem(struct denali_nand_info *denali,
								uint8_t *buf,
								int len)
{
	uint32_t i = 0, *buf32;

	/* we assume that len will be a multiple of 4, if not
	 * it would be nice to know about it ASAP rather than
	 * have random failures...
	 * This assumption is based on the fact that this
	 * function is designed to be used to read flash pages,
	 * which are typically multiples of 4...
	 */
	BUG_ON((len % 4) != 0);

	/* transfer the data from the flash */
	buf32 = (uint32_t *)buf;
	for (i = 0; i < len / 4; i++)
		*buf32++ = ioread32(denali->flash_mem + 0x10);
	return i * 4; /* intent is to return the number of bytes read */
}
/* writes OOB data to the device */
static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t irq_status = 0;
	uint32_t irq_mask = INTR_STATUS0__PROGRAM_COMP |
						INTR_STATUS0__PROGRAM_FAIL;
	int status = 0;

	denali->page = page;

	if (denali_send_pipeline_cmd(denali, false, false, SPARE_ACCESS,
							DENALI_WRITE) == PASS) {
		write_data_to_flash_mem(denali, buf, mtd->oobsize);

		/* wait for operation to complete */
		irq_status = wait_for_irq(denali, irq_mask);

		if (irq_status == 0) {
			dev_err(&denali->dev->dev, "OOB write failed\n");
			status = -EIO;
		}
	} else {
		dev_err(&denali->dev->dev, "unable to send pipeline command\n");
		status = -EIO;
	}
	return status;
}

/* reads OOB data from the device */
static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t irq_mask = INTR_STATUS0__LOAD_COMP,
			 irq_status = 0, addr = 0x0, cmd = 0x0;

	denali->page = page;

	if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
							DENALI_READ) == PASS) {
		read_data_from_flash_mem(denali, buf, mtd->oobsize);

		/* wait for command to be accepted
		 * can always use status0 bit as the mask is identical for each
		 * bank. */
		irq_status = wait_for_irq(denali, irq_mask);

		if (irq_status == 0)
			dev_err(&denali->dev->dev, "page on OOB timeout %d\n",
					denali->page);

		/* We set the device back to MAIN_ACCESS here as I observed
		 * instability with the controller if you do a block erase
		 * and the last transaction was a SPARE_ACCESS. Block erase
		 * is reliable (according to the MTD test infrastructure)
		 * if you are in MAIN_ACCESS.
		 */
		addr = BANK(denali->flash_bank) | denali->page;
		cmd = MODE_10 | addr;
		index_addr(denali, (uint32_t)cmd, MAIN_ACCESS);
	}
}
/* this function examines buffers to see if they contain data that
 * indicate that the buffer is part of an erased region of flash.
 */
bool is_erased(uint8_t *buf, int len)
{
	int i = 0;
	for (i = 0; i < len; i++)
		if (buf[i] != 0xFF)
			return false;
	return true;
}
#define ECC_SECTOR_SIZE 512

#define ECC_SECTOR(x)	(((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
#define ECC_BYTE(x)	(((x) & ECC_ERROR_ADDRESS__OFFSET))
#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
#define ECC_ERROR_CORRECTABLE(x) (!((x) & ERR_CORRECTION_INFO__ERROR_TYPE))
#define ECC_ERR_DEVICE(x)	(((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
#define ECC_LAST_ERR(x)		((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
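/* These macros decode the ECC_ERROR_ADDRESS and ERR_CORRECTION_INFO
 * registers: the failing sector index and byte offset come from the address
 * register, while the correction value, device number, "correctable" flag
 * and "last error" flag come from the correction-info register. */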
static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
		       uint32_t irq_status)
{
	bool check_erased_page = false;

	if (irq_status & INTR_STATUS0__ECC_ERR) {
		/* read the ECC errors. we'll ignore them for now */
		uint32_t err_address = 0, err_correction_info = 0;
		uint32_t err_byte = 0, err_sector = 0, err_device = 0;
		uint32_t err_correction_value = 0;
		denali_set_intr_modes(denali, false);

		do {
			err_address = ioread32(denali->flash_reg +
						ECC_ERROR_ADDRESS);
			err_sector = ECC_SECTOR(err_address);
			err_byte = ECC_BYTE(err_address);

			err_correction_info = ioread32(denali->flash_reg +
						ERR_CORRECTION_INFO);
			err_correction_value =
				ECC_CORRECTION_VALUE(err_correction_info);
			err_device = ECC_ERR_DEVICE(err_correction_info);

			if (ECC_ERROR_CORRECTABLE(err_correction_info)) {
				/* If err_byte is larger than ECC_SECTOR_SIZE,
				 * the error happened in the OOB area, so we
				 * ignore it; there is no need to correct it.
				 * err_device identifies which NAND device the
				 * error bits belong to when more than one
				 * NAND is connected.
				 */
				if (err_byte < ECC_SECTOR_SIZE) {
					int offset;
					offset = (err_sector *
							ECC_SECTOR_SIZE +
							err_byte) *
							denali->devnum +
							err_device;
					/* correct the ECC error */
					buf[offset] ^= err_correction_value;
					denali->mtd.ecc_stats.corrected++;
				}
			} else {
				/* if the error is not correctable, need to
				 * look at the page to see if it is an erased
				 * page. if so, then it's not a real ECC error
				 */
				check_erased_page = true;
			}
		} while (!ECC_LAST_ERR(err_correction_info));
		/* Once all ecc errors are handled, the controller will
		 * trigger an ECC_TRANSACTION_DONE interrupt, so here we
		 * just wait for that interrupt.
		 */
		while (!(read_interrupt_status(denali) &
				INTR_STATUS0__ECC_TRANSACTION_DONE))
			cpu_relax();
		clear_interrupts(denali);
		denali_set_intr_modes(denali, true);
	}
	return check_erased_page;
}
/* programs the controller to either enable/disable DMA transfers */
static void denali_enable_dma(struct denali_nand_info *denali, bool en)
{
	uint32_t reg_val = 0x0;

	if (en)
		reg_val = DMA_ENABLE__FLAG;

	iowrite32(reg_val, denali->flash_reg + DMA_ENABLE);
	ioread32(denali->flash_reg + DMA_ENABLE);
}

/* sets up the HW to perform the data DMA */
static void denali_setup_dma(struct denali_nand_info *denali, int op)
{
	uint32_t mode = 0x0;
	const int page_count = 1;
	dma_addr_t addr = denali->buf.dma_buf;

	mode = MODE_10 | BANK(denali->flash_bank);

	/* DMA is a four step process */

	/* 1. setup transfer type and # of pages */
	index_addr(denali, mode | denali->page, 0x2000 | op | page_count);

	/* 2. set memory high address bits 23:8 */
	index_addr(denali, mode | ((uint16_t)(addr >> 16) << 8), 0x2200);

	/* 3. set memory low address bits 23:8 */
	index_addr(denali, mode | ((uint16_t)addr << 8), 0x2300);

	/* 4. interrupt when complete, burst len = 64 bytes */
	index_addr(denali, mode | 0x14000, 0x2400);
}
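/* Note how the DMA buffer address is split across two indexed writes above:
 * the high 16 bits travel with the 0x2200 command and the low 16 bits with
 * the 0x2300 command, each shifted into bits 23:8 of the index word. */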
/* writes a page. user specifies type, and this function handles the
 * configuration details. */
static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
			const uint8_t *buf, bool raw_xfer)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	struct pci_dev *pci_dev = denali->dev;

	dma_addr_t addr = denali->buf.dma_buf;
	size_t size = denali->mtd.writesize + denali->mtd.oobsize;

	uint32_t irq_status = 0;
	uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP |
						INTR_STATUS0__PROGRAM_FAIL;

	/* if it is a raw xfer, we want to disable ecc, and send
	 * the spare area.
	 * !raw_xfer - enable ecc
	 * raw_xfer - transfer spare
	 */
	setup_ecc_for_xfer(denali, !raw_xfer, raw_xfer);

	/* copy buffer into DMA buffer */
	memcpy(denali->buf.buf, buf, mtd->writesize);

	if (raw_xfer) {
		/* transfer the data to the spare area */
		memcpy(denali->buf.buf + mtd->writesize,
			chip->oob_poi,
			mtd->oobsize);
	}

	pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_TODEVICE);

	clear_interrupts(denali);
	denali_enable_dma(denali, true);

	denali_setup_dma(denali, DENALI_WRITE);

	/* wait for operation to complete */
	irq_status = wait_for_irq(denali, irq_mask);

	if (irq_status == 0) {
		dev_err(&denali->dev->dev,
				"timeout on write_page (type = %d)\n",
				raw_xfer);
		denali->status =
			(irq_status & INTR_STATUS0__PROGRAM_FAIL) ?
			NAND_STATUS_FAIL : PASS;
	}

	denali_enable_dma(denali, false);
	pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_TODEVICE);
}
/* NAND core entry points */

/* this is the callback that the NAND core calls to write a page. Since
 * writing a page with ECC or without is similar, all the work is done
 * by write_page above.
 */
static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
				const uint8_t *buf)
{
	/* for regular page writes, we let HW handle all the ECC
	 * data written to the device. */
	write_page(mtd, chip, buf, false);
}

/* This is the callback that the NAND core calls to write a page without ECC.
 * raw access is similar to ECC page writes, so all the work is done in the
 * write_page() function above.
 */
static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
					const uint8_t *buf)
{
	/* for raw page writes, we want to disable ECC and simply write
	   whatever data is in the buffer. */
	write_page(mtd, chip, buf, true);
}

static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
			    int page)
{
	return write_oob_data(mtd, chip->oob_poi, page);
}

static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
			   int page, int sndcmd)
{
	read_oob_data(mtd, chip->oob_poi, page);

	return 0; /* notify NAND core to send command to
			   NAND device. */
}
static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			    uint8_t *buf, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	struct pci_dev *pci_dev = denali->dev;

	dma_addr_t addr = denali->buf.dma_buf;
	size_t size = denali->mtd.writesize + denali->mtd.oobsize;

	uint32_t irq_status = 0;
	uint32_t irq_mask = INTR_STATUS0__ECC_TRANSACTION_DONE |
			    INTR_STATUS0__ECC_ERR;
	bool check_erased_page = false;

	setup_ecc_for_xfer(denali, true, false);

	denali_enable_dma(denali, true);
	pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE);

	clear_interrupts(denali);
	denali_setup_dma(denali, DENALI_READ);

	/* wait for operation to complete */
	irq_status = wait_for_irq(denali, irq_mask);

	pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE);

	memcpy(buf, denali->buf.buf, mtd->writesize);

	check_erased_page = handle_ecc(denali, buf, irq_status);
	denali_enable_dma(denali, false);

	if (check_erased_page) {
		read_oob_data(&denali->mtd, chip->oob_poi, denali->page);

		/* check ECC failures that may have occurred on erased pages */
		if (check_erased_page) {
			if (!is_erased(buf, denali->mtd.writesize))
				denali->mtd.ecc_stats.failed++;
			if (!is_erased(buf, denali->mtd.oobsize))
				denali->mtd.ecc_stats.failed++;
		}
	}
	return 0;
}

static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	struct pci_dev *pci_dev = denali->dev;

	dma_addr_t addr = denali->buf.dma_buf;
	size_t size = denali->mtd.writesize + denali->mtd.oobsize;

	uint32_t irq_status = 0;
	uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP;

	setup_ecc_for_xfer(denali, false, true);
	denali_enable_dma(denali, true);

	pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE);

	clear_interrupts(denali);
	denali_setup_dma(denali, DENALI_READ);

	/* wait for operation to complete */
	irq_status = wait_for_irq(denali, irq_mask);

	pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE);

	denali_enable_dma(denali, false);

	memcpy(buf, denali->buf.buf, mtd->writesize);
	memcpy(chip->oob_poi, denali->buf.buf + mtd->writesize, mtd->oobsize);

	return 0;
}
static uint8_t denali_read_byte(struct mtd_info *mtd)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint8_t result = 0xff;

	if (denali->buf.head < denali->buf.tail)
		result = denali->buf.buf[denali->buf.head++];

	return result;
}

static void denali_select_chip(struct mtd_info *mtd, int chip)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	spin_lock_irq(&denali->irq_lock);
	denali->flash_bank = chip;
	spin_unlock_irq(&denali->irq_lock);
}

static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int status = denali->status;
	denali->status = 0;

	return status;
}

static void denali_erase(struct mtd_info *mtd, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	uint32_t cmd = 0x0, irq_status = 0;

	/* clear interrupts */
	clear_interrupts(denali);

	/* setup page read request for access type */
	cmd = MODE_10 | BANK(denali->flash_bank) | page;
	index_addr(denali, (uint32_t)cmd, 0x1);

	/* wait for erase to complete or failure to occur */
	irq_status = wait_for_irq(denali, INTR_STATUS0__ERASE_COMP |
					INTR_STATUS0__ERASE_FAIL);

	denali->status = (irq_status & INTR_STATUS0__ERASE_FAIL) ?
						NAND_STATUS_FAIL : PASS;
}
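/* The erase itself is issued as a MODE_10 indexed command with data 0x1;
 * completion or failure is then reported through the per-bank
 * ERASE_COMP/ERASE_FAIL interrupt bits waited on above. */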
static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
			   int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t addr, id;
	int i;

	switch (cmd) {
	case NAND_CMD_PAGEPROG:
		break;
	case NAND_CMD_STATUS:
		read_status(denali);
		break;
	case NAND_CMD_READID:
		reset_buf(denali);
		/* sometimes the manufacturer ID read from the register is not
		 * right, e.g. on some Micron MT29F32G08QAA MLC NAND chips,
		 * so here we send a READID command to the NAND instead.
		 */
		addr = (uint32_t)MODE_11 | BANK(denali->flash_bank);
		index_addr(denali, (uint32_t)addr | 0, 0x90);
		index_addr(denali, (uint32_t)addr | 1, 0);
		for (i = 0; i < 5; i++) {
			index_addr_read_data(denali,
						(uint32_t)addr | 2,
						&id);
			write_byte_to_buf(denali, id);
		}
		break;
	case NAND_CMD_READ0:
	case NAND_CMD_SEQIN:
		denali->page = page;
		break;
	case NAND_CMD_RESET:
		reset_bank(denali);
		break;
	case NAND_CMD_READOOB:
		/* TODO: Read OOB data */
		break;
	default:
		printk(KERN_ERR ": unsupported command received 0x%x\n", cmd);
		break;
	}
}
/* stubs for ECC functions not used by the NAND core */
static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data,
				uint8_t *ecc_code)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	dev_err(&denali->dev->dev,
			"denali_ecc_calculate called unexpectedly\n");
	BUG();
	return -EIO;
}

static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data,
				uint8_t *read_ecc, uint8_t *calc_ecc)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	dev_err(&denali->dev->dev,
			"denali_ecc_correct called unexpectedly\n");
	BUG();
	return -EIO;
}

static void denali_ecc_hwctl(struct mtd_info *mtd, int mode)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	dev_err(&denali->dev->dev,
			"denali_ecc_hwctl called unexpectedly\n");
	BUG();
}
/* end NAND core entry points */
/* Initialization code to bring the device up to a known good state */
static void denali_hw_init(struct denali_nand_info *denali)
{
	/* tell the driver how many bytes the controller will skip before
	 * writing ECC code in the OOB. This register may already be set by
	 * the firmware, so we read the value out; if it is 0, just leave
	 * it alone.
	 */
	denali->bbtskipbytes = ioread32(denali->flash_reg +
						SPARE_AREA_SKIP_BYTES);
	denali_irq_init(denali);
	denali_nand_reset(denali);
	iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
	iowrite32(CHIP_EN_DONT_CARE__FLAG,
			denali->flash_reg + CHIP_ENABLE_DONT_CARE);

	iowrite32(0x0, denali->flash_reg + SPARE_AREA_SKIP_BYTES);
	iowrite32(0xffff, denali->flash_reg + SPARE_AREA_MARKER);

	/* Should set value for these registers when init */
	iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
	iowrite32(1, denali->flash_reg + ECC_ENABLE);
}
/* Although the controller spec says SLC ECC is forced to be 4 bit,
 * the Denali controller in MRST only supports 15-bit and 8-bit ECC
 * correction.
 */
#define ECC_8BITS	14
static struct nand_ecclayout nand_8bit_oob = {
	.eccbytes = 14,
};

#define ECC_15BITS	26
static struct nand_ecclayout nand_15bit_oob = {
	.eccbytes = 26,
};

static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };

static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs =	8,
	.len = 4,
	.veroffs = 12,
	.maxblocks = 4,
	.pattern = bbt_pattern,
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs =	8,
	.len = 4,
	.veroffs = 12,
	.maxblocks = 4,
	.pattern = mirror_pattern,
};
/* initialize driver data structures */
void denali_drv_init(struct denali_nand_info *denali)
{
	denali->idx = 0;

	/* setup interrupt handler */
	/* the completion object will be used to notify
	 * the callee that the interrupt is done */
	init_completion(&denali->complete);

	/* the spinlock will be used to synchronize the ISR with any
	 * element that might access shared data (interrupt status) */
	spin_lock_init(&denali->irq_lock);

	/* indicate that MTD has not selected a valid bank yet */
	denali->flash_bank = CHIP_SELECT_INVALID;

	/* initialize our irq_status variable to indicate no interrupts */
	denali->irq_status = 0;
}
/* driver entry point */
static int denali_pci_probe ( struct pci_dev * dev , const struct pci_device_id * id )
{
int ret = - ENODEV ;
resource_size_t csr_base , mem_base ;
unsigned long csr_len , mem_len ;
struct denali_nand_info * denali ;
denali = kzalloc ( sizeof ( * denali ) , GFP_KERNEL ) ;
if ( ! denali )
return - ENOMEM ;
ret = pci_enable_device ( dev ) ;
if ( ret ) {
printk ( KERN_ERR " Spectra: pci_enable_device failed. \n " ) ;
2010-08-09 14:37:00 +04:00
goto failed_alloc_memery ;
2010-05-13 18:57:33 +04:00
}
	if (id->driver_data == INTEL_CE4100) {
		/* Due to a silicon limitation, we can only support
		 * ONFI timing mode 1 and below.
		 */
		if (onfi_timing_mode < -1 || onfi_timing_mode > 1) {
			printk(KERN_ERR "Intel CE4100 only supports "
					"ONFI timing mode 1 or below\n");
			ret = -EINVAL;
			goto failed_enable_dev;
		}
		denali->platform = INTEL_CE4100;
		mem_base = pci_resource_start(dev, 0);
		mem_len = pci_resource_len(dev, 1);
		csr_base = pci_resource_start(dev, 1);
		csr_len = pci_resource_len(dev, 1);
	} else {
		denali->platform = INTEL_MRST;
		csr_base = pci_resource_start(dev, 0);
		csr_len = pci_resource_len(dev, 0);
		mem_base = pci_resource_start(dev, 1);
		mem_len = pci_resource_len(dev, 1);
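		/* fall back: if BAR 1 reports no length, treat the region
		 * immediately following the CSR window as the data window
		 * and reuse the CSR window size */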
		if (!mem_len) {
			mem_base = csr_base + csr_len;
			mem_len = csr_len;
		}
	}
	/* Is 32-bit DMA supported? */
	ret = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
	if (ret) {
		printk(KERN_ERR "Spectra: no usable DMA configuration\n");
		goto failed_enable_dev;
	}

	denali->buf.dma_buf = pci_map_single(dev, denali->buf.buf,
					     DENALI_BUF_SIZE,
					     PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev, denali->buf.dma_buf)) {
		dev_err(&dev->dev, "Spectra: failed to map DMA buffer\n");
		goto failed_enable_dev;
	}
	pci_set_master(dev);
	denali->dev = dev;

	ret = pci_request_regions(dev, DENALI_NAND_NAME);
	if (ret) {
		printk(KERN_ERR "Spectra: Unable to request memory regions\n");
		goto failed_dma_map;
	}

	denali->flash_reg = ioremap_nocache(csr_base, csr_len);
	if (!denali->flash_reg) {
		printk(KERN_ERR "Spectra: Unable to remap memory region\n");
		ret = -ENOMEM;
		goto failed_req_regions;
	}

	denali->flash_mem = ioremap_nocache(mem_base, mem_len);
	if (!denali->flash_mem) {
		printk(KERN_ERR "Spectra: ioremap_nocache failed!\n");
		ret = -ENOMEM;
		goto failed_remap_reg;
	}
	denali_hw_init(denali);
	denali_drv_init(denali);

	if (request_irq(dev->irq, denali_isr, IRQF_SHARED,
			DENALI_NAND_NAME, denali)) {
		printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
		ret = -ENODEV;
		goto failed_remap_mem;
	}

	/* now that our ISR is registered, we can enable interrupts */
	denali_set_intr_modes(denali, true);

	pci_set_drvdata(dev, denali);

	denali_nand_timing_set(denali);

	denali->mtd.name = "Denali NAND";
	denali->mtd.owner = THIS_MODULE;
	denali->mtd.priv = &denali->nand;
	/* register the driver with the NAND core subsystem */
	denali->nand.select_chip = denali_select_chip;
	denali->nand.cmdfunc = denali_cmdfunc;
	denali->nand.read_byte = denali_read_byte;
	denali->nand.waitfunc = denali_waitfunc;

	/* scan for NAND devices attached to the controller
	 * this is the first stage in a two-step process to register
	 * with the NAND subsystem */
	if (nand_scan_ident(&denali->mtd, LLD_MAX_FLASH_BANKS, NULL)) {
		ret = -ENXIO;
		goto failed_req_irq;
	}
	/* MTD supported page sizes vary by kernel. We validate our
	 * kernel supports the device here.
	 */
	if (denali->mtd.writesize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) {
		ret = -ENODEV;
		printk(KERN_ERR "Spectra: device size not supported by this "
				"version of MTD.\n");
		goto failed_req_irq;
	}
	/* support for multiple NAND devices on one controller:
	 * MTD knows nothing about multi-device arrangements,
	 * so we should tell it the combined page size
	 * and anything else necessary
	 */
	denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED);
	denali->nand.chipsize <<= (denali->devnum - 1);
	denali->nand.page_shift += (denali->devnum - 1);
	denali->nand.pagemask = (denali->nand.chipsize >>
						denali->nand.page_shift) - 1;
	denali->nand.bbt_erase_shift += (denali->devnum - 1);
	denali->nand.phys_erase_shift = denali->nand.bbt_erase_shift;
	denali->nand.chip_shift += (denali->devnum - 1);
	denali->mtd.writesize <<= (denali->devnum - 1);
	denali->mtd.oobsize <<= (denali->devnum - 1);
	denali->mtd.erasesize <<= (denali->devnum - 1);
	denali->mtd.size = denali->nand.numchips * denali->nand.chipsize;
	denali->bbtskipbytes *= denali->devnum;
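
	/* For example (hypothetical values): with two identical devices
	 * connected (devnum == 2) and 2KiB pages, the shifts above grow
	 * by one, so MTD sees 4KiB pages, doubled OOB and erase sizes,
	 * and bbtskipbytes scaled by two. */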
	/* second stage of the NAND scan
	 * this stage requires information regarding ECC and
	 * bad block management. */

	/* Bad block management */
	denali->nand.bbt_td = &bbt_main_descr;
	denali->nand.bbt_md = &bbt_mirror_descr;

	/* skip the scan for now until we have OOB read and write support */
	denali->nand.options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN;
	denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
	/* The Denali controller only supports 15-bit and 8-bit ECC in MRST,
	 * so just let the controller do 15-bit ECC for MLC and 8-bit ECC
	 * for SLC if possible.
	 */
	if (denali->nand.cellinfo & 0xc &&
			(denali->mtd.oobsize > (denali->bbtskipbytes +
			ECC_15BITS * (denali->mtd.writesize /
			ECC_SECTOR_SIZE)))) {
		/* if MLC OOB size is large enough, use 15-bit ECC */
		denali->nand.ecc.layout = &nand_15bit_oob;
		denali->nand.ecc.bytes = ECC_15BITS;
		iowrite32(15, denali->flash_reg + ECC_CORRECTION);
	} else if (denali->mtd.oobsize < (denali->bbtskipbytes +
			ECC_8BITS * (denali->mtd.writesize /
			ECC_SECTOR_SIZE))) {
		printk(KERN_ERR "Your NAND chip OOB is not large enough to "
				"contain 8bit ECC correction codes\n");
		ret = -EINVAL;
		goto failed_req_irq;
	} else {
		denali->nand.ecc.layout = &nand_8bit_oob;
		denali->nand.ecc.bytes = ECC_8BITS;
		iowrite32(8, denali->flash_reg + ECC_CORRECTION);
	}
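
	/* Worked example (assuming ECC_SECTOR_SIZE is 512): a 2KiB page has
	 * four ECC sectors, so 15-bit correction needs 4 * 26 = 104 OOB
	 * bytes (plus bbtskipbytes) while 8-bit correction needs 4 * 14 = 56;
	 * the checks above use 15-bit ECC for MLC parts whose OOB can hold
	 * it and fall back to 8-bit otherwise, erroring out if even that
	 * does not fit. */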
	denali->nand.ecc.bytes *= denali->devnum;
	denali->nand.ecc.layout->eccbytes *=
		denali->mtd.writesize / ECC_SECTOR_SIZE;
	denali->nand.ecc.layout->oobfree[0].offset =
		denali->bbtskipbytes + denali->nand.ecc.layout->eccbytes;
	denali->nand.ecc.layout->oobfree[0].length =
		denali->mtd.oobsize - denali->nand.ecc.layout->eccbytes -
		denali->bbtskipbytes;
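
	/* Resulting OOB accounting (as computed above): the first
	 * bbtskipbytes are reserved for the controller/firmware, the ECC
	 * bytes for the whole page follow, and whatever remains is exposed
	 * to MTD as free OOB space. */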
	/* Let the driver know the total number of blocks and how many
	 * blocks are contained in each NAND chip. blksperchip helps the
	 * driver know how many blocks are taken up by firmware.
	 */
	denali->totalblks = denali->mtd.size >> denali->nand.phys_erase_shift;
	denali->blksperchip = denali->totalblks / denali->nand.numchips;
	/* These functions are required by the NAND core framework; otherwise
	 * the NAND core will assert. However, we don't need them, so we'll
	 * stub them out. */
	denali->nand.ecc.calculate = denali_ecc_calculate;
	denali->nand.ecc.correct = denali_ecc_correct;
	denali->nand.ecc.hwctl = denali_ecc_hwctl;

	/* override the default read operations */
	denali->nand.ecc.size = ECC_SECTOR_SIZE * denali->devnum;
	denali->nand.ecc.read_page = denali_read_page;
	denali->nand.ecc.read_page_raw = denali_read_page_raw;
	denali->nand.ecc.write_page = denali_write_page;
	denali->nand.ecc.write_page_raw = denali_write_page_raw;
	denali->nand.ecc.read_oob = denali_read_oob;
	denali->nand.ecc.write_oob = denali_write_oob;
	denali->nand.erase_cmd = denali_erase;
	if (nand_scan_tail(&denali->mtd)) {
		ret = -ENXIO;
		goto failed_req_irq;
	}

	ret = add_mtd_device(&denali->mtd);
	if (ret) {
		dev_err(&dev->dev, "Spectra: Failed to register MTD: %d\n",
				ret);
		goto failed_req_irq;
	}
	return 0;
failed_req_irq:
	denali_irq_cleanup(dev->irq, denali);
failed_remap_mem:
	iounmap(denali->flash_mem);
failed_remap_reg:
	iounmap(denali->flash_reg);
failed_req_regions:
	pci_release_regions(dev);
failed_dma_map:
	pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
			 PCI_DMA_BIDIRECTIONAL);
failed_enable_dev:
	pci_disable_device(dev);
failed_alloc_memory:
	kfree(denali);
	return ret;
}
/* driver exit point */
static void denali_pci_remove(struct pci_dev *dev)
{
	struct denali_nand_info *denali = pci_get_drvdata(dev);

	nand_release(&denali->mtd);
	del_mtd_device(&denali->mtd);

	denali_irq_cleanup(dev->irq, denali);

	iounmap(denali->flash_reg);
	iounmap(denali->flash_mem);
	pci_release_regions(dev);
	pci_disable_device(dev);
	pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
			 PCI_DMA_BIDIRECTIONAL);
	pci_set_drvdata(dev, NULL);
	kfree(denali);
}
MODULE_DEVICE_TABLE(pci, denali_pci_ids);

static struct pci_driver denali_pci_driver = {
	.name = DENALI_NAND_NAME,
	.id_table = denali_pci_ids,
	.probe = denali_pci_probe,
	.remove = denali_pci_remove,
};

static int __devinit denali_init(void)
{
	printk(KERN_INFO "Spectra MTD driver built on %s @ %s\n",
			__DATE__, __TIME__);
	return pci_register_driver(&denali_pci_driver);
}

/* Free memory */
static void __devexit denali_exit(void)
{
	pci_unregister_driver(&denali_pci_driver);
}

module_init(denali_init);
module_exit(denali_exit);