/*
 * Intel 5100 Memory Controllers kernel module
 *
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * This module is based on the following document:
 *
 * Intel 5100X Chipset Memory Controller Hub (MCH) - Datasheet
 *      http://download.intel.com/design/chipsets/datashts/318378.pdf
 *
 * The intel 5100 has two independent channels. EDAC core currently
 * can not reflect this configuration so instead the chip-select
 * rows for each respective channel are laid out one after another,
 * the first half belonging to channel 0, the second half belonging
 * to channel 1.
 *
 * This driver is for DDR2 DIMMs, and it uses chip select to select among the
 * several ranks. However, instead of showing memories as ranks, it outputs
 * them as DIMM's. An internal table creates the association between ranks
 * and DIMM's.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
#include <linux/delay.h>
#include <linux/mmzone.h>
#include <linux/debugfs.h>

#include "edac_module.h"

/* register addresses */

/* device 16, func 1 */
#define I5100_MC		0x40	/* Memory Control Register */
#define 	I5100_MC_SCRBEN_MASK	(1 << 7)
#define 	I5100_MC_SCRBDONE_MASK	(1 << 4)
#define I5100_MS		0x44	/* Memory Status Register */
#define I5100_SPDDATA		0x48	/* Serial Presence Detect Status Reg */
#define I5100_SPDCMD		0x4c	/* Serial Presence Detect Command Reg */
#define I5100_TOLM		0x6c	/* Top of Low Memory */
#define I5100_MIR0		0x80	/* Memory Interleave Range 0 */
#define I5100_MIR1		0x84	/* Memory Interleave Range 1 */
#define I5100_AMIR_0		0x8c	/* Adjusted Memory Interleave Range 0 */
#define I5100_AMIR_1		0x90	/* Adjusted Memory Interleave Range 1 */
#define I5100_FERR_NF_MEM	0xa0	/* MC First Non Fatal Errors */
#define		I5100_FERR_NF_MEM_M16ERR_MASK	(1 << 16)
#define		I5100_FERR_NF_MEM_M15ERR_MASK	(1 << 15)
#define		I5100_FERR_NF_MEM_M14ERR_MASK	(1 << 14)
#define		I5100_FERR_NF_MEM_M12ERR_MASK	(1 << 12)
#define		I5100_FERR_NF_MEM_M11ERR_MASK	(1 << 11)
#define		I5100_FERR_NF_MEM_M10ERR_MASK	(1 << 10)
#define		I5100_FERR_NF_MEM_M6ERR_MASK	(1 << 6)
#define		I5100_FERR_NF_MEM_M5ERR_MASK	(1 << 5)
#define		I5100_FERR_NF_MEM_M4ERR_MASK	(1 << 4)
#define		I5100_FERR_NF_MEM_M1ERR_MASK	(1 << 1)
#define		I5100_FERR_NF_MEM_ANY_MASK	\
			(I5100_FERR_NF_MEM_M16ERR_MASK | \
			I5100_FERR_NF_MEM_M15ERR_MASK | \
			I5100_FERR_NF_MEM_M14ERR_MASK | \
			I5100_FERR_NF_MEM_M12ERR_MASK | \
			I5100_FERR_NF_MEM_M11ERR_MASK | \
			I5100_FERR_NF_MEM_M10ERR_MASK | \
			I5100_FERR_NF_MEM_M6ERR_MASK | \
			I5100_FERR_NF_MEM_M5ERR_MASK | \
			I5100_FERR_NF_MEM_M4ERR_MASK | \
			I5100_FERR_NF_MEM_M1ERR_MASK)
#define	I5100_NERR_NF_MEM	0xa4	/* MC Next Non-Fatal Errors */
#define I5100_EMASK_MEM		0xa8	/* MC Error Mask Register */
#define I5100_MEM0EINJMSK0	0x200	/* Injection Mask0 Register Channel 0 */
#define I5100_MEM1EINJMSK0	0x208	/* Injection Mask0 Register Channel 1 */
#define		I5100_MEMXEINJMSK0_EINJEN	(1 << 27)
#define I5100_MEM0EINJMSK1	0x204	/* Injection Mask1 Register Channel 0 */
#define I5100_MEM1EINJMSK1	0x206	/* Injection Mask1 Register Channel 1 */

/* Device 19, Function 0 */
#define I5100_DINJ0 0x9a

/* device 21 and 22, func 0 */
#define I5100_MTR_0	0x154	/* Memory Technology Registers 0-3 */
#define I5100_DMIR	0x15c	/* DIMM Interleave Range */
#define	I5100_VALIDLOG	0x18c	/* Valid Log Markers */
#define	I5100_NRECMEMA	0x190	/* Non-Recoverable Memory Error Log Reg A */
#define	I5100_NRECMEMB	0x194	/* Non-Recoverable Memory Error Log Reg B */
#define	I5100_REDMEMA	0x198	/* Recoverable Memory Data Error Log Reg A */
#define	I5100_REDMEMB	0x19c	/* Recoverable Memory Data Error Log Reg B */
#define	I5100_RECMEMA	0x1a0	/* Recoverable Memory Error Log Reg A */
#define	I5100_RECMEMB	0x1a4	/* Recoverable Memory Error Log Reg B */
#define I5100_MTR_4	0x1b0	/* Memory Technology Registers 4,5 */

/* bit field accessors */
/* MC: patrol-scrub enable (bit 7) */
static inline u32 i5100_mc_scrben(u32 mc)
{
	return mc >> 7 & 1;
}

/* MC: error-detection (ECC) enable (bit 5) */
static inline u32 i5100_mc_errdeten(u32 mc)
{
	return mc >> 5 & 1;
}

/* MC: scrub-done flag (bit 4) -- set once a full scrub pass completed */
static inline u32 i5100_mc_scrbdone(u32 mc)
{
	return mc >> 4 & 1;
}

/* SPDDATA: read-data-okay flag (bit 15) */
static inline u16 i5100_spddata_rdo(u16 a)
{
	return a >> 15 & 1;
}

/* SPDDATA: SPD bus error flag (bit 13) */
static inline u16 i5100_spddata_sbe(u16 a)
{
	return a >> 13 & 1;
}

/* SPDDATA: SPD interface busy flag (bit 12) */
static inline u16 i5100_spddata_busy(u16 a)
{
	return a >> 12 & 1;
}

/* SPDDATA: the byte read from the SPD EEPROM (bits 7:0) */
static inline u16 i5100_spddata_data(u16 a)
{
	return a & ((1 << 8) - 1);
}
/* Pack an SPD command into the I5100_SPDCMD register layout. */
static inline u32 i5100_spdcmd_create(u32 dti, u32 ckovrd, u32 sa, u32 ba,
				      u32 data, u32 cmd)
{
	return	((dti & ((1 << 4) - 1)) << 28) |	/* device type id */
		((ckovrd & 1) << 27) |			/* clock override */
		((sa & ((1 << 3) - 1)) << 24) |		/* slave address */
		((ba & ((1 << 8) - 1)) << 16) |		/* byte address */
		((data & ((1 << 8) - 1)) << 8) |
		(cmd & 1);				/* 0 = read */
}

/* TOLM: top-of-low-memory field (bits 15:12) */
static inline u16 i5100_tolm_tolm(u16 a)
{
	return a >> 12 & ((1 << 4) - 1);
}

/* MIR: interleave range limit (bits 15:4) */
static inline u16 i5100_mir_limit(u16 a)
{
	return a >> 4 & ((1 << 12) - 1);
}

/* MIR: way 1 select (bit 1) */
static inline u16 i5100_mir_way1(u16 a)
{
	return a >> 1 & 1;
}

/* MIR: way 0 select (bit 0) */
static inline u16 i5100_mir_way0(u16 a)
{
	return a & 1;
}
/* FERR_NF_MEM: channel index of the first logged error (bit 28) */
static inline u32 i5100_ferr_nf_mem_chan_indx(u32 a)
{
	return a >> 28 & 1;
}

/* FERR_NF_MEM: non-zero iff any of the handled non-fatal error bits is set */
static inline u32 i5100_ferr_nf_mem_any(u32 a)
{
	return a & I5100_FERR_NF_MEM_ANY_MASK;
}

/* NERR_NF_MEM shares the FERR_NF_MEM bit layout */
static inline u32 i5100_nerr_nf_mem_any(u32 a)
{
	return i5100_ferr_nf_mem_any(a);
}

/* DMIR: interleave range limit (bits 26:16) */
static inline u32 i5100_dmir_limit(u32 a)
{
	return a >> 16 & ((1 << 11) - 1);
}

/* DMIR: rank selected for interleave slot i (2 bits per slot, 4-bit stride) */
static inline u32 i5100_dmir_rank(u32 a, u32 i)
{
	return a >> (4 * i) & ((1 << 2) - 1);
}

/* MTR: rank present (bit 10) */
static inline u16 i5100_mtr_present(u16 a)
{
	return a >> 10 & 1;
}

/* MTR: electrical throttling (bit 9) */
static inline u16 i5100_mtr_ethrottle(u16 a)
{
	return a >> 9 & 1;
}

/* MTR: device width (bit 8): 0 -> x4, 1 -> x8 (see i5100_init_mtr) */
static inline u16 i5100_mtr_width(u16 a)
{
	return a >> 8 & 1;
}

/* MTR: encoded number of bank address lines (bit 6) */
static inline u16 i5100_mtr_numbank(u16 a)
{
	return a >> 6 & 1;
}

/* MTR: encoded number of row address lines (bits 3:2) */
static inline u16 i5100_mtr_numrow(u16 a)
{
	return a >> 2 & ((1 << 2) - 1);
}

/* MTR: encoded number of column address lines (bits 1:0) */
static inline u16 i5100_mtr_numcol(u16 a)
{
	return a & ((1 << 2) - 1);
}
/* VALIDLOG: recoverable-error data log (REDMEM) valid (bit 2) */
static inline u32 i5100_validlog_redmemvalid(u32 a)
{
	return a >> 2 & 1;
}

/* VALIDLOG: recoverable-error log (RECMEM) valid (bit 1) */
static inline u32 i5100_validlog_recmemvalid(u32 a)
{
	return a >> 1 & 1;
}

/* VALIDLOG: non-recoverable-error log (NRECMEM) valid (bit 0) */
static inline u32 i5100_validlog_nrecmemvalid(u32 a)
{
	return a & 1;
}

/* NRECMEMA: memory error code (bits 19:15) */
static inline u32 i5100_nrecmema_merr(u32 a)
{
	return a >> 15 & ((1 << 5) - 1);
}

/* NRECMEMA: bank (bits 14:12) */
static inline u32 i5100_nrecmema_bank(u32 a)
{
	return a >> 12 & ((1 << 3) - 1);
}

/* NRECMEMA: rank (bits 10:8) */
static inline u32 i5100_nrecmema_rank(u32 a)
{
	return a >> 8 & ((1 << 3) - 1);
}

/* NRECMEMA: demand buffer id (bits 7:0) */
static inline u32 i5100_nrecmema_dm_buf_id(u32 a)
{
	return a & ((1 << 8) - 1);
}

/* NRECMEMB: CAS address (bits 28:16) */
static inline u32 i5100_nrecmemb_cas(u32 a)
{
	return a >> 16 & ((1 << 13) - 1);
}

/* NRECMEMB: RAS address (bits 15:0) */
static inline u32 i5100_nrecmemb_ras(u32 a)
{
	return a & ((1 << 16) - 1);
}

/* The RECMEMA/RECMEMB registers share the NRECMEMA/NRECMEMB layout. */
static inline u32 i5100_recmema_merr(u32 a)
{
	return i5100_nrecmema_merr(a);
}

static inline u32 i5100_recmema_bank(u32 a)
{
	return i5100_nrecmema_bank(a);
}

static inline u32 i5100_recmema_rank(u32 a)
{
	return i5100_nrecmema_rank(a);
}

static inline u32 i5100_recmemb_cas(u32 a)
{
	return i5100_nrecmemb_cas(a);
}

static inline u32 i5100_recmemb_ras(u32 a)
{
	return i5100_nrecmemb_ras(a);
}
/* some generic limits */
#define I5100_MAX_RANKS_PER_CHAN	6
#define I5100_CHANNELS			2
#define I5100_MAX_RANKS_PER_DIMM	4
#define I5100_DIMM_ADDR_LINES		(6 - 3)	/* 64 bits / 8 bits per byte */
#define I5100_MAX_DIMM_SLOTS_PER_CHAN	4
#define I5100_MAX_RANK_INTERLEAVE	4
#define I5100_MAX_DMIRS			5
#define I5100_SCRUB_REFRESH_RATE	(5 * 60 * HZ)
/* Driver-private state, hung off mci->pvt_info. */
struct i5100_priv {
	/* ranks on each dimm -- 0 maps to not present -- obtained via SPD */
	int dimm_numrank[I5100_CHANNELS][I5100_MAX_DIMM_SLOTS_PER_CHAN];

	/*
	 * mainboard chip select map -- maps i5100 chip selects to
	 * DIMM slot chip selects.  In the case of only 4 ranks per
	 * channel, the mapping is fairly obvious but not unique.
	 * we map -1 -> NC (not connected) and assume both channels
	 * use the same map...
	 */
	int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CHAN][I5100_MAX_RANKS_PER_DIMM];

	/* memory interleave range */
	struct {
		u64	 limit;
		unsigned way[2];
	} mir[I5100_CHANNELS];

	/* adjusted memory interleave range register */
	unsigned amir[I5100_CHANNELS];

	/* dimm interleave range */
	struct {
		unsigned rank[I5100_MAX_RANK_INTERLEAVE];
		u64	 limit;
	} dmir[I5100_CHANNELS][I5100_MAX_DMIRS];

	/* memory technology registers... (cached by i5100_init_mtr) */
	struct {
		unsigned present;	/* 0 or 1 */
		unsigned ethrottle;	/* 0 or 1 */
		unsigned width;		/* 4 or 8 bits  */
		unsigned numbank;	/* 2 or 3 lines */
		unsigned numrow;	/* 13 .. 16 lines */
		unsigned numcol;	/* 11 .. 12 lines */
	} mtr[I5100_CHANNELS][I5100_MAX_RANKS_PER_CHAN];

	u64 tolm;		/* top of low memory in bytes */
	unsigned ranksperchan;	/* number of ranks per channel */

	struct pci_dev *mc;	/* device 16 func 1 */
	struct pci_dev *einj;	/* device 19 func 0 */
	struct pci_dev *ch0mm;	/* device 21 func 0 */
	struct pci_dev *ch1mm;	/* device 22 func 0 */

	/* deferred work that re-arms the hardware scrubber */
	struct delayed_work i5100_scrubbing;
	int scrub_enable;

	/* Error injection */
	u8 inject_channel;
	u8 inject_hlinesel;
	u8 inject_deviceptr1;
	u8 inject_deviceptr2;
	u16 inject_eccmask1;
	u16 inject_eccmask2;

	struct dentry *debugfs;	/* per-MC dir holding the injection knobs */
};
static struct dentry *i5100_debugfs;

/* map a rank/chan to a slot number on the mainboard */
static int i5100_rank_to_slot ( const struct mem_ctl_info * mci ,
2009-12-16 03:47:40 +03:00
int chan , int rank )
2008-07-25 12:49:04 +04:00
{
const struct i5100_priv * priv = mci - > pvt_info ;
int i ;
2009-12-16 03:47:40 +03:00
for ( i = 0 ; i < I5100_MAX_DIMM_SLOTS_PER_CHAN ; i + + ) {
2008-07-25 12:49:04 +04:00
int j ;
2009-12-16 03:47:40 +03:00
const int numrank = priv - > dimm_numrank [ chan ] [ i ] ;
2008-07-25 12:49:04 +04:00
for ( j = 0 ; j < numrank ; j + + )
if ( priv - > dimm_csmap [ i ] [ j ] = = rank )
2009-12-16 03:47:40 +03:00
return i * 2 + chan ;
2008-07-25 12:49:04 +04:00
}
return - 1 ;
}
/* Translate an FERR/NERR error bitmask into a human-readable string;
 * the lowest set bit wins.  Index in the table == bit position.
 */
static const char *i5100_err_msg(unsigned err)
{
	static const char *const msgs[] = {
		"unknown",					/* 0 */
		"uncorrectable data ECC on replay",		/* 1 */
		"unknown",					/* 2 */
		"unknown",					/* 3 */
		"aliased uncorrectable demand data ECC",	/* 4 */
		"aliased uncorrectable spare-copy data ECC",	/* 5 */
		"aliased uncorrectable patrol data ECC",	/* 6 */
		"unknown",					/* 7 */
		"unknown",					/* 8 */
		"unknown",					/* 9 */
		"non-aliased uncorrectable demand data ECC",	/* 10 */
		"non-aliased uncorrectable spare-copy data ECC", /* 11 */
		"non-aliased uncorrectable patrol data ECC",	/* 12 */
		"unknown",					/* 13 */
		"correctable demand data ECC",			/* 14 */
		"correctable spare-copy data ECC",		/* 15 */
		"correctable patrol data ECC",			/* 16 */
		"unknown",					/* 17 */
		"SPD protocol error",				/* 18 */
		"unknown",					/* 19 */
		"spare copy initiated",				/* 20 */
		"spare copy completed",				/* 21 */
	};
	unsigned bit;

	for (bit = 0; bit < ARRAY_SIZE(msgs); bit++)
		if (err & (1u << bit))
			return msgs[bit];

	return "none";
}
/* convert csrow index into a rank (per channel -- 0..5) */
static unsigned int i5100_csrow_to_rank(const struct mem_ctl_info *mci,
					unsigned int csrow)
{
	const struct i5100_priv *priv = mci->pvt_info;

	/* csrows are channel-major, so the rank is the in-channel offset */
	return csrow % priv->ranksperchan;
}
/* convert csrow index into a channel (0..1) */
static unsigned int i5100_csrow_to_chan(const struct mem_ctl_info *mci,
					unsigned int csrow)
{
	const struct i5100_priv *priv = mci->pvt_info;

	/* first ranksperchan csrows belong to channel 0, the rest to 1 */
	return csrow / priv->ranksperchan;
}
static void i5100_handle_ce ( struct mem_ctl_info * mci ,
2009-12-16 03:47:40 +03:00
int chan ,
2008-07-25 12:49:04 +04:00
unsigned bank ,
unsigned rank ,
unsigned long syndrome ,
unsigned cas ,
unsigned ras ,
const char * msg )
{
2012-04-16 22:09:52 +04:00
char detail [ 80 ] ;
2012-01-28 01:38:08 +04:00
2012-04-16 22:09:52 +04:00
/* Form out message */
snprintf ( detail , sizeof ( detail ) ,
" bank %u, cas %u, ras %u \n " ,
bank , cas , ras ) ;
2008-07-25 12:49:04 +04:00
2012-06-04 20:27:43 +04:00
edac_mc_handle_error ( HW_EVENT_ERR_CORRECTED , mci , 1 ,
2012-04-16 22:09:52 +04:00
0 , 0 , syndrome ,
chan , rank , - 1 ,
2012-06-04 18:29:25 +04:00
msg , detail ) ;
2008-07-25 12:49:04 +04:00
}
static void i5100_handle_ue ( struct mem_ctl_info * mci ,
2009-12-16 03:47:40 +03:00
int chan ,
2008-07-25 12:49:04 +04:00
unsigned bank ,
unsigned rank ,
unsigned long syndrome ,
unsigned cas ,
unsigned ras ,
const char * msg )
{
2012-04-16 22:09:52 +04:00
char detail [ 80 ] ;
2008-07-25 12:49:04 +04:00
2012-04-16 22:09:52 +04:00
/* Form out message */
snprintf ( detail , sizeof ( detail ) ,
" bank %u, cas %u, ras %u \n " ,
bank , cas , ras ) ;
2008-07-25 12:49:04 +04:00
2012-06-04 20:27:43 +04:00
edac_mc_handle_error ( HW_EVENT_ERR_UNCORRECTED , mci , 1 ,
2012-04-16 22:09:52 +04:00
0 , 0 , syndrome ,
chan , rank , - 1 ,
2012-06-04 18:29:25 +04:00
msg , detail ) ;
2008-07-25 12:49:04 +04:00
}
2009-12-16 03:47:40 +03:00
/*
 * Decode one channel's error logs and feed valid recoverable (CE) and
 * non-recoverable (UE) entries to the EDAC core.  The VALIDLOG value is
 * written back at the end to acknowledge the consumed entries
 * (presumably write-1-to-clear -- confirm against the 5100 datasheet).
 */
static void i5100_read_log(struct mem_ctl_info *mci, int chan,
			   u32 ferr, u32 nerr)
{
	struct i5100_priv *priv = mci->pvt_info;
	struct pci_dev *pdev = (chan) ? priv->ch1mm : priv->ch0mm;
	u32 dw;
	u32 dw2;
	unsigned syndrome = 0;
	unsigned merr;
	unsigned bank;
	unsigned rank;
	unsigned cas;
	unsigned ras;

	pci_read_config_dword(pdev, I5100_VALIDLOG, &dw);

	if (i5100_validlog_redmemvalid(dw)) {
		pci_read_config_dword(pdev, I5100_REDMEMA, &dw2);
		syndrome = dw2;
		/* REDMEMB is read but its value is currently unused */
		pci_read_config_dword(pdev, I5100_REDMEMB, &dw2);
	}

	if (i5100_validlog_recmemvalid(dw)) {
		const char *msg;

		pci_read_config_dword(pdev, I5100_RECMEMA, &dw2);
		merr = i5100_recmema_merr(dw2);
		bank = i5100_recmema_bank(dw2);
		rank = i5100_recmema_rank(dw2);

		pci_read_config_dword(pdev, I5100_RECMEMB, &dw2);
		cas = i5100_recmemb_cas(dw2);
		ras = i5100_recmemb_ras(dw2);

		/* FIXME:  not really sure if this is what merr is...
		 */
		if (!merr)
			msg = i5100_err_msg(ferr);
		else
			msg = i5100_err_msg(nerr);

		i5100_handle_ce(mci, chan, bank, rank, syndrome, cas, ras, msg);
	}

	if (i5100_validlog_nrecmemvalid(dw)) {
		const char *msg;

		pci_read_config_dword(pdev, I5100_NRECMEMA, &dw2);
		merr = i5100_nrecmema_merr(dw2);
		bank = i5100_nrecmema_bank(dw2);
		rank = i5100_nrecmema_rank(dw2);

		pci_read_config_dword(pdev, I5100_NRECMEMB, &dw2);
		cas = i5100_nrecmemb_cas(dw2);
		ras = i5100_nrecmemb_ras(dw2);

		/* FIXME:  not really sure if this is what merr is...
		 */
		if (!merr)
			msg = i5100_err_msg(ferr);
		else
			msg = i5100_err_msg(nerr);

		i5100_handle_ue(mci, chan, bank, rank, syndrome, cas, ras, msg);
	}

	/* ack the log entries we just consumed */
	pci_write_config_dword(pdev, I5100_VALIDLOG, dw);
}
/*
 * EDAC poll callback: look at the first-error register and, if anything
 * is flagged, decode the affected channel's logs, then write the read
 * values back to clear the error state for the next poll.
 */
static void i5100_check_error(struct mem_ctl_info *mci)
{
	struct i5100_priv *priv = mci->pvt_info;
	u32 dw, dw2;

	pci_read_config_dword(priv->mc, I5100_FERR_NF_MEM, &dw);
	if (i5100_ferr_nf_mem_any(dw)) {

		pci_read_config_dword(priv->mc, I5100_NERR_NF_MEM, &dw2);

		i5100_read_log(mci, i5100_ferr_nf_mem_chan_indx(dw),
			       i5100_ferr_nf_mem_any(dw),
			       i5100_nerr_nf_mem_any(dw2));

		/* write back the observed bits to acknowledge them */
		pci_write_config_dword(priv->mc, I5100_NERR_NF_MEM, dw2);
	}
	pci_write_config_dword(priv->mc, I5100_FERR_NF_MEM, dw);
}
/* The i5100 chipset will scrub the entire memory once, then
 * set a done bit.  Continuous scrubbing is achieved by enqueing
 * delayed work to a workqueue, checking every few minutes if
 * the scrubbing has completed and if so reinitiating it.
 */
static void i5100_refresh_scrubbing ( struct work_struct * work )
{
2016-01-01 17:59:07 +03:00
struct delayed_work * i5100_scrubbing = to_delayed_work ( work ) ;
2009-12-16 03:47:42 +03:00
struct i5100_priv * priv = container_of ( i5100_scrubbing ,
struct i5100_priv ,
i5100_scrubbing ) ;
u32 dw ;
pci_read_config_dword ( priv - > mc , I5100_MC , & dw ) ;
if ( priv - > scrub_enable ) {
pci_read_config_dword ( priv - > mc , I5100_MC , & dw ) ;
if ( i5100_mc_scrbdone ( dw ) ) {
dw | = I5100_MC_SCRBEN_MASK ;
pci_write_config_dword ( priv - > mc , I5100_MC , dw ) ;
pci_read_config_dword ( priv - > mc , I5100_MC , & dw ) ;
}
schedule_delayed_work ( & ( priv - > i5100_scrubbing ) ,
I5100_SCRUB_REFRESH_RATE ) ;
}
}
/*
 * The bandwidth is based on experimentation, feel free to refine it.
 */
/* Enable (any non-zero bandwidth) or disable hardware scrubbing and
 * return the rate actually in effect afterwards.
 */
static int i5100_set_scrub_rate(struct mem_ctl_info *mci, u32 bandwidth)
{
	struct i5100_priv *priv = mci->pvt_info;
	u32 mc;

	pci_read_config_dword(priv->mc, I5100_MC, &mc);

	if (!bandwidth) {
		priv->scrub_enable = 0;
		mc &= ~I5100_MC_SCRBEN_MASK;
		cancel_delayed_work(&(priv->i5100_scrubbing));
	} else {
		priv->scrub_enable = 1;
		mc |= I5100_MC_SCRBEN_MASK;
		schedule_delayed_work(&(priv->i5100_scrubbing),
				      I5100_SCRUB_REFRESH_RATE);
	}

	pci_write_config_dword(priv->mc, I5100_MC, mc);

	/* read back what the hardware accepted; the chip scrubs at a
	 * fixed rate when enabled
	 */
	pci_read_config_dword(priv->mc, I5100_MC, &mc);

	return 5900000 * i5100_mc_scrben(mc);
}
2010-11-24 21:52:09 +03:00
/* Report the current scrub bandwidth: the fixed rate when enabled, else 0. */
static int i5100_get_scrub_rate(struct mem_ctl_info *mci)
{
	struct i5100_priv *priv = mci->pvt_info;
	u32 dw;

	pci_read_config_dword(priv->mc, I5100_MC, &dw);

	return 5900000 * i5100_mc_scrben(dw);
}
2008-07-25 12:49:04 +04:00
static struct pci_dev * pci_get_device_func ( unsigned vendor ,
unsigned device ,
unsigned func )
{
struct pci_dev * ret = NULL ;
while ( 1 ) {
ret = pci_get_device ( vendor , device , ret ) ;
if ( ! ret )
break ;
if ( PCI_FUNC ( ret - > devfn ) = = func )
break ;
}
return ret ;
}
2019-09-02 15:33:41 +03:00
static unsigned long i5100_npages ( struct mem_ctl_info * mci , unsigned int csrow )
2008-07-25 12:49:04 +04:00
{
struct i5100_priv * priv = mci - > pvt_info ;
2019-09-02 15:33:41 +03:00
const unsigned int chan_rank = i5100_csrow_to_rank ( mci , csrow ) ;
const unsigned int chan = i5100_csrow_to_chan ( mci , csrow ) ;
2008-07-25 12:49:04 +04:00
unsigned addr_lines ;
/* dimm present? */
2009-12-16 03:47:40 +03:00
if ( ! priv - > mtr [ chan ] [ chan_rank ] . present )
2008-07-25 12:49:04 +04:00
return 0ULL ;
addr_lines =
I5100_DIMM_ADDR_LINES +
2009-12-16 03:47:40 +03:00
priv - > mtr [ chan ] [ chan_rank ] . numcol +
priv - > mtr [ chan ] [ chan_rank ] . numrow +
priv - > mtr [ chan ] [ chan_rank ] . numbank ;
2008-07-25 12:49:04 +04:00
return ( unsigned long )
( ( unsigned long long ) ( 1ULL < < addr_lines ) / PAGE_SIZE ) ;
}
2012-12-22 01:23:51 +04:00
/* Cache the Memory Technology Registers (per-rank geometry) of both
 * channels into priv->mtr.
 */
static void i5100_init_mtr(struct mem_ctl_info *mci)
{
	struct i5100_priv *priv = mci->pvt_info;
	struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
	int i;

	for (i = 0; i < I5100_CHANNELS; i++) {
		int j;
		struct pci_dev *pdev = mms[i];

		for (j = 0; j < I5100_MAX_RANKS_PER_CHAN; j++) {
			/* MTRs 0-3 start at I5100_MTR_0, MTRs 4-5 at
			 * I5100_MTR_4; each register is 2 bytes wide
			 */
			const unsigned addr =
				(j < 4) ? I5100_MTR_0 + j * 2 :
					I5100_MTR_4 + (j - 4) * 2;
			u16 w;

			pci_read_config_word(pdev, addr, &w);

			priv->mtr[i][j].present = i5100_mtr_present(w);
			priv->mtr[i][j].ethrottle = i5100_mtr_ethrottle(w);
			priv->mtr[i][j].width = 4 + 4 * i5100_mtr_width(w);
			priv->mtr[i][j].numbank = 2 + i5100_mtr_numbank(w);
			priv->mtr[i][j].numrow = 13 + i5100_mtr_numrow(w);
			/* NOTE(review): struct comment says 11..12 column
			 * lines but 10 + field(0..3) yields 10..13 --
			 * confirm against the datasheet
			 */
			priv->mtr[i][j].numcol = 10 + i5100_mtr_numcol(w);
		}
	}
}
/*
 * FIXME: make this into a real i2c adapter (so that dimm-decode
 * will work)?
 */
/*
 * Read one byte from the SPD EEPROM of the DIMM in (ch, slot) via the
 * MC's serial-presence-detect interface.  Returns 0 on success and
 * stores the byte in *byte; returns -1 when the interface is busy, the
 * wait times out, or the transfer reports an error.
 */
static int i5100_read_spd_byte(const struct mem_ctl_info *mci,
			       u8 ch, u8 slot, u8 addr, u8 *byte)
{
	struct i5100_priv *priv = mci->pvt_info;
	int timeout = 1000;	/* 1000 * 100us = the promised 100ms */
	u16 w;

	pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
	if (i5100_spddata_busy(w))
		return -1;

	pci_write_config_dword(priv->mc, I5100_SPDCMD,
			       i5100_spdcmd_create(0xa, 1, ch * 4 + slot, addr,
						   0, 0));

	/* wait up to 100ms for the transfer to complete; the original
	 * loop here was unbounded and could hang on a stuck interface
	 */
	udelay(100);
	while (timeout--) {
		pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
		if (!i5100_spddata_busy(w))
			break;
		udelay(100);
	}
	if (i5100_spddata_busy(w))
		return -1;

	if (!i5100_spddata_rdo(w) || i5100_spddata_sbe(w))
		return -1;

	*byte = i5100_spddata_data(w);

	return 0;
}
/*
 * fill dimm chip select map
 *
 * FIXME:
 *   o not the only way to map chip selects to dimm slots
 *   o investigate if there is some way to obtain this map from the bios
 */
static void i5100_init_dimm_csmap(struct mem_ctl_info *mci)
{
	struct i5100_priv *priv = mci->pvt_info;
	int i;

	/* start from an all-unconnected map */
	for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) {
		int j;

		for (j = 0; j < I5100_MAX_RANKS_PER_DIMM; j++)
			priv->dimm_csmap[i][j] = -1; /* default NC */
	}

	/* only 2 chip selects per slot... */
	if (priv->ranksperchan == 4) {
		priv->dimm_csmap[0][0] = 0;
		priv->dimm_csmap[0][1] = 3;
		priv->dimm_csmap[1][0] = 1;
		priv->dimm_csmap[1][1] = 2;
		priv->dimm_csmap[2][0] = 2;
		priv->dimm_csmap[3][0] = 3;
	} else {
		/* 6 ranks per channel: slots 0..2 get two consecutive
		 * chip selects each, slot 3 stays unconnected
		 */
		priv->dimm_csmap[0][0] = 0;
		priv->dimm_csmap[0][1] = 1;
		priv->dimm_csmap[1][0] = 2;
		priv->dimm_csmap[1][1] = 3;
		priv->dimm_csmap[2][0] = 4;
		priv->dimm_csmap[2][1] = 5;
	}
}
2012-12-22 01:23:51 +04:00
static void i5100_init_dimm_layout ( struct pci_dev * pdev ,
struct mem_ctl_info * mci )
2008-07-25 12:49:04 +04:00
{
struct i5100_priv * priv = mci - > pvt_info ;
int i ;
2009-12-16 03:47:40 +03:00
for ( i = 0 ; i < I5100_CHANNELS ; i + + ) {
2008-07-25 12:49:04 +04:00
int j ;
2009-12-16 03:47:40 +03:00
for ( j = 0 ; j < I5100_MAX_DIMM_SLOTS_PER_CHAN ; j + + ) {
2008-07-25 12:49:04 +04:00
u8 rank ;
if ( i5100_read_spd_byte ( mci , i , j , 5 , & rank ) < 0 )
priv - > dimm_numrank [ i ] [ j ] = 0 ;
else
priv - > dimm_numrank [ i ] [ j ] = ( rank & 3 ) + 1 ;
}
}
i5100_init_dimm_csmap ( mci ) ;
}
2012-12-22 01:23:51 +04:00
/* Snapshot the TOLM/MIR/AMIR/DMIR interleaving registers into priv,
 * then cache the memory technology registers.
 */
static void i5100_init_interleaving(struct pci_dev *pdev,
				    struct mem_ctl_info *mci)
{
	u16 w;
	u32 dw;
	struct i5100_priv *priv = mci->pvt_info;
	struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
	int i;

	pci_read_config_word(pdev, I5100_TOLM, &w);
	/* TOLM field is in 256MiB units */
	priv->tolm = (u64) i5100_tolm_tolm(w) * 256 * 1024 * 1024;

	pci_read_config_word(pdev, I5100_MIR0, &w);
	priv->mir[0].limit = (u64) i5100_mir_limit(w) << 28;
	priv->mir[0].way[1] = i5100_mir_way1(w);
	priv->mir[0].way[0] = i5100_mir_way0(w);

	pci_read_config_word(pdev, I5100_MIR1, &w);
	priv->mir[1].limit = (u64) i5100_mir_limit(w) << 28;
	priv->mir[1].way[1] = i5100_mir_way1(w);
	priv->mir[1].way[0] = i5100_mir_way0(w);

	pci_read_config_word(pdev, I5100_AMIR_0, &w);
	priv->amir[0] = w;
	pci_read_config_word(pdev, I5100_AMIR_1, &w);
	priv->amir[1] = w;

	for (i = 0; i < I5100_CHANNELS; i++) {
		int j;

		/* read all 5 (I5100_MAX_DMIRS) DMIRs of this channel */
		for (j = 0; j < 5; j++) {
			int k;

			pci_read_config_dword(mms[i], I5100_DMIR + j * 4, &dw);

			priv->dmir[i][j].limit =
				(u64) i5100_dmir_limit(dw) << 28;
			for (k = 0; k < I5100_MAX_RANKS_PER_DIMM; k++)
				priv->dmir[i][j].rank[k] =
					i5100_dmir_rank(dw, k);
		}
	}

	i5100_init_mtr(mci);
}
2012-12-22 01:23:51 +04:00
/* Populate the EDAC dimm_info for every present rank; each rank is
 * exposed to userspace as a "DIMM" (see the file header comment).
 */
static void i5100_init_csrows(struct mem_ctl_info *mci)
{
	struct i5100_priv *priv = mci->pvt_info;
	struct dimm_info *dimm;

	mci_for_each_dimm(mci, dimm) {
		const unsigned long npages = i5100_npages(mci, dimm->idx);
		const unsigned int chan = i5100_csrow_to_chan(mci, dimm->idx);
		const unsigned int rank = i5100_csrow_to_rank(mci, dimm->idx);

		/* skip absent DIMMs */
		if (!npages)
			continue;

		dimm->nr_pages = npages;
		dimm->grain = 32;
		/* width cached from the MTR: 4 -> x4 devices, else x8 */
		dimm->dtype = (priv->mtr[chan][rank].width == 4) ?
				DEV_X4 : DEV_X8;
		dimm->mtype = MEM_RDDR2;
		dimm->edac_mode = EDAC_SECDED;
		snprintf(dimm->label, sizeof(dimm->label), "DIMM%u",
			 i5100_rank_to_slot(mci, chan, rank));

		edac_dbg(2, "dimm channel %d, rank %d, size %ld\n",
			 chan, rank, (long)PAGES_TO_MiB(npages));
	}
}
/****************************************************************************
 *                       Error injection routines
 ****************************************************************************/
/* Program the injection mask registers from the debugfs-set knobs and
 * arm the injection response function for a single-shot injection.
 */
static void i5100_do_inject(struct mem_ctl_info *mci)
{
	struct i5100_priv *priv = mci->pvt_info;
	u32 mask0;
	u16 mask1;

	/* MEM[1:0]EINJMSK0
	 * 31    - ADDRMATCHEN
	 * 29:28 - HLINESEL
	 *         00 Reserved
	 *         01 Lower half of cache line
	 *         10 Upper half of cache line
	 *         11 Both upper and lower parts of cache line
	 * 27    - EINJEN
	 * 25:19 - XORMASK1 for deviceptr1
	 * 9:5   - SEC2RAM for deviceptr2
	 * 4:0   - FIR2RAM for deviceptr1
	 */
	mask0 = ((priv->inject_hlinesel & 0x3) << 28) |
		I5100_MEMXEINJMSK0_EINJEN |
		((priv->inject_eccmask1 & 0xffff) << 10) |
		((priv->inject_deviceptr2 & 0x1f) << 5) |
		(priv->inject_deviceptr1 & 0x1f);

	/* MEM[1:0]EINJMSK1
	 * 15:0  - XORMASK2 for deviceptr2
	 */
	mask1 = priv->inject_eccmask2;

	/* each channel has its own pair of mask registers */
	if (priv->inject_channel == 0) {
		pci_write_config_dword(priv->mc, I5100_MEM0EINJMSK0, mask0);
		pci_write_config_word(priv->mc, I5100_MEM0EINJMSK1, mask1);
	} else {
		pci_write_config_dword(priv->mc, I5100_MEM1EINJMSK0, mask0);
		pci_write_config_word(priv->mc, I5100_MEM1EINJMSK1, mask1);
	}

	/* Error Injection Response Function
	 * The Intel 5100 Memory Controller Hub Chipset (318378) datasheet
	 * hints about this register but carries no data about it.  All
	 * data regarding device 19 is based on experimentation and the
	 * Intel 7300 Chipset Memory Controller Hub (318082) datasheet
	 * which appears to be accurate for the i5100 in this area.
	 *
	 * The injection code doesn't work without setting this register.
	 * The register needs to be flipped off then on else the hardware
	 * will only perform the first injection.
	 *
	 * Stop condition bits 7:4
	 * 1010 - Stop after one injection
	 * 1011 - Never stop injecting faults
	 *
	 * Start condition bits 3:0
	 * 1010 - Never start
	 * 1011 - Start immediately
	 */
	pci_write_config_byte(priv->einj, I5100_DINJ0, 0xaa);
	pci_write_config_byte(priv->einj, I5100_DINJ0, 0xab);
}
#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
/* debugfs "inject_enable" write handler: the written bytes are ignored,
 * any write fires one error injection.
 */
static ssize_t inject_enable_write(struct file *file, const char __user *data,
				   size_t count, loff_t *ppos)
{
	struct mem_ctl_info *mci;

	mci = to_mci((struct device *)file->private_data);
	i5100_do_inject(mci);

	return count;
}
/* fops for the debugfs "inject_enable" file (write-only trigger) */
static const struct file_operations i5100_inject_enable_fops = {
	.open = simple_open,
	.write = inject_enable_write,
	.llseek = generic_file_llseek,
};
/* Create the per-MC debugfs directory and the error-injection knobs.
 * Returns -ENODEV if the module-level directory is missing, -ENOMEM if
 * the per-MC directory cannot be created, 0 on success.
 */
static int i5100_setup_debugfs(struct mem_ctl_info *mci)
{
	struct i5100_priv *priv = mci->pvt_info;

	if (!i5100_debugfs)
		return -ENODEV;

	priv->debugfs = edac_debugfs_create_dir_at(mci->bus->name, i5100_debugfs);

	if (!priv->debugfs)
		return -ENOMEM;

	edac_debugfs_create_x8("inject_channel", S_IRUGO | S_IWUSR, priv->debugfs,
				&priv->inject_channel);
	edac_debugfs_create_x8("inject_hlinesel", S_IRUGO | S_IWUSR, priv->debugfs,
				&priv->inject_hlinesel);
	edac_debugfs_create_x8("inject_deviceptr1", S_IRUGO | S_IWUSR, priv->debugfs,
				&priv->inject_deviceptr1);
	edac_debugfs_create_x8("inject_deviceptr2", S_IRUGO | S_IWUSR, priv->debugfs,
				&priv->inject_deviceptr2);
	edac_debugfs_create_x16("inject_eccmask1", S_IRUGO | S_IWUSR, priv->debugfs,
				&priv->inject_eccmask1);
	edac_debugfs_create_x16("inject_eccmask2", S_IRUGO | S_IWUSR, priv->debugfs,
				&priv->inject_eccmask2);
	edac_debugfs_create_file("inject_enable", S_IWUSR, priv->debugfs,
				&mci->dev, &i5100_inject_enable_fops);

	return 0;
}
2012-12-22 01:23:51 +04:00
static int i5100_init_one ( struct pci_dev * pdev , const struct pci_device_id * id )
2008-07-25 12:49:04 +04:00
{
int rc ;
struct mem_ctl_info * mci ;
2012-04-16 22:09:52 +04:00
struct edac_mc_layer layers [ 2 ] ;
2008-07-25 12:49:04 +04:00
struct i5100_priv * priv ;
2012-08-08 19:30:56 +04:00
struct pci_dev * ch0mm , * ch1mm , * einj ;
2008-07-25 12:49:04 +04:00
int ret = 0 ;
u32 dw ;
int ranksperch ;
if ( PCI_FUNC ( pdev - > devfn ) ! = 1 )
return - ENODEV ;
rc = pci_enable_device ( pdev ) ;
if ( rc < 0 ) {
ret = rc ;
goto bail ;
}
2008-07-25 12:49:06 +04:00
/* ECC enabled? */
pci_read_config_dword ( pdev , I5100_MC , & dw ) ;
2008-07-25 12:49:08 +04:00
if ( ! i5100_mc_errdeten ( dw ) ) {
2008-07-25 12:49:06 +04:00
printk ( KERN_INFO " i5100_edac: ECC not enabled. \n " ) ;
ret = - ENODEV ;
2008-07-25 12:49:08 +04:00
goto bail_pdev ;
2008-07-25 12:49:06 +04:00
}
2008-07-25 12:49:04 +04:00
/* figure out how many ranks, from strapped state of 48GB_Mode input */
pci_read_config_dword ( pdev , I5100_MS , & dw ) ;
ranksperch = ! ! ( dw & ( 1 < < 8 ) ) * 2 + 4 ;
2008-07-25 12:49:06 +04:00
/* enable error reporting... */
pci_read_config_dword ( pdev , I5100_EMASK_MEM , & dw ) ;
dw & = ~ I5100_FERR_NF_MEM_ANY_MASK ;
pci_write_config_dword ( pdev , I5100_EMASK_MEM , dw ) ;
2008-07-25 12:49:04 +04:00
/* device 21, func 0, Channel 0 Memory Map, Error Flag/Mask, etc... */
ch0mm = pci_get_device_func ( PCI_VENDOR_ID_INTEL ,
PCI_DEVICE_ID_INTEL_5100_21 , 0 ) ;
2008-07-25 12:49:08 +04:00
if ( ! ch0mm ) {
ret = - ENODEV ;
goto bail_pdev ;
}
2008-07-25 12:49:04 +04:00
rc = pci_enable_device ( ch0mm ) ;
if ( rc < 0 ) {
ret = rc ;
goto bail_ch0 ;
}
/* device 22, func 0, Channel 1 Memory Map, Error Flag/Mask, etc... */
ch1mm = pci_get_device_func ( PCI_VENDOR_ID_INTEL ,
PCI_DEVICE_ID_INTEL_5100_22 , 0 ) ;
if ( ! ch1mm ) {
ret = - ENODEV ;
2008-07-25 12:49:08 +04:00
goto bail_disable_ch0 ;
2008-07-25 12:49:04 +04:00
}
rc = pci_enable_device ( ch1mm ) ;
if ( rc < 0 ) {
ret = rc ;
goto bail_ch1 ;
}
2012-04-16 22:09:52 +04:00
layers [ 0 ] . type = EDAC_MC_LAYER_CHANNEL ;
layers [ 0 ] . size = 2 ;
layers [ 0 ] . is_virt_csrow = false ;
layers [ 1 ] . type = EDAC_MC_LAYER_SLOT ;
layers [ 1 ] . size = ranksperch ;
layers [ 1 ] . is_virt_csrow = true ;
2012-05-02 21:37:00 +04:00
mci = edac_mc_alloc ( 0 , ARRAY_SIZE ( layers ) , layers ,
2012-04-16 22:09:52 +04:00
sizeof ( * priv ) ) ;
2008-07-25 12:49:04 +04:00
if ( ! mci ) {
ret = - ENOMEM ;
2008-07-25 12:49:08 +04:00
goto bail_disable_ch1 ;
2008-07-25 12:49:04 +04:00
}
2012-08-08 19:30:56 +04:00
/* device 19, func 0, Error injection */
einj = pci_get_device_func ( PCI_VENDOR_ID_INTEL ,
PCI_DEVICE_ID_INTEL_5100_19 , 0 ) ;
if ( ! einj ) {
ret = - ENODEV ;
goto bail_einj ;
}
rc = pci_enable_device ( einj ) ;
if ( rc < 0 ) {
ret = rc ;
goto bail_disable_einj ;
}
2012-03-16 14:44:18 +04:00
mci - > pdev = & pdev - > dev ;
2008-07-25 12:49:04 +04:00
priv = mci - > pvt_info ;
2009-12-16 03:47:40 +03:00
priv - > ranksperchan = ranksperch ;
2008-07-25 12:49:04 +04:00
priv - > mc = pdev ;
priv - > ch0mm = ch0mm ;
priv - > ch1mm = ch1mm ;
2012-08-08 19:30:56 +04:00
priv - > einj = einj ;
2008-07-25 12:49:04 +04:00
2009-12-16 03:47:42 +03:00
INIT_DELAYED_WORK ( & ( priv - > i5100_scrubbing ) , i5100_refresh_scrubbing ) ;
/* If scrubbing was already enabled by the bios, start maintaining it */
pci_read_config_dword ( pdev , I5100_MC , & dw ) ;
if ( i5100_mc_scrben ( dw ) ) {
priv - > scrub_enable = 1 ;
schedule_delayed_work ( & ( priv - > i5100_scrubbing ) ,
I5100_SCRUB_REFRESH_RATE ) ;
}
2008-07-25 12:49:04 +04:00
i5100_init_dimm_layout ( pdev , mci ) ;
i5100_init_interleaving ( pdev , mci ) ;
mci - > mtype_cap = MEM_FLAG_FB_DDR2 ;
mci - > edac_ctl_cap = EDAC_FLAG_SECDED ;
mci - > edac_cap = EDAC_FLAG_SECDED ;
mci - > mod_name = " i5100_edac.c " ;
mci - > ctl_name = " i5100 " ;
mci - > dev_name = pci_name ( pdev ) ;
2008-07-25 12:49:08 +04:00
mci - > ctl_page_to_phys = NULL ;
2008-07-25 12:49:04 +04:00
mci - > edac_check = i5100_check_error ;
2009-12-16 03:47:42 +03:00
mci - > set_sdram_scrub_rate = i5100_set_scrub_rate ;
mci - > get_sdram_scrub_rate = i5100_get_scrub_rate ;
2008-07-25 12:49:04 +04:00
2012-08-08 19:30:57 +04:00
priv - > inject_channel = 0 ;
priv - > inject_hlinesel = 0 ;
priv - > inject_deviceptr1 = 0 ;
priv - > inject_deviceptr2 = 0 ;
priv - > inject_eccmask1 = 0 ;
priv - > inject_eccmask2 = 0 ;
2008-07-25 12:49:04 +04:00
i5100_init_csrows ( mci ) ;
/* this strange construction seems to be in every driver, dunno why */
switch ( edac_op_state ) {
case EDAC_OPSTATE_POLL :
case EDAC_OPSTATE_NMI :
break ;
default :
edac_op_state = EDAC_OPSTATE_POLL ;
break ;
}
if ( edac_mc_add_mc ( mci ) ) {
ret = - ENODEV ;
2009-12-16 03:47:42 +03:00
goto bail_scrub ;
2008-07-25 12:49:04 +04:00
}
2012-08-08 19:30:58 +04:00
i5100_setup_debugfs ( mci ) ;
2008-07-25 12:49:08 +04:00
return ret ;
2008-07-25 12:49:04 +04:00
2009-12-16 03:47:42 +03:00
bail_scrub :
priv - > scrub_enable = 0 ;
cancel_delayed_work_sync ( & ( priv - > i5100_scrubbing ) ) ;
2008-07-25 12:49:04 +04:00
edac_mc_free ( mci ) ;
2012-08-08 19:30:56 +04:00
bail_disable_einj :
pci_disable_device ( einj ) ;
bail_einj :
pci_dev_put ( einj ) ;
2008-07-25 12:49:08 +04:00
bail_disable_ch1 :
pci_disable_device ( ch1mm ) ;
2008-07-25 12:49:04 +04:00
bail_ch1 :
pci_dev_put ( ch1mm ) ;
2008-07-25 12:49:08 +04:00
bail_disable_ch0 :
pci_disable_device ( ch0mm ) ;
2008-07-25 12:49:04 +04:00
bail_ch0 :
pci_dev_put ( ch0mm ) ;
2008-07-25 12:49:08 +04:00
bail_pdev :
pci_disable_device ( pdev ) ;
2008-07-25 12:49:04 +04:00
bail :
return ret ;
}
2012-12-22 01:23:51 +04:00
static void i5100_remove_one ( struct pci_dev * pdev )
2008-07-25 12:49:04 +04:00
{
struct mem_ctl_info * mci ;
struct i5100_priv * priv ;
mci = edac_mc_del_mc ( & pdev - > dev ) ;
if ( ! mci )
return ;
priv = mci - > pvt_info ;
2009-12-16 03:47:42 +03:00
2015-09-22 13:36:15 +03:00
edac_debugfs_remove_recursive ( priv - > debugfs ) ;
2012-08-08 19:30:58 +04:00
2009-12-16 03:47:42 +03:00
priv - > scrub_enable = 0 ;
cancel_delayed_work_sync ( & ( priv - > i5100_scrubbing ) ) ;
2008-07-25 12:49:08 +04:00
pci_disable_device ( pdev ) ;
pci_disable_device ( priv - > ch0mm ) ;
pci_disable_device ( priv - > ch1mm ) ;
2012-08-08 19:30:56 +04:00
pci_disable_device ( priv - > einj ) ;
2008-07-25 12:49:04 +04:00
pci_dev_put ( priv - > ch0mm ) ;
pci_dev_put ( priv - > ch1mm ) ;
2012-08-08 19:30:56 +04:00
pci_dev_put ( priv - > einj ) ;
2008-07-25 12:49:04 +04:00
edac_mc_free ( mci ) ;
}
2013-12-06 13:23:08 +04:00
static const struct pci_device_id i5100_pci_tbl [ ] = {
2008-07-25 12:49:04 +04:00
/* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
{ PCI_DEVICE ( PCI_VENDOR_ID_INTEL , PCI_DEVICE_ID_INTEL_5100_16 ) } ,
{ 0 , }
} ;
MODULE_DEVICE_TABLE ( pci , i5100_pci_tbl ) ;
static struct pci_driver i5100_driver = {
. name = KBUILD_BASENAME ,
. probe = i5100_init_one ,
2012-12-22 01:23:51 +04:00
. remove = i5100_remove_one ,
2008-07-25 12:49:04 +04:00
. id_table = i5100_pci_tbl ,
} ;
/*
 * Module entry point: set up the driver-wide debugfs root (a NULL root
 * merely disables the error-injection files) and register the driver.
 */
static int __init i5100_init(void)
{
	int rc;

	i5100_debugfs = edac_debugfs_create_dir_at("i5100_edac", NULL);

	rc = pci_register_driver(&i5100_driver);
	if (rc < 0)
		return rc;

	return 0;
}
/* Module exit: tear down the debugfs root and unregister the driver. */
static void __exit i5100_exit(void)
{
	edac_debugfs_remove(i5100_debugfs);

	pci_unregister_driver(&i5100_driver);
}
module_init(i5100_init);
module_exit(i5100_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arthur Jones <ajones@riverbed.com>");
MODULE_DESCRIPTION("MC Driver for Intel I5100 memory controllers");