#include "amd64_edac.h"
#include <asm/amd_nb.h>
static struct edac_pci_ctl_info *amd64_ctl_pci;

static int report_gart_errors;
module_param(report_gart_errors, int, 0644);

/*
 * Set by command line parameter. If BIOS has enabled the ECC, this override is
 * cleared to prevent re-enabling the hardware by this driver.
 */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);

static struct msr __percpu *msrs;

/*
 * count successfully initialized driver instances for setup_pci_device()
 */
static atomic_t drv_instances = ATOMIC_INIT(0);

/* Per-node driver instances */
static struct mem_ctl_info **mcis;
static struct ecc_settings **ecc_stngs;

/*
 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
 * or higher value'; see the worked example after the table below.
 *
 * FIXME: Produce a better mapping/linearisation.
 */
struct scrubrate {
	u32 scrubval;		/* bit pattern for scrub rate */
	u32 bandwidth;		/* bandwidth consumed (bytes/sec) */
} scrubrates[] = {
	{ 0x01, 1600000000UL },
	{ 0x02, 800000000UL },
	{ 0x03, 400000000UL },
	{ 0x04, 200000000UL },
	{ 0x05, 100000000UL },
	{ 0x06, 50000000UL },
	{ 0x07, 25000000UL },
	{ 0x08, 12284069UL },
	{ 0x09, 6274509UL },
	{ 0x0A, 3121951UL },
	{ 0x0B, 1560975UL },
	{ 0x0C, 781440UL },
	{ 0x0D, 390720UL },
	{ 0x0E, 195300UL },
	{ 0x0F, 97650UL },
	{ 0x10, 48854UL },
	{ 0x11, 24427UL },
	{ 0x12, 12213UL },
	{ 0x13, 6101UL },
	{ 0x14, 3051UL },
	{ 0x15, 1523UL },
	{ 0x16, 761UL },
	{ 0x00, 0UL },		/* scrubbing off */
};
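
/*
 * Worked example of the lookup done in __amd64_set_scrub_rate() below: a
 * request for 500000000 bytes/sec on K8 (min_rate 0x00) walks the table until
 * the first bandwidth <= the request: 1600000000 and 800000000 are too big,
 * 400000000 matches, so scrubval 0x03 gets programmed. On F10h, min_rate 0x05
 * additionally skips the not-recommended scrubvals 0x01-0x04 before the
 * comparison even starts.
 */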

static int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
				      u32 *val, const char *func)
{
	int err = 0;

	err = pci_read_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error reading F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}

int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
				u32 val, const char *func)
{
	int err = 0;

	err = pci_write_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error writing to F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}

/*
 * Depending on the family, F2 DCT reads need special handling:
 *
 * K8: has a single DCT only
 *
 * F10h: each DCT has its own set of regs
 *	DCT0 -> F2x040..
 *	DCT1 -> F2x140..
 *
 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
 */
static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
			       const char *func)
{
	if (addr >= 0x100)
		return -EINVAL;

	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
}

static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
				const char *func)
{
	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
}

/*
 * Select DCT to which PCI cfg accesses are routed
 */
static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
{
	u32 reg = 0;

	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
	reg &= 0xfffffffe;
	reg |= dct;
	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
}

static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
				const char *func)
{
	u8 dct = 0;

	if (addr >= 0x140 && addr <= 0x1a0) {
		dct   = 1;
		addr -= 0x100;
	}

	f15h_select_dct(pvt, dct);

	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
}
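
/*
 * For illustration: on F15h, a read of the DCT1 register F2x158 first writes
 * F1x10C[DctCfgSel] = 1 and then issues the access at offset 0x058, since the
 * 0x140-0x1a0 window mirrors the DCT0 register block shifted by 0x100.
 */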

/*
 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 * functionality.
 *
 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
 * bytes/sec for the setting.
 *
 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
 * other archs, we might not have access to the caches directly.
 */

/*
 * scan the scrub rate mapping table for a close or matching bandwidth value to
 * issue. If requested is too big, then use last maximum value found.
 */
static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
{
	u32 scrubval;
	int i;

	/*
	 * map the configured rate (new_bw) to a value specific to the AMD64
	 * memory controller and apply to register. Search for the first
	 * bandwidth entry that is less than or equal to the setting requested
	 * and program that. If no suitable entry is found, fall through to the
	 * last (off) element.
	 */
	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		/*
		 * skip scrub rates which aren't recommended
		 * (see F10 BKDG, F3x58)
		 */
		if (scrubrates[i].scrubval < min_rate)
			continue;

		if (scrubrates[i].bandwidth <= new_bw)
			break;

		/*
		 * if no suitable bandwidth is found, turn off DRAM scrubbing
		 * entirely by falling back to the last element in the
		 * scrubrates array, whose bandwidth of 0 always matches.
		 */
	}

	scrubval = scrubrates[i].scrubval;

	pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F);

	if (scrubval)
		return scrubrates[i].bandwidth;

	return 0;
}

static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 min_scrubrate = 0x5;

	if (boot_cpu_data.x86 == 0xf)
		min_scrubrate = 0x0;

	/* F15h Erratum #505 */
	if (boot_cpu_data.x86 == 0x15)
		f15h_select_dct(pvt, 0);

	return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate);
}

static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 scrubval = 0;
	int i, retval = -EINVAL;

	/* F15h Erratum #505 */
	if (boot_cpu_data.x86 == 0x15)
		f15h_select_dct(pvt, 0);

	amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);

	scrubval = scrubval & 0x001F;

	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		if (scrubrates[i].scrubval == scrubval) {
			retval = scrubrates[i].bandwidth;
			break;
		}
	}
	return retval;
}

/*
 * returns true if the SysAddr given by sys_addr matches the
 * DRAM base/limit associated with node_id
 */
static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr,
				   unsigned nid)
{
	u64 addr;

	/* The K8 treats this as a 40-bit value.  However, bits 63-40 will be
	 * all ones if the most significant implemented address bit is 1.
	 * Here we discard bits 63-40.  See section 3.4.2 of AMD publication
	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
	 * Application Programming.
	 */
	addr = sys_addr & 0x000000ffffffffffull;

	return ((addr >= get_dram_base(pvt, nid)) &&
		(addr <= get_dram_limit(pvt, nid)));
}

/*
 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 * mem_ctl_info structure for the node that the SysAddr maps to.
 *
 * On failure, return NULL.
 */
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
						u64 sys_addr)
{
	struct amd64_pvt *pvt;
	unsigned node_id;
	u32 intlv_en, bits;

	/*
	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
	 * 3.4.4.2) registers to map the SysAddr to a node ID.
	 */
	pvt = mci->pvt_info;

	/*
	 * The value of this field should be the same for all DRAM Base
	 * registers.  Therefore we arbitrarily choose to read it from the
	 * register for node 0.
	 */
	intlv_en = dram_intlv_en(pvt, 0);

	if (intlv_en == 0) {
		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
			if (amd64_base_limit_match(pvt, sys_addr, node_id))
				goto found;
		}
		goto err_no_match;
	}

	if (unlikely((intlv_en != 0x01) &&
		     (intlv_en != 0x03) &&
		     (intlv_en != 0x07))) {
		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
		return NULL;
	}

	bits = (((u32) sys_addr) >> 12) & intlv_en;

	for (node_id = 0; ; ) {
		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
			break;	/* intlv_sel field matches */

		if (++node_id >= DRAM_RANGES)
			goto err_no_match;
	}

	/* sanity test for sys_addr */
	if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address"
			   " range for node %d with node interleaving enabled.\n",
			   __func__, sys_addr, node_id);
		return NULL;
	}

found:
	return edac_mc_find((int)node_id);

err_no_match:
	debugf2("sys_addr 0x%lx doesn't match any node\n",
		(unsigned long)sys_addr);

	return NULL;
}

/*
 * compute the CS base address of the @csrow on the DRAM controller @dct.
 * For details see F2x[5C:40] in the processor's BKDG
 */
static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
				 u64 *base, u64 *mask)
{
	u64 csbase, csmask, base_bits, mask_bits;
	u8 addr_shift;

	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow];
		base_bits	= GENMASK(21, 31) | GENMASK(9, 15);
		mask_bits	= GENMASK(21, 29) | GENMASK(9, 15);
		addr_shift	= 4;
	} else {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow >> 1];
		addr_shift	= 8;

		if (boot_cpu_data.x86 == 0x15)
			base_bits = mask_bits = GENMASK(19, 30) | GENMASK(5, 13);
		else
			base_bits = mask_bits = GENMASK(19, 28) | GENMASK(5, 13);
	}

	*base  = (csbase & base_bits) << addr_shift;

	*mask  = ~0ULL;
	/* poke holes for the csmask */
	*mask &= ~(mask_bits << addr_shift);
	/* OR them in */
	*mask |= (csmask & mask_bits) << addr_shift;
}
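
/*
 * Illustration (made-up register value): on rev F and later parts, chip
 * selects pair up, so csrow 5 shares csmasks[5 >> 1] = csmasks[2] with csrow
 * 4, and the register fields are hoisted to byte addresses with addr_shift =
 * 8, i.e. a raw csbase of 0x00100000 contributes (0x00100000 & base_bits) << 8
 * to *base.
 */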

#define for_each_chip_select(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].b_cnt; i++)

#define chip_select_base(i, dct, pvt) \
	pvt->csels[dct].csbases[i]

#define for_each_chip_select_mask(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].m_cnt; i++)

/*
 * @input_addr is an InputAddr associated with the node given by mci. Return the
 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
 */
static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;
	int csrow;
	u64 base, mask;

	pvt = mci->pvt_info;

	for_each_chip_select(csrow, 0, pvt) {
		if (!csrow_enabled(csrow, 0, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);

		mask = ~mask;

		if ((input_addr & mask) == (base & mask)) {
			debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
				(unsigned long)input_addr, csrow,
				pvt->mc_node_id);

			return csrow;
		}
	}
	debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
		(unsigned long)input_addr, pvt->mc_node_id);

	return -1;
}

/*
 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 * for the node represented by mci. Info is passed back in *hole_base,
 * *hole_offset, and *hole_size.  Function returns 0 if info is valid or 1 if
 * info is invalid.  Info may be invalid for either of the following reasons:
 *
 * - The revision of the node is not E or greater.  In this case, the DRAM Hole
 *   Address Register does not exist.
 *
 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 *   indicating that its contents are not valid.
 *
 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 * complete 32-bit values despite the fact that the bitfields in the DHAR
 * only represent bits 31-24 of the base and offset values.
 */
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			     u64 *hole_offset, u64 *hole_size)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 base;

	/* only revE and later have the DRAM Hole Address Register */
	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
		debugf1("  revision %d for node %d does not support DHAR\n",
			pvt->ext_model, pvt->mc_node_id);
		return 1;
	}

	/* valid for Fam10h and above */
	if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
		debugf1("  Dram Memory Hoisting is DISABLED on this system\n");
		return 1;
	}

	if (!dhar_valid(pvt)) {
		debugf1("  Dram Memory Hoisting is DISABLED on this node %d\n",
			pvt->mc_node_id);
		return 1;
	}

	/* This node has Memory Hoisting */

	/* +------------------+--------------------+--------------------+-----
	 * | memory           | DRAM hole          | relocated          |
	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
	 * |                  |                    | DRAM hole          |
	 * |                  |                    | [0x100000000,      |
	 * |                  |                    |  (0x100000000+     |
	 * |                  |                    |   (0xffffffff-x))] |
	 * +------------------+--------------------+--------------------+-----
	 *
	 * Above is a diagram of physical memory showing the DRAM hole and the
	 * relocated addresses from the DRAM hole. As shown, the DRAM hole
	 * starts at address x (the base address) and extends through address
	 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
	 * addresses in the hole so that they start at 0x100000000.
	 */

	base = dhar_base(pvt);

	*hole_base = base;
	*hole_size = (0x1ull << 32) - base;

	if (boot_cpu_data.x86 > 0xf)
		*hole_offset = f10_dhar_offset(pvt);
	else
		*hole_offset = k8_dhar_offset(pvt);

	debugf1("  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
		pvt->mc_node_id, (unsigned long)*hole_base,
		(unsigned long)*hole_offset, (unsigned long)*hole_size);

	return 0;
}
EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
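
/*
 * For illustration (hypothetical DHAR contents): with a hole base of
 * 0xc0000000, *hole_base = 0xc0000000 and *hole_size = (1ULL << 32) -
 * 0xc0000000 = 0x40000000, i.e. the top 1GB below the 4GB boundary is hoisted
 * above 4GB.
 */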

/*
 * Return the DramAddr that the SysAddr given by @sys_addr maps to.  It is
 * assumed that sys_addr maps to the node given by mci.
 *
 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
 * then it is also involved in translating a SysAddr to a DramAddr. Sections
 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
 * These parts of the documentation are unclear. I interpret them as follows:
 *
 * When node n receives a SysAddr, it processes the SysAddr as follows:
 *
 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
 *    Limit registers for node n. If the SysAddr is not within the range
 *    specified by the base and limit values, then node n ignores the Sysaddr
 *    (since it does not map to node n). Otherwise continue to step 2 below.
 *
 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
 *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
 *    the range of relocated addresses (starting at 0x100000000) from the DRAM
 *    hole. If not, skip to step 3 below. Else get the value of the
 *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
 *    offset defined by this value from the SysAddr.
 *
 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
 *    Base register for node n. To obtain the DramAddr, subtract the base
 *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
 */
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
	int ret = 0;

	dram_base = get_dram_base(pvt, pvt->mc_node_id);

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				       &hole_size);
	if (!ret) {
		if ((sys_addr >= (1ull << 32)) &&
		    (sys_addr < ((1ull << 32) + hole_size))) {
			/* use DHAR to translate SysAddr to DramAddr */
			dram_addr = sys_addr - hole_offset;

			debugf2("using DHAR to translate SysAddr 0x%lx to "
				"DramAddr 0x%lx\n",
				(unsigned long)sys_addr,
				(unsigned long)dram_addr);

			return dram_addr;
		}
	}

	/*
	 * Translate the SysAddr to a DramAddr as shown near the start of
	 * section 3.4.4 (p. 70).  Although sys_addr is a 64-bit value, the k8
	 * only deals with 40-bit values.  Therefore we discard bits 63-40 of
	 * sys_addr below.  If bit 39 of sys_addr is 1 then the bits we
	 * discard are all 1s.  Otherwise the bits we discard are all 0s.  See
	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
	 * Programmer's Manual Volume 1 Application Programming.
	 */
	dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base;

	debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
		"DramAddr 0x%lx\n", (unsigned long)sys_addr,
		(unsigned long)dram_addr);
	return dram_addr;
}

/*
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 * (section 3.4.4.1).  Return the number of bits from a SysAddr that are used
 * for node interleaving.
 */
static int num_node_interleave_bits(unsigned intlv_en)
{
	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
	int n;

	BUG_ON(intlv_en > 7);
	n = intlv_shift_table[intlv_en];
	return n;
}
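
/*
 * Only 0x1, 0x3 and 0x7 are valid IntlvEn encodings: 1, 2 and 3 interleave
 * bits for 2, 4 and 8 nodes respectively. 0x0 means node interleaving is
 * disabled, and the remaining table indices are malformed patterns that also
 * map to a shift of 0.
 */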

/* Translate the DramAddr given by @dram_addr to an InputAddr. */
static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt;
	int intlv_shift;
	u64 input_addr;

	pvt = mci->pvt_info;

	/*
	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * concerning translating a DramAddr to an InputAddr.
	 */
	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
	input_addr  = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) +
		      (dram_addr & 0xfff);

	debugf2("  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
		intlv_shift, (unsigned long)dram_addr,
		(unsigned long)input_addr);

	return input_addr;
}

/*
 * Translate the SysAddr represented by @sys_addr to an InputAddr.  It is
 * assumed that @sys_addr maps to the node given by mci.
 */
static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	u64 input_addr;

	input_addr =
	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));

	debugf2("SysAddr 0x%lx translates to InputAddr 0x%lx\n",
		(unsigned long)sys_addr, (unsigned long)input_addr);

	return input_addr;
}

/*
 * @input_addr is an InputAddr associated with the node represented by mci.
 * Translate @input_addr to a DramAddr and return the result.
 */
static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;
	unsigned node_id, intlv_shift;
	u64 bits, dram_addr;
	u32 intlv_sel;

	/*
	 * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * shows how to translate a DramAddr to an InputAddr. Here we reverse
	 * this procedure. When translating from a DramAddr to an InputAddr, the
	 * bits used for node interleaving are discarded. Here we recover these
	 * bits from the IntlvSel field of the DRAM Limit register (section
	 * 3.4.4.2) for the node that input_addr is associated with.
	 */
	pvt = mci->pvt_info;
	node_id = pvt->mc_node_id;

	BUG_ON(node_id > 7);

	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
	if (intlv_shift == 0) {
		debugf1("    InputAddr 0x%lx translates to DramAddr of "
			"same value\n",	(unsigned long)input_addr);

		return input_addr;
	}

	bits = ((input_addr & GENMASK(12, 35)) << intlv_shift) +
		(input_addr & 0xfff);

	intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
	dram_addr = bits + (intlv_sel << 12);

	debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
		"(%d node interleave bits)\n", (unsigned long)input_addr,
		(unsigned long)dram_addr, intlv_shift);

	return dram_addr;
}
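
/*
 * Worked example (made-up values): with intlv_shift = 1 and intlv_sel = 1, an
 * InputAddr of 0x5000 yields bits = (0x5000 << 1) = 0xa000 and a DramAddr of
 * 0xa000 + (1 << 12) = 0xb000; the interleave bit lost during the forward
 * translation reappears at bit 12.
 */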

/*
 * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
 * @dram_addr to a SysAddr.
 */
static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 hole_base, hole_offset, hole_size, base, sys_addr;
	int ret = 0;

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				       &hole_size);
	if (!ret) {
		if ((dram_addr >= hole_base) &&
		    (dram_addr < (hole_base + hole_size))) {
			sys_addr = dram_addr + hole_offset;

			debugf1("using DHAR to translate DramAddr 0x%lx to "
				"SysAddr 0x%lx\n", (unsigned long)dram_addr,
				(unsigned long)sys_addr);

			return sys_addr;
		}
	}

	base     = get_dram_base(pvt, pvt->mc_node_id);
	sys_addr = dram_addr + base;

	/*
	 * The sys_addr we have computed up to this point is a 40-bit value
	 * because the k8 deals with 40-bit values.  However, the value we are
	 * supposed to return is a full 64-bit physical address.  The AMD
	 * x86-64 architecture specifies that the most significant implemented
	 * address bit through bit 63 of a physical address must be either all
	 * 0s or all 1s.  Therefore we sign-extend the 40-bit sys_addr to a
	 * 64-bit value below.  See section 3.4.2 of AMD publication 24592:
	 * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
	 * Programming.
	 */
	sys_addr |= ~((sys_addr & (1ull << 39)) - 1);

	debugf1("    Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
		pvt->mc_node_id, (unsigned long)dram_addr,
		(unsigned long)sys_addr);

	return sys_addr;
}
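
/*
 * Sign-extension example: if bit 39 of sys_addr is set,
 * (sys_addr & (1ULL << 39)) - 1 is 0x0000007fffffffff and its complement ORs
 * ones into bits 63-39, so 0x8000000000 becomes 0xffffff8000000000; with bit
 * 39 clear the OR mask is 0 and the address is unchanged.
 */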

/*
 * @input_addr is an InputAddr associated with the node given by mci. Translate
 * @input_addr to a SysAddr.
 */
static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
					 u64 input_addr)
{
	return dram_addr_to_sys_addr(mci,
				     input_addr_to_dram_addr(mci, input_addr));
}

/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
						    u32 *page, u32 *offset)
{
	*page = (u32) (error_address >> PAGE_SHIFT);
	*offset = ((u32) error_address) & ~PAGE_MASK;
}

/*
 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
 * of a node that detected an ECC memory error.  mci represents the node that
 * the error address maps to (possibly different from the node that detected
 * the error).  Return the number of the csrow that sys_addr maps to, or -1 on
 * error.
 */
static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
{
	int csrow;

	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));

	if (csrow == -1)
		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
				  "address 0x%lx\n", (unsigned long)sys_addr);
	return csrow;
}

static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);

/*
 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
 * are ECC capable.
 */
static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt)
{
	u8 bit;
	unsigned long edac_cap = EDAC_FLAG_NONE;

	bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
		? 19
		: 17;

	if (pvt->dclr0 & BIT(bit))
		edac_cap = EDAC_FLAG_SECDED;

	return edac_cap;
}

static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8);

static void amd64_dump_dramcfg_low(u32 dclr, int chan)
{
	debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);

	debugf1("  DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
		(dclr & BIT(16)) ?  "un" : "",
		(dclr & BIT(19)) ? "yes" : "no");

	debugf1("  PAR/ERR parity: %s\n",
		(dclr & BIT(8)) ?  "enabled" : "disabled");

	if (boot_cpu_data.x86 == 0x10)
		debugf1("  DCT 128bit mode width: %s\n",
			(dclr & BIT(11)) ?  "128b" : "64b");

	debugf1("  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
		(dclr & BIT(12)) ?  "yes" : "no",
		(dclr & BIT(13)) ?  "yes" : "no",
		(dclr & BIT(14)) ?  "yes" : "no",
		(dclr & BIT(15)) ?  "yes" : "no");
}

/* Display and decode various NB registers for debug purposes. */
static void dump_misc_regs(struct amd64_pvt *pvt)
{
	debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);

	debugf1("  NB two channel DRAM capable: %s\n",
		(pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");

	debugf1("  ECC capable: %s, ChipKill ECC capable: %s\n",
		(pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
		(pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");

	amd64_dump_dramcfg_low(pvt->dclr0, 0);

	debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);

	debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
		"offset: 0x%08x\n",
		pvt->dhar, dhar_base(pvt),
		(boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
					   : f10_dhar_offset(pvt));

	debugf1("  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");

	amd64_debug_display_dimm_sizes(pvt, 0);

	/* everything below this point is Fam10h and above */
	if (boot_cpu_data.x86 == 0xf)
		return;

	amd64_debug_display_dimm_sizes(pvt, 1);

	amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));

	/* Only if NOT ganged does dclr1 have valid info */
	if (!dct_ganging_enabled(pvt))
		amd64_dump_dramcfg_low(pvt->dclr1, 1);
}

/*
 * see BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
 */
static void prep_chip_selects(struct amd64_pvt *pvt)
{
	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
	} else {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
	}
}

/*
 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
 */
static void read_dct_base_mask(struct amd64_pvt *pvt)
{
	int cs;

	prep_chip_selects(pvt);

	for_each_chip_select(cs, 0, pvt) {
		int reg0   = DCSB0 + (cs * 4);
		int reg1   = DCSB1 + (cs * 4);
		u32 *base0 = &pvt->csels[0].csbases[cs];
		u32 *base1 = &pvt->csels[1].csbases[cs];

		if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
			debugf0("  DCSB0[%d]=0x%08x reg: F2x%x\n",
				cs, *base0, reg0);

		if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
			debugf0("  DCSB1[%d]=0x%08x reg: F2x%x\n",
				cs, *base1, reg1);
	}

	for_each_chip_select_mask(cs, 0, pvt) {
		int reg0   = DCSM0 + (cs * 4);
		int reg1   = DCSM1 + (cs * 4);
		u32 *mask0 = &pvt->csels[0].csmasks[cs];
		u32 *mask1 = &pvt->csels[1].csmasks[cs];

		if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
			debugf0("    DCSM0[%d]=0x%08x reg: F2x%x\n",
				cs, *mask0, reg0);

		if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
			debugf0("    DCSM1[%d]=0x%08x reg: F2x%x\n",
				cs, *mask1, reg1);
	}
}

static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
{
	enum mem_type type;

	/* F15h supports only DDR3 */
	if (boot_cpu_data.x86 >= 0x15)
		type = (pvt->dclr0 & BIT(16)) ?	MEM_DDR3 : MEM_RDDR3;
	else if (boot_cpu_data.x86 == 0x10 || pvt->ext_model >= K8_REV_F) {
		if (pvt->dchr0 & DDR3_MODE)
			type = (pvt->dclr0 & BIT(16)) ?	MEM_DDR3 : MEM_RDDR3;
		else
			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
	} else {
		type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
	}

	amd64_info("CS%d: %s\n", cs, edac_mem_types[type]);

	return type;
}

/* Get the number of DCT channels the memory controller is using. */
static int k8_early_channel_count(struct amd64_pvt *pvt)
{
	int flag;

	if (pvt->ext_model >= K8_REV_F)
		/* RevF (NPT) and later */
		flag = pvt->dclr0 & WIDTH_128;
	else
		/* RevE and earlier */
		flag = pvt->dclr0 & REVE_WIDTH_128;

	/* not used */
	pvt->dclr1 = 0;

	return (flag) ? 2 : 1;
}

/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
static u64 get_error_address(struct mce *m)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	u64 addr;
	u8 start_bit = 1;
	u8 end_bit   = 47;

	if (c->x86 == 0xf) {
		start_bit = 3;
		end_bit   = 39;
	}

	addr = m->addr & GENMASK(start_bit, end_bit);

	/*
	 * Erratum 637 workaround
	 */
	if (c->x86 == 0x15) {
		struct amd64_pvt *pvt;
		u64 cc6_base, tmp_addr;
		u32 tmp;
		u8 mce_nid, intlv_en;

		if ((addr & GENMASK(24, 47)) >> 24 != 0x00fdf7)
			return addr;

		mce_nid	= amd_get_nb_id(m->extcpu);
		pvt	= mcis[mce_nid]->pvt_info;

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
		intlv_en = tmp >> 21 & 0x7;

		/* add [47:27] + 3 trailing bits */
		cc6_base  = (tmp & GENMASK(0, 20)) << 3;

		/* reverse and add DramIntlvEn */
		cc6_base |= intlv_en ^ 0x7;

		/* pin at [47:24] */
		cc6_base <<= 24;

		if (!intlv_en)
			return cc6_base | (addr & GENMASK(0, 23));

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);

							/* faster log2 */
		tmp_addr  = (addr & GENMASK(12, 23)) << __fls(intlv_en + 1);

		/* OR DramIntlvSel into bits [14:12] */
		tmp_addr |= (tmp & GENMASK(21, 23)) >> 9;

		/* add remaining [11:0] bits from original MC4_ADDR */
		tmp_addr |= addr & GENMASK(0, 11);

		return cc6_base | tmp_addr;
	}

	return addr;
}
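
/*
 * Note on the masking above: GENMASK() here is the driver's inclusive
 * (lo, hi) variant, so on K8 the reported address keeps bits [39:3] (the low
 * three MC4_ADDR bits are not implemented there), while F10h and later keep
 * bits [47:1].
 */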

static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	int off = range << 3;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);

	if (c->x86 == 0xf)
		return;

	if (!dram_rw(pvt, range))
		return;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);

	/* Factor in CC6 save area by reading dst node's limit reg */
	if (c->x86 == 0x15) {
		struct pci_dev *f1 = NULL;
		u8 nid = dram_dst_node(pvt, range);
		u32 llim;

		f1 = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x18 + nid, 1));
		if (WARN_ON(!f1))
			return;

		amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);

		pvt->ranges[range].lim.lo &= GENMASK(0, 15);

					    /* {[39:27],111b} */
		pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;

		pvt->ranges[range].lim.hi &= GENMASK(0, 7);

					    /* [47:40] */
		pvt->ranges[range].lim.hi |= llim >> 13;

		pci_dev_put(f1);
	}
}

static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				    u16 syndrome)
{
	struct mem_ctl_info *src_mci;
	struct amd64_pvt *pvt = mci->pvt_info;
	int channel, csrow;
	u32 page, offset;

	error_address_to_page_and_offset(sys_addr, &page, &offset);

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	 */
	src_mci = find_mc_by_sys_addr(mci, sys_addr);
	if (!src_mci) {
		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
			     (unsigned long)sys_addr);
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
				     page, offset, syndrome,
				     -1, -1, -1,
				     EDAC_MOD_STR,
				     "failed to map error addr to a node",
				     NULL);
		return;
	}

	/* Now map the sys_addr to a CSROW */
	csrow = sys_addr_to_csrow(src_mci, sys_addr);
	if (csrow < 0) {
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
				     page, offset, syndrome,
				     -1, -1, -1,
				     EDAC_MOD_STR,
				     "failed to map error addr to a csrow",
				     NULL);
		return;
	}

	/* CHIPKILL enabled */
	if (pvt->nbcfg & NBCFG_CHIPKILL) {
		channel = get_channel_from_ecc_syndrome(mci, syndrome);
		if (channel < 0) {
			/*
			 * Syndrome didn't map, so we don't know which of the
			 * 2 DIMMs is in error. So we need to ID 'both' of them
			 * as suspect.
			 */
			amd64_mc_warn(src_mci, "unknown syndrome 0x%04x - "
				      "possible error reporting race\n",
				      syndrome);
			edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
					     page, offset, syndrome,
					     csrow, -1, -1,
					     EDAC_MOD_STR,
					     "unknown syndrome - possible error reporting race",
					     NULL);
			return;
		}
	} else {
		/*
		 * non-chipkill ecc mode
		 *
		 * The k8 documentation is unclear about how to determine the
		 * channel number when using non-chipkill memory.  This method
		 * was obtained from email communication with someone at AMD.
		 * (Wish the email was placed in this comment - norsk)
		 */
		channel = ((sys_addr & BIT(3)) != 0);
	}

	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, src_mci,
			     page, offset, syndrome,
			     csrow, channel, -1,
			     EDAC_MOD_STR, "", NULL);
}

static int ddr2_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;

	if (i <= 2)
		shift = i;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	return 128 << (shift + !!dct_width);
}
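
/*
 * E.g. cs_mode 5 (an odd value) gives shift = (5 + 1) >> 1 = 3, i.e.
 * 128 << 3 = 1024MB on a 64-bit DCT, doubled when dct_width (128-bit mode)
 * is set.
 */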

static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				  unsigned cs_mode)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	if (pvt->ext_model >= K8_REV_F) {
		WARN_ON(cs_mode > 11);
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
	}
	else if (pvt->ext_model >= K8_REV_D) {
		unsigned diff;
		WARN_ON(cs_mode > 10);

		/*
		 * the below calculation, besides trying to win an obfuscated C
		 * contest, maps cs_mode values to DIMM chip select sizes. The
		 * mappings are:
		 *
		 * cs_mode	CS size (mb)
		 * =======	============
		 * 0		32
		 * 1		64
		 * 2		128
		 * 3		128
		 * 4		256
		 * 5		512
		 * 6		256
		 * 7		512
		 * 8		1024
		 * 9		1024
		 * 10		2048
		 *
		 * Basically, it calculates a value with which to shift the
		 * smallest CS size of 32MB.
		 *
		 * ddr[23]_cs_size have a similar purpose.
		 */
		diff = cs_mode/3 + (unsigned)(cs_mode > 5);

		return 32 << (cs_mode - diff);
	}
	else {
		WARN_ON(cs_mode > 6);
		return 32 << cs_mode;
	}
}

/*
 * Get the number of DCT channels in use.
 *
 * Return:
 *	number of Memory Channels in operation
 * Pass back:
 *	contents of the DCL0_LOW register
 */
static int f1x_early_channel_count(struct amd64_pvt *pvt)
{
	int i, j, channels = 0;

	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
	if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & WIDTH_128))
		return 2;

	/*
	 * Need to check if in unganged mode: In such, there are 2 channels,
	 * but they are not in 128 bit mode and thus the above 'dclr0' status
	 * bit will be OFF.
	 *
	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
	 * their CSEnable bit on. If so, then SINGLE DIMM case.
	 */
	debugf0("Data width is not 128 bits - need more decoding\n");

	/*
	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
	 * is more than just one DIMM present in unganged mode. Need to check
	 * both controllers since DIMMs can be placed in either one.
	 */
	for (i = 0; i < 2; i++) {
		u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);

		for (j = 0; j < 4; j++) {
			if (DBAM_DIMM(j, dbam) > 0) {
				channels++;
				break;
			}
		}
	}

	if (channels > 2)
		channels = 2;

	amd64_info("MCT channel count: %d\n", channels);

	return channels;
}

static int ddr3_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i == 0 || i == 3 || i == 4)
		cs_size = -1;
	else if (i <= 2)
		shift = i;
	else if (i == 12)
		shift = 7;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = (128 * (1 << !!dct_width)) << shift;

	return cs_size;
}
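
/*
 * Unlike DDR2, cs_modes 0, 3 and 4 report -1 here (no valid size); e.g.
 * cs_mode 7 gives shift = (7 + 1) >> 1 = 4, i.e. (128 * 1) << 4 = 2048MB on
 * a 64-bit DCT.
 */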

static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	WARN_ON(cs_mode > 11);

	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
	else
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
}

/*
 * F15h supports only 64 bit DCT interfaces
 */
static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode)
{
	WARN_ON(cs_mode > 12);

	return ddr3_cs_size(cs_mode, false);
}

static void read_dram_ctl_register(struct amd64_pvt *pvt)
{
	if (boot_cpu_data.x86 == 0xf)
		return;

	if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
		debugf0("F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
			pvt->dct_sel_lo, dct_sel_baseaddr(pvt));

		debugf0("  DCTs operate in %s mode.\n",
			(dct_ganging_enabled(pvt) ? "ganged" : "unganged"));

		if (!dct_ganging_enabled(pvt))
			debugf0("  Address range split per DCT: %s\n",
				(dct_high_range_enabled(pvt) ? "yes" : "no"));

		debugf0("  data interleave for ECC: %s, "
			"DRAM cleared since last warm reset: %s\n",
			(dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
			(dct_memory_cleared(pvt) ? "yes" : "no"));

		debugf0("  channel interleave: %s, "
			"interleave bits selector: 0x%x\n",
			(dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
			dct_sel_interleave_addr(pvt));
	}

	amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
}

/*
 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9
 * Memory Interleaving Modes.
 */
static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				bool hi_range_sel, u8 intlv_en)
{
	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;

	if (dct_ganging_enabled(pvt))
		return 0;

	if (hi_range_sel)
		return dct_sel_high;

	/*
	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
	 */
	if (dct_interleave_enabled(pvt)) {
		u8 intlv_addr = dct_sel_interleave_addr(pvt);

		/* return DCT select function: 0=DCT0, 1=DCT1 */
		if (!intlv_addr)
			return sys_addr >> 6 & 1;

		if (intlv_addr & 0x2) {
			u8 shift = intlv_addr & 0x1 ? 9 : 6;
			u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;

			return ((sys_addr >> shift) & 1) ^ temp;
		}

		return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
	}

	if (dct_high_range_enabled(pvt))
		return ~dct_sel_high & 1;

	return 0;
}
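
/*
 * For illustration: with DctSelIntLvAddr = 0, consecutive 64-byte cache lines
 * simply alternate between the DCTs on address bit 6; the 0x2 encodings
 * additionally XOR in a parity ("hash") of address bits [20:16].
 */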

/* Convert the sys_addr to the normalized DCT address */
static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, unsigned range,
				 u64 sys_addr, bool hi_rng,
				 u32 dct_sel_base_addr)
{
	u64 chan_off;
	u64 dram_base		= get_dram_base(pvt, range);
	u64 hole_off		= f10_dhar_offset(pvt);
	u64 dct_sel_base_off	= (pvt->dct_sel_hi & 0xFFFFFC00) << 16;

	if (hi_rng) {
		/*
		 * if
		 * base address of high range is below 4Gb
		 * (bits [47:27] at [31:11])
		 * DRAM address space on this DCT is hoisted above 4Gb	&&
		 * sys_addr > 4Gb
		 *
		 *	remove hole offset from sys_addr
		 * else
		 *	remove high range offset from sys_addr
		 */
		if ((!(dct_sel_base_addr >> 16) ||
		     dct_sel_base_addr < dhar_base(pvt)) &&
		    dhar_valid(pvt) &&
		    (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dct_sel_base_off;
	} else {
		/*
		 * if
		 * we have a valid hole		&&
		 * sys_addr > 4Gb
		 *
		 *	remove hole
		 * else
		 *	remove dram base to normalize to DCT address
		 */
		if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dram_base;
	}

	return (sys_addr & GENMASK(6, 47)) - (chan_off & GENMASK(23, 47));
}

/*
 * checks if the csrow passed in is marked as SPARED, if so returns the new
 * spare row
 */
static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
{
	int tmp_cs;

	if (online_spare_swap_done(pvt, dct) &&
	    csrow == online_spare_bad_dramcs(pvt, dct)) {

		for_each_chip_select(tmp_cs, dct, pvt) {
			if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
				csrow = tmp_cs;
				break;
			}
		}
	}
	return csrow;
}

/*
 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
 *
 * Return:
 *	-EINVAL:  NOT FOUND
 *	0..csrow = Chip-Select Row
 */
static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u64 cs_base, cs_mask;
	int cs_found = -EINVAL;
	int csrow;

	mci = mcis[nid];
	if (!mci)
		return cs_found;

	pvt = mci->pvt_info;

	debugf1("input addr: 0x%llx, DCT: %d\n", in_addr, dct);

	for_each_chip_select(csrow, dct, pvt) {
		if (!csrow_enabled(csrow, dct, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);

		debugf1("    CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
			csrow, cs_base, cs_mask);

		cs_mask = ~cs_mask;

		debugf1("    (InputAddr & ~CSMask)=0x%llx "
			"(CSBase & ~CSMask)=0x%llx\n",
			(in_addr & cs_mask), (cs_base & cs_mask));

		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
			cs_found = f10_process_possible_spare(pvt, dct, csrow);

			debugf1(" MATCH csrow=%d\n", cs_found);
			break;
		}
	}
	return cs_found;
}

/*
 * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
 * swapped with a region located at the bottom of memory so that the GPU can
 * use the interleaved region and thus two channels.
 */
static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
{
	u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;

	if (boot_cpu_data.x86 == 0x10) {
		/* only revC3 and revE have that feature */
		if (boot_cpu_data.x86_model < 4 ||
		    (boot_cpu_data.x86_model < 0xa &&
		     boot_cpu_data.x86_mask < 3))
			return sys_addr;
	}

	amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg);

	if (!(swap_reg & 0x1))
		return sys_addr;

	swap_base	= (swap_reg >> 3) & 0x7f;
	swap_limit	= (swap_reg >> 11) & 0x7f;
	rgn_size	= (swap_reg >> 20) & 0x7f;
	tmp_addr	= sys_addr >> 27;

	if (!(sys_addr >> 34) &&
	    (((tmp_addr >= swap_base) &&
	      (tmp_addr <= swap_limit)) ||
	     (tmp_addr < rgn_size)))
		return sys_addr ^ (u64)swap_base << 27;

	return sys_addr;
}
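
/*
 * The fields above are in 128MB units (hence the >> 27), and only addresses
 * below 2^34 = 16GB are candidates; the swap itself is a plain XOR with
 * swap_base, which exchanges the two 128MB-aligned regions in both directions.
 */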

/* For a given @dram_range, check if @sys_addr falls within it. */
static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
				  u64 sys_addr, int *nid, int *chan_sel)
{
	int cs_found = -EINVAL;
	u64 chan_addr;
	u32 dct_sel_base;
	u8 channel;
	bool high_range = false;

	u8 node_id    = dram_dst_node(pvt, range);
	u8 intlv_en   = dram_intlv_en(pvt, range);
	u32 intlv_sel = dram_intlv_sel(pvt, range);

	debugf1("(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
		range, sys_addr, get_dram_limit(pvt, range));

	if (dhar_valid(pvt) &&
	    dhar_base(pvt) <= sys_addr &&
	    sys_addr < BIT_64(32)) {
		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
			    sys_addr);
		return -EINVAL;
	}

	if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
		return -EINVAL;

	sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);

	dct_sel_base = dct_sel_baseaddr(pvt);

	/*
	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
	 * select between DCT0 and DCT1.
	 */
	if (dct_high_range_enabled(pvt) &&
	    !dct_ganging_enabled(pvt) &&
	    ((sys_addr >> 27) >= (dct_sel_base >> 11)))
		high_range = true;

	channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);

	chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
					  high_range, dct_sel_base);

	/* Remove node interleaving, see F1x120 */
	if (intlv_en)
		chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
			    (chan_addr & 0xfff);

	/* remove channel interleave */
	if (dct_interleave_enabled(pvt) &&
	    !dct_high_range_enabled(pvt) &&
	    !dct_ganging_enabled(pvt)) {

		if (dct_sel_interleave_addr(pvt) != 1) {
			if (dct_sel_interleave_addr(pvt) == 0x3)
				/* hash 9 */
				chan_addr = ((chan_addr >> 10) << 9) |
					     (chan_addr & 0x1ff);
			else
				/* A[6] or hash 6 */
				chan_addr = ((chan_addr >> 7) << 6) |
					     (chan_addr & 0x3f);
		} else
			/* A[12] */
			chan_addr = ((chan_addr >> 13) << 12) |
				     (chan_addr & 0xfff);
	}

	debugf1("   Normalized DCT addr: 0x%llx\n", chan_addr);

	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);

	if (cs_found >= 0) {
		*nid	  = node_id;
		*chan_sel = channel;
	}
	return cs_found;
}
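
/*
 * Example of the node-interleave-bit removal above (made-up values): with two
 * nodes interleaved (intlv_en = 0x1, hweight8 = 1), a chan_addr of 0x3500
 * becomes ((0x3500 >> 13) << 12) | 0x500 = 0x1500, squeezing the decoded node
 * bit out from above bit 11.
 */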

static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
				       int *node, int *chan_sel)
{
	int cs_found = -EINVAL;
	unsigned range;

	for (range = 0; range < DRAM_RANGES; range++) {

		if (!dram_rw(pvt, range))
			continue;

		if ((get_dram_base(pvt, range)  <= sys_addr) &&
		    (get_dram_limit(pvt, range) >= sys_addr)) {

			cs_found = f1x_match_to_this_node(pvt, range,
							  sys_addr, node,
							  chan_sel);
			if (cs_found >= 0)
				break;
		}
	}
	return cs_found;
}

/*
 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
 *
 * The @sys_addr is usually an error address received from the hardware
 * (MCX_ADDR).
 */
static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				     u16 syndrome)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 page, offset;
	int nid, csrow, chan = 0;

	error_address_to_page_and_offset(sys_addr, &page, &offset);

	csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);

	if (csrow < 0) {
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
				     page, offset, syndrome,
				     -1, -1, -1,
				     EDAC_MOD_STR,
				     "failed to map error addr to a csrow",
				     NULL);
		return;
	}

	/*
	 * We need the syndromes for channel detection only when we're
	 * ganged. Otherwise @chan should already contain the channel at
	 * this point.
	 */
	if (dct_ganging_enabled(pvt))
		chan = get_channel_from_ecc_syndrome(mci, syndrome);

	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
			     page, offset, syndrome,
			     csrow, chan, -1,
			     EDAC_MOD_STR, "", NULL);
}
/*
2009-10-16 15:48:28 +04:00
* debug routine to display the memory sizes of all logical DIMMs and their
2010-12-22 16:28:24 +03:00
* CSROWs
2009-04-27 18:22:43 +04:00
*/
2011-02-23 19:25:12 +03:00
static void amd64_debug_display_dimm_sizes ( struct amd64_pvt * pvt , u8 ctrl )
2009-04-27 18:22:43 +04:00
{
2009-12-21 16:52:53 +03:00
int dimm , size0 , size1 , factor = 0 ;
2010-12-21 17:53:27 +03:00
u32 * dcsb = ctrl ? pvt - > csels [ 1 ] . csbases : pvt - > csels [ 0 ] . csbases ;
u32 dbam = ctrl ? pvt - > dbam1 : pvt - > dbam0 ;
2009-04-27 18:22:43 +04:00
2009-10-16 15:48:28 +04:00
if ( boot_cpu_data . x86 = = 0xf ) {
2011-01-18 21:16:08 +03:00
if ( pvt - > dclr0 & WIDTH_128 )
2009-12-21 16:52:53 +03:00
factor = 1 ;
2009-10-16 15:48:28 +04:00
/* K8 revisions < revF not supported yet */
2009-10-21 15:44:36 +04:00
if ( pvt - > ext_model < K8_REV_F )
2009-10-16 15:48:28 +04:00
return ;
else
WARN_ON ( ctrl ! = 0 ) ;
}
2011-02-03 17:59:57 +03:00
dbam = ( ctrl & & ! dct_ganging_enabled ( pvt ) ) ? pvt - > dbam1 : pvt - > dbam0 ;
2010-11-29 21:49:02 +03:00
dcsb = ( ctrl & & ! dct_ganging_enabled ( pvt ) ) ? pvt - > csels [ 1 ] . csbases
: pvt - > csels [ 0 ] . csbases ;
2009-04-27 18:22:43 +04:00
2011-02-03 17:59:57 +03:00
debugf1 ( " F2x%d80 (DRAM Bank Address Mapping): 0x%08x \n " , ctrl , dbam ) ;
2009-04-27 18:22:43 +04:00
2009-10-16 15:48:28 +04:00
edac_printk ( KERN_DEBUG , EDAC_MC , " DCT%d chip selects: \n " , ctrl ) ;
2009-04-27 18:22:43 +04:00
/* Dump memory sizes for DIMMs and their CSROWs */
for ( dimm = 0 ; dimm < 4 ; dimm + + ) {
size0 = 0 ;
2010-11-29 21:49:02 +03:00
if ( dcsb [ dimm * 2 ] & DCSB_CS_ENABLE )
2011-01-18 21:16:08 +03:00
size0 = pvt - > ops - > dbam_to_cs ( pvt , ctrl ,
DBAM_DIMM ( dimm , dbam ) ) ;
2009-04-27 18:22:43 +04:00
size1 = 0 ;
2010-11-29 21:49:02 +03:00
if ( dcsb [ dimm * 2 + 1 ] & DCSB_CS_ENABLE )
2011-01-18 21:16:08 +03:00
size1 = pvt - > ops - > dbam_to_cs ( pvt , ctrl ,
DBAM_DIMM ( dimm , dbam ) ) ;
2009-04-27 18:22:43 +04:00
2010-10-07 20:29:15 +04:00
amd64_info ( EDAC_MC " : %d: %5dMB %d: %5dMB \n " ,
dimm * 2 , size0 < < factor ,
dimm * 2 + 1 , size1 < < factor ) ;
2009-04-27 18:22:43 +04:00
}
}
2009-04-27 18:25:05 +04:00
static struct amd64_family_type amd64_family_types [ ] = {
[ K8_CPUS ] = {
2010-10-01 21:20:05 +04:00
. ctl_name = " K8 " ,
2010-10-01 22:11:07 +04:00
. f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP ,
. f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC ,
2009-04-27 18:25:05 +04:00
. ops = {
2009-10-21 15:44:36 +04:00
. early_channel_count = k8_early_channel_count ,
. map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow ,
. dbam_to_cs = k8_dbam_to_chip_select ,
2010-10-08 20:32:29 +04:00
. read_dct_pci_cfg = k8_read_dct_pci_cfg ,
2009-04-27 18:25:05 +04:00
}
} ,
[ F10_CPUS ] = {
2010-10-01 21:20:05 +04:00
. ctl_name = " F10h " ,
2010-10-01 22:11:07 +04:00
. f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP ,
. f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC ,
2009-04-27 18:25:05 +04:00
. ops = {
2011-01-07 19:58:04 +03:00
. early_channel_count = f1x_early_channel_count ,
2011-01-17 17:59:58 +03:00
. map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow ,
2009-10-21 15:44:36 +04:00
. dbam_to_cs = f10_dbam_to_chip_select ,
2010-10-08 20:32:29 +04:00
. read_dct_pci_cfg = f10_read_dct_pci_cfg ,
}
} ,
[ F15_CPUS ] = {
. ctl_name = " F15h " ,
2011-01-19 20:15:10 +03:00
. f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1 ,
. f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3 ,
2010-10-08 20:32:29 +04:00
. ops = {
2011-01-07 19:58:04 +03:00
. early_channel_count = f1x_early_channel_count ,
2011-01-17 17:59:58 +03:00
. map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow ,
2011-01-18 21:16:08 +03:00
. dbam_to_cs = f15_dbam_to_chip_select ,
2010-10-08 20:32:29 +04:00
. read_dct_pci_cfg = f15_read_dct_pci_cfg ,
2009-04-27 18:25:05 +04:00
}
} ,
} ;
static struct pci_dev * pci_get_related_function ( unsigned int vendor ,
unsigned int device ,
struct pci_dev * related )
{
struct pci_dev * dev = NULL ;
dev = pci_get_device ( vendor , device , dev ) ;
while ( dev ) {
if ( ( dev - > bus - > number = = related - > bus - > number ) & &
( PCI_SLOT ( dev - > devfn ) = = PCI_SLOT ( related - > devfn ) ) )
break ;
dev = pci_get_device ( vendor , device , dev ) ;
}
return dev ;
}
2009-04-27 18:37:05 +04:00
/*
2009-11-12 21:05:07 +03:00
* These are tables of eigenvectors ( one per line ) which can be used for the
* construction of the syndrome tables . The modified syndrome search algorithm
* uses those to find the symbol in error and thus the DIMM .
2009-04-27 18:37:05 +04:00
*
2009-11-12 21:05:07 +03:00
* Algorithm courtesy of Ross LaFetra from AMD .
2009-04-27 18:37:05 +04:00
*/
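/*
* Layout note: each error symbol owns v_dim consecutive entries, i.e.
* x4_vectors below encodes 36 symbols of 4 vectors each (err_sym
* 0x00-0x23, cf. map_err_sym_to_channel()) and x8_vectors encodes 19
* symbols of 8 vectors each (err_sym 0x00-0x12).
*/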
2009-11-12 21:05:07 +03:00
static u16 x4_vectors [ ] = {
0x2f57 , 0x1afe , 0x66cc , 0xdd88 ,
0x11eb , 0x3396 , 0x7f4c , 0xeac8 ,
0x0001 , 0x0002 , 0x0004 , 0x0008 ,
0x1013 , 0x3032 , 0x4044 , 0x8088 ,
0x106b , 0x30d6 , 0x70fc , 0xe0a8 ,
0x4857 , 0xc4fe , 0x13cc , 0x3288 ,
0x1ac5 , 0x2f4a , 0x5394 , 0xa1e8 ,
0x1f39 , 0x251e , 0xbd6c , 0x6bd8 ,
0x15c1 , 0x2a42 , 0x89ac , 0x4758 ,
0x2b03 , 0x1602 , 0x4f0c , 0xca08 ,
0x1f07 , 0x3a0e , 0x6b04 , 0xbd08 ,
0x8ba7 , 0x465e , 0x244c , 0x1cc8 ,
0x2b87 , 0x164e , 0x642c , 0xdc18 ,
0x40b9 , 0x80de , 0x1094 , 0x20e8 ,
0x27db , 0x1eb6 , 0x9dac , 0x7b58 ,
0x11c1 , 0x2242 , 0x84ac , 0x4c58 ,
0x1be5 , 0x2d7a , 0x5e34 , 0xa718 ,
0x4b39 , 0x8d1e , 0x14b4 , 0x28d8 ,
0x4c97 , 0xc87e , 0x11fc , 0x33a8 ,
0x8e97 , 0x497e , 0x2ffc , 0x1aa8 ,
0x16b3 , 0x3d62 , 0x4f34 , 0x8518 ,
0x1e2f , 0x391a , 0x5cac , 0xf858 ,
0x1d9f , 0x3b7a , 0x572c , 0xfe18 ,
0x15f5 , 0x2a5a , 0x5264 , 0xa3b8 ,
0x1dbb , 0x3b66 , 0x715c , 0xe3f8 ,
0x4397 , 0xc27e , 0x17fc , 0x3ea8 ,
0x1617 , 0x3d3e , 0x6464 , 0xb8b8 ,
0x23ff , 0x12aa , 0xab6c , 0x56d8 ,
0x2dfb , 0x1ba6 , 0x913c , 0x7328 ,
0x185d , 0x2ca6 , 0x7914 , 0x9e28 ,
0x171b , 0x3e36 , 0x7d7c , 0xebe8 ,
0x4199 , 0x82ee , 0x19f4 , 0x2e58 ,
0x4807 , 0xc40e , 0x130c , 0x3208 ,
0x1905 , 0x2e0a , 0x5804 , 0xac08 ,
0x213f , 0x132a , 0xadfc , 0x5ba8 ,
0x19a9 , 0x2efe , 0xb5cc , 0x6f88 ,
2009-04-27 18:37:05 +04:00
} ;
2009-11-12 21:05:07 +03:00
static u16 x8_vectors [ ] = {
0x0145 , 0x028a , 0x2374 , 0x43c8 , 0xa1f0 , 0x0520 , 0x0a40 , 0x1480 ,
0x0211 , 0x0422 , 0x0844 , 0x1088 , 0x01b0 , 0x44e0 , 0x23c0 , 0xed80 ,
0x1011 , 0x0116 , 0x022c , 0x0458 , 0x08b0 , 0x8c60 , 0x2740 , 0x4e80 ,
0x0411 , 0x0822 , 0x1044 , 0x0158 , 0x02b0 , 0x2360 , 0x46c0 , 0xab80 ,
0x0811 , 0x1022 , 0x012c , 0x0258 , 0x04b0 , 0x4660 , 0x8cc0 , 0x2780 ,
0x2071 , 0x40e2 , 0xa0c4 , 0x0108 , 0x0210 , 0x0420 , 0x0840 , 0x1080 ,
0x4071 , 0x80e2 , 0x0104 , 0x0208 , 0x0410 , 0x0820 , 0x1040 , 0x2080 ,
0x8071 , 0x0102 , 0x0204 , 0x0408 , 0x0810 , 0x1020 , 0x2040 , 0x4080 ,
0x019d , 0x03d6 , 0x136c , 0x2198 , 0x50b0 , 0xb2e0 , 0x0740 , 0x0e80 ,
0x0189 , 0x03ea , 0x072c , 0x0e58 , 0x1cb0 , 0x56e0 , 0x37c0 , 0xf580 ,
0x01fd , 0x0376 , 0x06ec , 0x0bb8 , 0x1110 , 0x2220 , 0x4440 , 0x8880 ,
0x0163 , 0x02c6 , 0x1104 , 0x0758 , 0x0eb0 , 0x2be0 , 0x6140 , 0xc280 ,
0x02fd , 0x01c6 , 0x0b5c , 0x1108 , 0x07b0 , 0x25a0 , 0x8840 , 0x6180 ,
0x0801 , 0x012e , 0x025c , 0x04b8 , 0x1370 , 0x26e0 , 0x57c0 , 0xb580 ,
0x0401 , 0x0802 , 0x015c , 0x02b8 , 0x22b0 , 0x13e0 , 0x7140 , 0xe280 ,
0x0201 , 0x0402 , 0x0804 , 0x01b8 , 0x11b0 , 0x31a0 , 0x8040 , 0x7180 ,
0x0101 , 0x0202 , 0x0404 , 0x0808 , 0x1010 , 0x2020 , 0x4040 , 0x8080 ,
0x0001 , 0x0002 , 0x0004 , 0x0008 , 0x0010 , 0x0020 , 0x0040 , 0x0080 ,
0x0100 , 0x0200 , 0x0400 , 0x0800 , 0x1000 , 0x2000 , 0x4000 , 0x8000 ,
} ;
2011-02-23 19:41:50 +03:00
static int decode_syndrome ( u16 syndrome , u16 * vectors , unsigned num_vecs ,
unsigned v_dim )
2009-04-27 18:37:05 +04:00
{
2009-11-12 21:05:07 +03:00
unsigned int i , err_sym ;
for ( err_sym = 0 ; err_sym < num_vecs / v_dim ; err_sym + + ) {
u16 s = syndrome ;
2011-02-23 19:41:50 +03:00
unsigned v_idx = err_sym * v_dim ;
unsigned v_end = ( err_sym + 1 ) * v_dim ;
2009-11-12 21:05:07 +03:00
/* walk over all 16 bits of the syndrome */
for ( i = 1 ; i < ( 1U < < 16 ) ; i < < = 1 ) {
/* if bit is set in that eigenvector... */
if ( v_idx < v_end & & vectors [ v_idx ] & i ) {
u16 ev_comp = vectors [ v_idx + + ] ;
/* ... and bit set in the modified syndrome, */
if ( s & i ) {
/* remove it. */
s ^ = ev_comp ;
2009-04-27 18:25:05 +04:00
2009-11-12 21:05:07 +03:00
if ( ! s )
return err_sym ;
}
2009-04-27 18:37:05 +04:00
2009-11-12 21:05:07 +03:00
} else if ( s & i )
/* can't get to zero, move to next symbol */
break ;
}
2009-04-27 18:37:05 +04:00
}
debugf0 ( " syndrome(%x) not found \n " , syndrome ) ;
return - 1 ;
}
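/*
* Hand-worked illustration: for syndrome 0x0001 against the x4 tables,
* error symbols 0 and 1 fail (the residual syndrome cannot be reduced
* to zero), but symbol 2 owns the identity rows { 0x0001, 0x0002,
* 0x0004, 0x0008 }, so the very first XOR zeroes s and
* decode_syndrome(0x0001, x4_vectors, ARRAY_SIZE(x4_vectors), 4)
* returns 2, which map_err_sym_to_channel() then maps to channel 0.
*/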
2009-05-06 19:55:27 +04:00
2009-11-12 21:05:07 +03:00
static int map_err_sym_to_channel ( int err_sym , int sym_size )
{
if ( sym_size = = 4 )
switch ( err_sym ) {
case 0x20 :
case 0x21 :
return 0 ;
case 0x22 :
case 0x23 :
return 1 ;
default :
return err_sym > > 4 ;
}
/* x8 symbols */
else
switch ( err_sym ) {
/* imaginary bits not in a DIMM */
case 0x10 :
WARN ( 1 , KERN_ERR " Invalid error symbol: 0x%x \n " ,
err_sym ) ;
return - 1 ;
case 0x11 :
return 0 ;
case 0x12 :
return 1 ;
default :
return err_sym > > 3 ;
}
return - 1 ;
}
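/*
* E.g. for x4 symbols, plain data symbols map by their upper nibble:
* err_sym 0x0b -> channel 0, err_sym 0x13 -> channel 1, while
* 0x20/0x21 and 0x22/0x23 land on channels 0 and 1 respectively.
*/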
static int get_channel_from_ecc_syndrome ( struct mem_ctl_info * mci , u16 syndrome )
{
struct amd64_pvt * pvt = mci - > pvt_info ;
2010-03-09 14:46:00 +03:00
int err_sym = - 1 ;
2011-01-19 22:35:12 +03:00
if ( pvt - > ecc_sym_sz = = 8 )
2010-03-09 14:46:00 +03:00
err_sym = decode_syndrome ( syndrome , x8_vectors ,
ARRAY_SIZE ( x8_vectors ) ,
2011-01-19 22:35:12 +03:00
pvt - > ecc_sym_sz ) ;
else if ( pvt - > ecc_sym_sz = = 4 )
2010-03-09 14:46:00 +03:00
err_sym = decode_syndrome ( syndrome , x4_vectors ,
ARRAY_SIZE ( x4_vectors ) ,
2011-01-19 22:35:12 +03:00
pvt - > ecc_sym_sz ) ;
2010-03-09 14:46:00 +03:00
else {
2011-01-19 22:35:12 +03:00
amd64_warn ( " Illegal syndrome type: %u \n " , pvt - > ecc_sym_sz ) ;
2010-03-09 14:46:00 +03:00
return err_sym ;
2009-11-12 21:05:07 +03:00
}
2010-03-09 14:46:00 +03:00
2011-01-19 22:35:12 +03:00
return map_err_sym_to_channel ( err_sym , pvt - > ecc_sym_sz ) ;
2009-11-12 21:05:07 +03:00
}
2009-05-06 19:55:27 +04:00
/*
* Handle any Correctable Errors ( CEs ) that have occurred . Check for valid ERROR
* ADDRESS and process .
*/
2011-01-10 16:24:32 +03:00
static void amd64_handle_ce ( struct mem_ctl_info * mci , struct mce * m )
2009-05-06 19:55:27 +04:00
{
struct amd64_pvt * pvt = mci - > pvt_info ;
2009-10-26 17:00:19 +03:00
u64 sys_addr ;
2011-01-10 16:24:32 +03:00
u16 syndrome ;
2009-05-06 19:55:27 +04:00
/* Ensure that the Error Address is VALID */
2011-01-10 16:24:32 +03:00
if ( ! ( m - > status & MCI_STATUS_ADDRV ) ) {
2010-10-07 20:29:15 +04:00
amd64_mc_err ( mci , " HW has no ERROR_ADDRESS available \n " ) ;
2012-04-16 22:03:50 +04:00
edac_mc_handle_error ( HW_EVENT_ERR_CORRECTED , mci ,
0 , 0 , 0 ,
- 1 , - 1 , - 1 ,
EDAC_MOD_STR ,
" HW has no ERROR_ADDRESS available " ,
NULL ) ;
2009-05-06 19:55:27 +04:00
return ;
}
2011-01-10 16:37:27 +03:00
sys_addr = get_error_address ( m ) ;
2011-01-10 16:24:32 +03:00
syndrome = extract_syndrome ( m - > status ) ;
2009-05-06 19:55:27 +04:00
2010-10-07 20:29:15 +04:00
amd64_mc_err ( mci , " CE ERROR_ADDRESS= 0x%llx \n " , sys_addr ) ;
2009-05-06 19:55:27 +04:00
2011-01-10 16:24:32 +03:00
pvt - > ops - > map_sysaddr_to_csrow ( mci , sys_addr , syndrome ) ;
2009-05-06 19:55:27 +04:00
}
/* Handle any Uncorrectable Errors (UEs) */
2011-01-10 16:24:32 +03:00
static void amd64_handle_ue ( struct mem_ctl_info * mci , struct mce * m )
2009-05-06 19:55:27 +04:00
{
2009-11-13 16:02:57 +03:00
struct mem_ctl_info * log_mci , * src_mci = NULL ;
2009-05-06 19:55:27 +04:00
int csrow ;
2009-10-26 17:00:19 +03:00
u64 sys_addr ;
2009-05-06 19:55:27 +04:00
u32 page , offset ;
log_mci = mci ;
2011-01-10 16:24:32 +03:00
if ( ! ( m - > status & MCI_STATUS_ADDRV ) ) {
2010-10-07 20:29:15 +04:00
amd64_mc_err ( mci , " HW has no ERROR_ADDRESS available \n " ) ;
2012-04-16 22:03:50 +04:00
edac_mc_handle_error ( HW_EVENT_ERR_UNCORRECTED , mci ,
0 , 0 , 0 ,
- 1 , - 1 , - 1 ,
EDAC_MOD_STR ,
" HW has no ERROR_ADDRESS available " ,
NULL ) ;
2009-05-06 19:55:27 +04:00
return ;
}
2011-01-10 16:37:27 +03:00
sys_addr = get_error_address ( m ) ;
2012-04-16 22:03:50 +04:00
error_address_to_page_and_offset ( sys_addr , & page , & offset ) ;
2009-05-06 19:55:27 +04:00
/*
* Find out which node the error address belongs to . This may be
* different from the node that detected the error .
*/
2009-10-26 17:00:19 +03:00
src_mci = find_mc_by_sys_addr ( mci , sys_addr ) ;
2009-05-06 19:55:27 +04:00
if ( ! src_mci ) {
2010-10-07 20:29:15 +04:00
amd64_mc_err ( mci , " ERROR ADDRESS (0x%lx) NOT mapped to a MC \n " ,
( unsigned long ) sys_addr ) ;
2012-04-16 22:03:50 +04:00
edac_mc_handle_error ( HW_EVENT_ERR_UNCORRECTED , mci ,
page , offset , 0 ,
- 1 , - 1 , - 1 ,
EDAC_MOD_STR ,
" ERROR ADDRESS NOT mapped to a MC " , NULL ) ;
2009-05-06 19:55:27 +04:00
return ;
}
log_mci = src_mci ;
2009-10-26 17:00:19 +03:00
csrow = sys_addr_to_csrow ( log_mci , sys_addr ) ;
2009-05-06 19:55:27 +04:00
if ( csrow < 0 ) {
2010-10-07 20:29:15 +04:00
amd64_mc_err ( mci , " ERROR_ADDRESS (0x%lx) NOT mapped to CS \n " ,
( unsigned long ) sys_addr ) ;
2012-04-16 22:03:50 +04:00
edac_mc_handle_error ( HW_EVENT_ERR_UNCORRECTED , mci ,
page , offset , 0 ,
- 1 , - 1 , - 1 ,
EDAC_MOD_STR ,
" ERROR ADDRESS NOT mapped to CS " ,
NULL ) ;
2009-05-06 19:55:27 +04:00
} else {
2012-04-16 22:03:50 +04:00
edac_mc_handle_error ( HW_EVENT_ERR_UNCORRECTED , mci ,
page , offset , 0 ,
csrow , - 1 , - 1 ,
EDAC_MOD_STR , " " , NULL ) ;
2009-05-06 19:55:27 +04:00
}
}
2009-07-24 15:51:42 +04:00
static inline void __amd64_decode_bus_error ( struct mem_ctl_info * mci ,
2011-01-10 16:24:32 +03:00
struct mce * m )
2009-05-06 19:55:27 +04:00
{
2011-01-10 16:24:32 +03:00
u16 ec = EC ( m - > status ) ;
u8 xec = XEC ( m - > status , 0x1f ) ;
u8 ecc_type = ( m - > status > > 45 ) & 0x3 ;
2009-05-06 19:55:27 +04:00
2009-06-25 21:32:38 +04:00
/* Bail out early if this was an 'observed' error */
2011-01-07 18:26:49 +03:00
if ( PP ( ec ) = = NBSL_PP_OBS )
2009-06-25 21:32:38 +04:00
return ;
2009-05-06 19:55:27 +04:00
2009-07-23 18:32:01 +04:00
/* Handle only ECC errors */
if ( xec & & xec ! = F10_NBSL_EXT_ERR_ECC )
2009-05-06 19:55:27 +04:00
return ;
2009-07-23 18:32:01 +04:00
if ( ecc_type = = 2 )
2011-01-10 16:24:32 +03:00
amd64_handle_ce ( mci , m ) ;
2009-07-23 18:32:01 +04:00
else if ( ecc_type = = 1 )
2011-01-10 16:24:32 +03:00
amd64_handle_ue ( mci , m ) ;
2009-05-06 19:55:27 +04:00
}
2011-08-24 20:44:22 +04:00
void amd64_decode_bus_error ( int node_id , struct mce * m )
2009-05-06 19:55:27 +04:00
{
2011-08-24 20:44:22 +04:00
__amd64_decode_bus_error ( mcis [ node_id ] , m ) ;
2009-05-06 19:55:27 +04:00
}
2009-04-27 21:41:25 +04:00
/*
2010-10-01 22:11:07 +04:00
* Use pvt - > F2 which contains the F2 CPU PCI device to get the related
2010-10-01 21:27:58 +04:00
* F1 ( AddrMap ) and F3 ( Misc ) devices . Return negative value on error .
2009-04-27 21:41:25 +04:00
*/
2010-10-15 21:25:38 +04:00
static int reserve_mc_sibling_devs ( struct amd64_pvt * pvt , u16 f1_id , u16 f3_id )
2009-04-27 21:41:25 +04:00
{
/* Reserve the ADDRESS MAP Device */
2010-10-01 22:11:07 +04:00
pvt - > F1 = pci_get_related_function ( pvt - > F2 - > vendor , f1_id , pvt - > F2 ) ;
if ( ! pvt - > F1 ) {
2010-10-07 20:29:15 +04:00
amd64_err ( " error address map device not found: "
" vendor %x device 0x%x (broken BIOS?) \n " ,
PCI_VENDOR_ID_AMD , f1_id ) ;
2010-10-01 21:27:58 +04:00
return - ENODEV ;
2009-04-27 21:41:25 +04:00
}
/* Reserve the MISC Device */
2010-10-01 22:11:07 +04:00
pvt - > F3 = pci_get_related_function ( pvt - > F2 - > vendor , f3_id , pvt - > F2 ) ;
if ( ! pvt - > F3 ) {
pci_dev_put ( pvt - > F1 ) ;
pvt - > F1 = NULL ;
2009-04-27 21:41:25 +04:00
2010-10-07 20:29:15 +04:00
amd64_err ( " error F3 device not found: "
" vendor %x device 0x%x (broken BIOS?) \n " ,
PCI_VENDOR_ID_AMD , f3_id ) ;
2009-04-27 21:41:25 +04:00
2010-10-01 21:27:58 +04:00
return - ENODEV ;
2009-04-27 21:41:25 +04:00
}
2010-10-01 22:11:07 +04:00
debugf1 ( " F1: %s \n " , pci_name ( pvt - > F1 ) ) ;
debugf1 ( " F2: %s \n " , pci_name ( pvt - > F2 ) ) ;
debugf1 ( " F3: %s \n " , pci_name ( pvt - > F3 ) ) ;
2009-04-27 21:41:25 +04:00
return 0 ;
}
2010-10-15 21:25:38 +04:00
static void free_mc_sibling_devs ( struct amd64_pvt * pvt )
2009-04-27 21:41:25 +04:00
{
2010-10-01 22:11:07 +04:00
pci_dev_put ( pvt - > F1 ) ;
pci_dev_put ( pvt - > F3 ) ;
2009-04-27 21:41:25 +04:00
}
/*
* Retrieve the hardware registers of the memory controller ( this includes the
* ' Address Map ' and ' Misc ' device regs )
*/
2010-10-15 21:25:38 +04:00
static void read_mc_regs ( struct amd64_pvt * pvt )
2009-04-27 21:41:25 +04:00
{
2011-01-19 22:35:12 +03:00
struct cpuinfo_x86 * c = & boot_cpu_data ;
2009-04-27 21:41:25 +04:00
u64 msr_val ;
2010-03-09 14:46:00 +03:00
u32 tmp ;
2011-02-21 21:49:01 +03:00
unsigned range ;
2009-04-27 21:41:25 +04:00
/*
* Retrieve TOP_MEM and TOP_MEM2 ; no masking off of reserved bits since
* those are Read - As - Zero
*/
2009-10-12 17:27:45 +04:00
rdmsrl ( MSR_K8_TOP_MEM1 , pvt - > top_mem ) ;
debugf0 ( " TOP_MEM: 0x%016llx \n " , pvt - > top_mem ) ;
2009-04-27 21:41:25 +04:00
/* check first whether TOP_MEM2 is enabled */
rdmsrl ( MSR_K8_SYSCFG , msr_val ) ;
if ( msr_val & ( 1U < < 21 ) ) {
2009-10-12 17:27:45 +04:00
rdmsrl ( MSR_K8_TOP_MEM2 , pvt - > top_mem2 ) ;
debugf0 ( " TOP_MEM2: 0x%016llx \n " , pvt - > top_mem2 ) ;
2009-04-27 21:41:25 +04:00
} else
debugf0 ( " TOP_MEM2 disabled. \n " ) ;
2011-01-07 18:26:49 +03:00
amd64_read_pci_cfg ( pvt - > F3 , NBCAP , & pvt - > nbcap ) ;
2009-04-27 21:41:25 +04:00
2011-01-17 19:52:57 +03:00
read_dram_ctl_register ( pvt ) ;
2009-04-27 21:41:25 +04:00
2010-10-21 20:52:53 +04:00
for ( range = 0 ; range < DRAM_RANGES ; range + + ) {
u8 rw ;
2009-04-27 21:41:25 +04:00
2010-10-21 20:52:53 +04:00
/* read settings for this DRAM range */
read_dram_base_limit_regs ( pvt , range ) ;
rw = dram_rw ( pvt , range ) ;
if ( ! rw )
continue ;
debugf1 ( " DRAM range[%d], base: 0x%016llx; limit: 0x%016llx \n " ,
range ,
get_dram_base ( pvt , range ) ,
get_dram_limit ( pvt , range ) ) ;
debugf1 ( " IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d \n " ,
dram_intlv_en ( pvt , range ) ? " Enabled " : " Disabled " ,
( rw & 0x1 ) ? " R " : " - " ,
( rw & 0x2 ) ? " W " : " - " ,
dram_intlv_sel ( pvt , range ) ,
dram_dst_node ( pvt , range ) ) ;
2009-04-27 21:41:25 +04:00
}
2010-10-08 20:32:29 +04:00
read_dct_base_mask ( pvt ) ;
2009-04-27 21:41:25 +04:00
2010-11-11 19:29:13 +03:00
amd64_read_pci_cfg ( pvt - > F1 , DHAR , & pvt - > dhar ) ;
2010-12-21 17:53:27 +03:00
amd64_read_dct_pci_cfg ( pvt , DBAM0 , & pvt - > dbam0 ) ;
2009-04-27 21:41:25 +04:00
2010-10-01 22:11:07 +04:00
amd64_read_pci_cfg ( pvt - > F3 , F10_ONLINE_SPARE , & pvt - > online_spare ) ;
2009-04-27 21:41:25 +04:00
2010-12-22 16:28:24 +03:00
amd64_read_dct_pci_cfg ( pvt , DCLR0 , & pvt - > dclr0 ) ;
amd64_read_dct_pci_cfg ( pvt , DCHR0 , & pvt - > dchr0 ) ;
2009-04-27 21:41:25 +04:00
2010-12-22 21:31:45 +03:00
if ( ! dct_ganging_enabled ( pvt ) ) {
2010-12-22 16:28:24 +03:00
amd64_read_dct_pci_cfg ( pvt , DCLR1 , & pvt - > dclr1 ) ;
amd64_read_dct_pci_cfg ( pvt , DCHR1 , & pvt - > dchr1 ) ;
2009-04-27 21:41:25 +04:00
}
2010-03-09 14:46:00 +03:00
2011-01-19 22:35:12 +03:00
pvt - > ecc_sym_sz = 4 ;
if ( c - > x86 > = 0x10 ) {
2010-10-08 20:32:29 +04:00
amd64_read_pci_cfg ( pvt - > F3 , EXT_NB_MCA_CFG , & tmp ) ;
2010-12-21 17:53:27 +03:00
amd64_read_dct_pci_cfg ( pvt , DBAM1 , & pvt - > dbam1 ) ;
2010-03-09 14:46:00 +03:00
2011-01-19 22:35:12 +03:00
/* F10h, revD and later can do x8 ECC too */
if ( ( c - > x86 > 0x10 | | c - > x86_model > 7 ) & & tmp & BIT ( 25 ) )
pvt - > ecc_sym_sz = 8 ;
}
2010-10-08 20:32:29 +04:00
dump_misc_regs ( pvt ) ;
2009-04-27 21:41:25 +04:00
}
/*
* NOTE : CPU Revision Dependent code
*
* Input :
2010-11-29 21:49:02 +03:00
* @ csrow_nr ChipSelect Row Number ( 0 .. NUM_CHIPSELECTS - 1 )
2009-04-27 21:41:25 +04:00
* k8 private pointer to -->
* DRAM Bank Address mapping register
* node_id
* DCL register where dual_channel_active is
*
* The DBAM register consists of four 4-bit fields , defined as follows :
*
* Bits : CSROWs
* 0 - 3 CSROWs 0 and 1
* 4 - 7 CSROWs 2 and 3
* 8 - 11 CSROWs 4 and 5
* 12 - 15 CSROWs 6 and 7
*
* Values range from : 0 to 15
* The meaning of the values depends on CPU revision and dual - channel state ,
* see the relevant BKDG for more info .
*
* The memory controller provides for a total of only 8 CSROWs in its current
* architecture . Each " pair " of CSROWs normally represents just one DIMM in
* single channel or two ( 2 ) DIMMs in dual channel mode .
*
* The following code logic collapses the various tables for CSROW based on CPU
* revision .
*
* Returns :
* The number of PAGE_SIZE pages the specified CSROW encompasses .
*
*/
2011-01-18 21:16:08 +03:00
static u32 amd64_csrow_nr_pages ( struct amd64_pvt * pvt , u8 dct , int csrow_nr )
2009-04-27 21:41:25 +04:00
{
2009-10-21 15:44:36 +04:00
u32 cs_mode , nr_pages ;
2012-02-23 05:20:38 +04:00
u32 dbam = dct ? pvt - > dbam1 : pvt - > dbam0 ;
2009-04-27 21:41:25 +04:00
/*
* The math here doesn't look right on the surface because x / 2 * 4 could
* be simplified to x * 2 , but the expression relies on integer math ,
* where 1 / 2 == 0. The intermediate value is the number of bits to
* shift the DBAM register right to extract the proper CSROW field .
*/
2012-02-23 05:20:38 +04:00
cs_mode = ( dbam > > ( ( csrow_nr / 2 ) * 4 ) ) & 0xF ;
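/* e.g. csrow_nr 4 or 5: ( csrow_nr / 2 ) * 4 == 8 , so cs_mode comes
* from DBAM bits [11:8] - matching the bits-to-CSROWs table above. */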
2009-04-27 21:41:25 +04:00
2011-01-18 21:16:08 +03:00
nr_pages = pvt - > ops - > dbam_to_cs ( pvt , dct , cs_mode ) < < ( 20 - PAGE_SHIFT ) ;
2009-04-27 21:41:25 +04:00
2009-10-21 15:44:36 +04:00
debugf0 ( " (csrow=%d) DBAM map index= %d \n " , csrow_nr , cs_mode ) ;
2012-01-28 16:09:38 +04:00
debugf0 ( " nr_pages/channel= %u channel-count = %d \n " ,
2009-04-27 21:41:25 +04:00
nr_pages , pvt - > channel_count ) ;
return nr_pages ;
}
/*
* Initialize the array of csrow attribute instances , based on the values
* from pci config hardware registers .
*/
2010-10-15 21:25:38 +04:00
static int init_csrows ( struct mem_ctl_info * mci )
2009-04-27 21:41:25 +04:00
{
struct csrow_info * csrow ;
2010-10-15 19:44:04 +04:00
struct amd64_pvt * pvt = mci - > pvt_info ;
2012-01-28 04:20:32 +04:00
u64 base , mask ;
2010-10-15 19:44:04 +04:00
u32 val ;
2012-01-28 01:38:08 +04:00
int i , j , empty = 1 ;
enum mem_type mtype ;
enum edac_type edac_mode ;
2012-01-28 16:09:38 +04:00
int nr_pages = 0 ;
2009-04-27 21:41:25 +04:00
2010-12-23 16:07:18 +03:00
amd64_read_pci_cfg ( pvt - > F3 , NBCFG , & val ) ;
2009-04-27 21:41:25 +04:00
2010-10-15 19:44:04 +04:00
pvt - > nbcfg = val ;
2009-04-27 21:41:25 +04:00
2010-10-15 19:44:04 +04:00
debugf0 ( " node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d] \n " ,
pvt - > mc_node_id , val ,
2010-12-23 16:07:18 +03:00
! ! ( val & NBCFG_CHIPKILL ) , ! ! ( val & NBCFG_ECC_ENABLE ) ) ;
2009-04-27 21:41:25 +04:00
2010-11-29 21:49:02 +03:00
for_each_chip_select ( i , 0 , pvt ) {
2009-04-27 21:41:25 +04:00
csrow = & mci - > csrows [ i ] ;
2012-02-23 05:20:38 +04:00
if ( ! csrow_enabled ( i , 0 , pvt ) & & ! csrow_enabled ( i , 1 , pvt ) ) {
2009-04-27 21:41:25 +04:00
debugf1 ( " ----CSROW %d EMPTY for node %d \n " , i ,
pvt - > mc_node_id ) ;
continue ;
}
debugf1 ( " ----CSROW %d VALID for MC node %d \n " ,
i , pvt - > mc_node_id ) ;
empty = 0 ;
2012-02-23 05:20:38 +04:00
if ( csrow_enabled ( i , 0 , pvt ) )
2012-01-28 16:09:38 +04:00
nr_pages = amd64_csrow_nr_pages ( pvt , 0 , i ) ;
2012-02-23 05:20:38 +04:00
if ( csrow_enabled ( i , 1 , pvt ) )
2012-01-28 16:09:38 +04:00
nr_pages + = amd64_csrow_nr_pages ( pvt , 1 , i ) ;
2010-11-29 21:49:02 +03:00
get_cs_base_and_mask ( pvt , i , 0 , & base , & mask ) ;
2009-04-27 21:41:25 +04:00
/* 8 bytes of resolution */
2012-01-28 01:38:08 +04:00
mtype = amd64_determine_memory_type ( pvt , i ) ;
2009-04-27 21:41:25 +04:00
debugf1 ( " for MC node %d csrow %d: \n " , pvt - > mc_node_id , i ) ;
2012-01-28 16:09:38 +04:00
debugf1 ( " nr_pages: %u \n " , nr_pages * pvt - > channel_count ) ;
2009-04-27 21:41:25 +04:00
/*
* determine whether CHIPKILL or JUST ECC or NO ECC is operating
*/
2010-12-23 16:07:18 +03:00
if ( pvt - > nbcfg & NBCFG_ECC_ENABLE )
2012-01-28 01:38:08 +04:00
edac_mode = ( pvt - > nbcfg & NBCFG_CHIPKILL ) ?
EDAC_S4ECD4ED : EDAC_SECDED ;
2009-04-27 21:41:25 +04:00
else
2012-01-28 01:38:08 +04:00
edac_mode = EDAC_NONE ;
for ( j = 0 ; j < pvt - > channel_count ; j + + ) {
csrow - > channels [ j ] . dimm - > mtype = mtype ;
csrow - > channels [ j ] . dimm - > edac_mode = edac_mode ;
2012-01-28 16:09:38 +04:00
csrow - > channels [ j ] . dimm - > nr_pages = nr_pages ;
2012-01-28 01:38:08 +04:00
}
2009-04-27 21:41:25 +04:00
}
return empty ;
}
2009-05-06 19:55:27 +04:00
2009-11-03 17:29:26 +03:00
/* get all cores on this DCT */
2011-02-21 20:55:00 +03:00
static void get_cpus_on_this_dct_cpumask ( struct cpumask * mask , unsigned nid )
2009-11-03 17:29:26 +03:00
{
int cpu ;
for_each_online_cpu ( cpu )
if ( amd_get_nb_id ( cpu ) = = nid )
cpumask_set_cpu ( cpu , mask ) ;
}
/* Check MCG_CTL on all the CPUs on this node */
2011-02-21 20:55:00 +03:00
static bool amd64_nb_mce_bank_enabled_on_node ( unsigned nid )
2009-11-03 17:29:26 +03:00
{
cpumask_var_t mask ;
2009-12-11 20:14:40 +03:00
int cpu , nbe ;
2009-11-03 17:29:26 +03:00
bool ret = false ;
if ( ! zalloc_cpumask_var ( & mask , GFP_KERNEL ) ) {
2010-10-07 20:29:15 +04:00
amd64_warn ( " %s: Error allocating mask \n " , __func__ ) ;
2009-11-03 17:29:26 +03:00
return false ;
}
get_cpus_on_this_dct_cpumask ( mask , nid ) ;
rdmsr_on_cpus ( mask , MSR_IA32_MCG_CTL , msrs ) ;
for_each_cpu ( cpu , mask ) {
2009-12-11 20:14:40 +03:00
struct msr * reg = per_cpu_ptr ( msrs , cpu ) ;
2011-01-07 18:26:49 +03:00
nbe = reg - > l & MSR_MCGCTL_NBE ;
2009-11-03 17:29:26 +03:00
debugf0 ( " core: %u, MCG_CTL: 0x%llx, NB MSR is %s \n " ,
2009-12-11 20:14:40 +03:00
cpu , reg - > q ,
2009-11-03 17:29:26 +03:00
( nbe ? " enabled " : " disabled " ) ) ;
if ( ! nbe )
goto out ;
}
ret = true ;
out :
free_cpumask_var ( mask ) ;
return ret ;
}
2010-10-15 19:44:04 +04:00
static int toggle_ecc_err_reporting ( struct ecc_settings * s , u8 nid , bool on )
2009-11-03 17:29:26 +03:00
{
cpumask_var_t cmask ;
2009-12-11 20:14:40 +03:00
int cpu ;
2009-11-03 17:29:26 +03:00
if ( ! zalloc_cpumask_var ( & cmask , GFP_KERNEL ) ) {
2010-10-07 20:29:15 +04:00
amd64_warn ( " %s: error allocating mask \n " , __func__ ) ;
2009-11-03 17:29:26 +03:00
return - ENOMEM ;
}
2010-10-14 18:01:30 +04:00
get_cpus_on_this_dct_cpumask ( cmask , nid ) ;
2009-11-03 17:29:26 +03:00
rdmsr_on_cpus ( cmask , MSR_IA32_MCG_CTL , msrs ) ;
for_each_cpu ( cpu , cmask ) {
2009-12-11 20:14:40 +03:00
struct msr * reg = per_cpu_ptr ( msrs , cpu ) ;
2009-11-03 17:29:26 +03:00
if ( on ) {
2011-01-07 18:26:49 +03:00
if ( reg - > l & MSR_MCGCTL_NBE )
2010-10-14 18:01:30 +04:00
s - > flags . nb_mce_enable = 1 ;
2009-11-03 17:29:26 +03:00
2011-01-07 18:26:49 +03:00
reg - > l | = MSR_MCGCTL_NBE ;
2009-11-03 17:29:26 +03:00
} else {
/*
2010-02-24 16:49:47 +03:00
* Turn off NB MCE reporting only when it was off before
2009-11-03 17:29:26 +03:00
*/
2010-10-14 18:01:30 +04:00
if ( ! s - > flags . nb_mce_enable )
2011-01-07 18:26:49 +03:00
reg - > l & = ~ MSR_MCGCTL_NBE ;
2009-11-03 17:29:26 +03:00
}
}
wrmsr_on_cpus ( cmask , MSR_IA32_MCG_CTL , msrs ) ;
free_cpumask_var ( cmask ) ;
return 0 ;
}
2010-10-15 19:44:04 +04:00
static bool enable_ecc_error_reporting ( struct ecc_settings * s , u8 nid ,
struct pci_dev * F3 )
2009-04-27 21:46:08 +04:00
{
2010-10-15 19:44:04 +04:00
bool ret = true ;
2010-12-22 21:48:20 +03:00
u32 value , mask = 0x3 ; /* UECC/CECC enable */
2009-04-27 21:46:08 +04:00
2010-10-15 19:44:04 +04:00
if ( toggle_ecc_err_reporting ( s , nid , ON ) ) {
amd64_warn ( " Error enabling ECC reporting over MCGCTL! \n " ) ;
return false ;
}
2010-12-22 21:48:20 +03:00
amd64_read_pci_cfg ( F3 , NBCTL , & value ) ;
2009-04-27 21:46:08 +04:00
2010-10-14 18:01:30 +04:00
s - > old_nbctl = value & mask ;
s - > nbctl_valid = true ;
2009-04-27 21:46:08 +04:00
value | = mask ;
2010-12-22 21:48:20 +03:00
amd64_write_pci_cfg ( F3 , NBCTL , value ) ;
2009-04-27 21:46:08 +04:00
2010-12-23 16:07:18 +03:00
amd64_read_pci_cfg ( F3 , NBCFG , & value ) ;
2009-04-27 21:46:08 +04:00
2010-12-23 16:07:18 +03:00
debugf0 ( " 1: node %d, NBCFG=0x%08x[DramEccEn: %d] \n " ,
nid , value , ! ! ( value & NBCFG_ECC_ENABLE ) ) ;
2009-04-27 21:46:08 +04:00
2010-12-23 16:07:18 +03:00
if ( ! ( value & NBCFG_ECC_ENABLE ) ) {
2010-10-07 20:29:15 +04:00
amd64_warn ( " DRAM ECC disabled on this node, enabling... \n " ) ;
2009-04-27 21:46:08 +04:00
2010-10-14 18:01:30 +04:00
s - > flags . nb_ecc_prev = 0 ;
2010-02-24 16:49:47 +03:00
2009-04-27 21:46:08 +04:00
/* Attempt to turn on DRAM ECC Enable */
2010-12-23 16:07:18 +03:00
value | = NBCFG_ECC_ENABLE ;
amd64_write_pci_cfg ( F3 , NBCFG , value ) ;
2009-04-27 21:46:08 +04:00
2010-12-23 16:07:18 +03:00
amd64_read_pci_cfg ( F3 , NBCFG , & value ) ;
2009-04-27 21:46:08 +04:00
2010-12-23 16:07:18 +03:00
if ( ! ( value & NBCFG_ECC_ENABLE ) ) {
2010-10-07 20:29:15 +04:00
amd64_warn ( " Hardware rejected DRAM ECC enable, "
" check memory DIMM configuration. \n " ) ;
2010-10-15 19:44:04 +04:00
ret = false ;
2009-04-27 21:46:08 +04:00
} else {
2010-10-07 20:29:15 +04:00
amd64_info ( " Hardware accepted DRAM ECC Enable \n " ) ;
2009-04-27 21:46:08 +04:00
}
2010-02-24 16:49:47 +03:00
} else {
2010-10-14 18:01:30 +04:00
s - > flags . nb_ecc_prev = 1 ;
2009-04-27 21:46:08 +04:00
}
2010-02-24 16:49:47 +03:00
2010-12-23 16:07:18 +03:00
debugf0 ( " 2: node %d, NBCFG=0x%08x[DramEccEn: %d] \n " ,
nid , value , ! ! ( value & NBCFG_ECC_ENABLE ) ) ;
2009-04-27 21:46:08 +04:00
2010-10-15 19:44:04 +04:00
return ret ;
2009-04-27 21:46:08 +04:00
}
2010-10-15 21:25:38 +04:00
static void restore_ecc_error_reporting ( struct ecc_settings * s , u8 nid ,
struct pci_dev * F3 )
2009-04-27 21:46:08 +04:00
{
2010-12-22 21:48:20 +03:00
u32 value , mask = 0x3 ; /* UECC/CECC enable */
2009-04-27 21:46:08 +04:00
2010-10-14 18:01:30 +04:00
if ( ! s - > nbctl_valid )
2009-04-27 21:46:08 +04:00
return ;
2010-12-22 21:48:20 +03:00
amd64_read_pci_cfg ( F3 , NBCTL , & value ) ;
2009-04-27 21:46:08 +04:00
value & = ~ mask ;
2010-10-14 18:01:30 +04:00
value | = s - > old_nbctl ;
2009-04-27 21:46:08 +04:00
2010-12-22 21:48:20 +03:00
amd64_write_pci_cfg ( F3 , NBCTL , value ) ;
2009-04-27 21:46:08 +04:00
2010-10-14 18:01:30 +04:00
/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
if ( ! s - > flags . nb_ecc_prev ) {
2010-12-23 16:07:18 +03:00
amd64_read_pci_cfg ( F3 , NBCFG , & value ) ;
value & = ~ NBCFG_ECC_ENABLE ;
amd64_write_pci_cfg ( F3 , NBCFG , value ) ;
2010-02-24 16:49:47 +03:00
}
/* restore the NB Enable MCGCTL bit */
2010-10-15 19:44:04 +04:00
if ( toggle_ecc_err_reporting ( s , nid , OFF ) )
2010-10-07 20:29:15 +04:00
amd64_warn ( " Error restoring NB MCGCTL settings! \n " ) ;
2009-04-27 21:46:08 +04:00
}
/*
2010-10-15 19:44:04 +04:00
* EDAC requires that the BIOS have ECC enabled before
* taking over the processing of ECC errors . A command line
* option allows forcing hardware ECC on later in
* enable_ecc_error_reporting ( ) .
2009-04-27 21:46:08 +04:00
*/
2010-02-11 19:15:57 +03:00
static const char * ecc_msg =
" ECC disabled in the BIOS or no ECC capability, module will not load. \n "
" Either enable ECC checking or force module loading by setting "
" 'ecc_enable_override'. \n "
" (Note that use of the override may cause unknown side effects.) \n " ;
2009-08-05 17:47:22 +04:00
2010-10-15 19:44:04 +04:00
static bool ecc_enabled ( struct pci_dev * F3 , u8 nid )
2009-04-27 21:46:08 +04:00
{
u32 value ;
2010-10-15 19:44:04 +04:00
u8 ecc_en = 0 ;
2009-09-16 15:05:46 +04:00
bool nb_mce_en = false ;
2009-04-27 21:46:08 +04:00
2010-12-23 16:07:18 +03:00
amd64_read_pci_cfg ( F3 , NBCFG , & value ) ;
2009-04-27 21:46:08 +04:00
2010-12-23 16:07:18 +03:00
ecc_en = ! ! ( value & NBCFG_ECC_ENABLE ) ;
2010-10-15 19:44:04 +04:00
amd64_info ( " DRAM ECC %s. \n " , ( ecc_en ? " enabled " : " disabled " ) ) ;
2009-04-27 21:46:08 +04:00
2010-10-15 19:44:04 +04:00
nb_mce_en = amd64_nb_mce_bank_enabled_on_node ( nid ) ;
2009-09-16 15:05:46 +04:00
if ( ! nb_mce_en )
2010-10-15 19:44:04 +04:00
amd64_notice ( " NB MCE bank disabled, set MSR "
" 0x%08x[4] on node %d to enable. \n " ,
MSR_IA32_MCG_CTL , nid ) ;
2009-04-27 21:46:08 +04:00
2010-10-15 19:44:04 +04:00
if ( ! ecc_en | | ! nb_mce_en ) {
amd64_notice ( " %s " , ecc_msg ) ;
return false ;
}
return true ;
2009-04-27 21:46:08 +04:00
}
2009-04-27 22:01:01 +04:00
static struct mcidev_sysfs_attribute sysfs_attrs [ ARRAY_SIZE ( amd64_dbg_attrs ) +
ARRAY_SIZE ( amd64_inj_attrs ) +
1 ] ;
static struct mcidev_sysfs_attribute terminator = { . attr = { . name = NULL } } ;
2010-10-15 21:25:38 +04:00
static void set_mc_sysfs_attrs ( struct mem_ctl_info * mci )
2009-04-27 22:01:01 +04:00
{
unsigned int i = 0 , j = 0 ;
for ( ; i < ARRAY_SIZE ( amd64_dbg_attrs ) ; i + + )
sysfs_attrs [ i ] = amd64_dbg_attrs [ i ] ;
2010-11-26 21:24:44 +03:00
if ( boot_cpu_data . x86 > = 0x10 )
for ( j = 0 ; j < ARRAY_SIZE ( amd64_inj_attrs ) ; j + + , i + + )
sysfs_attrs [ i ] = amd64_inj_attrs [ j ] ;
2009-04-27 22:01:01 +04:00
sysfs_attrs [ i ] = terminator ;
mci - > mc_driver_sysfs_attributes = sysfs_attrs ;
}
2011-01-19 20:15:10 +03:00
static void setup_mci_misc_attrs ( struct mem_ctl_info * mci ,
struct amd64_family_type * fam )
2009-04-27 22:01:01 +04:00
{
struct amd64_pvt * pvt = mci - > pvt_info ;
mci - > mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2 ;
mci - > edac_ctl_cap = EDAC_FLAG_NONE ;
2011-01-07 18:26:49 +03:00
if ( pvt - > nbcap & NBCAP_SECDED )
2009-04-27 22:01:01 +04:00
mci - > edac_ctl_cap | = EDAC_FLAG_SECDED ;
2011-01-07 18:26:49 +03:00
if ( pvt - > nbcap & NBCAP_CHIPKILL )
2009-04-27 22:01:01 +04:00
mci - > edac_ctl_cap | = EDAC_FLAG_S4ECD4ED ;
mci - > edac_cap = amd64_determine_edac_cap ( pvt ) ;
mci - > mod_name = EDAC_MOD_STR ;
mci - > mod_ver = EDAC_AMD64_VERSION ;
2011-01-19 20:15:10 +03:00
mci - > ctl_name = fam - > ctl_name ;
2010-10-01 22:11:07 +04:00
mci - > dev_name = pci_name ( pvt - > F2 ) ;
2009-04-27 22:01:01 +04:00
mci - > ctl_page_to_phys = NULL ;
/* memory scrubber interface */
mci - > set_sdram_scrub_rate = amd64_set_scrub_rate ;
mci - > get_sdram_scrub_rate = amd64_get_scrub_rate ;
}
2010-10-01 21:20:05 +04:00
/*
* returns a pointer to the family descriptor on success , NULL otherwise .
*/
static struct amd64_family_type * amd64_per_family_init ( struct amd64_pvt * pvt )
2010-10-01 20:38:19 +04:00
{
2010-10-01 21:20:05 +04:00
u8 fam = boot_cpu_data . x86 ;
struct amd64_family_type * fam_type = NULL ;
switch ( fam ) {
2010-10-01 20:38:19 +04:00
case 0xf :
2010-10-01 21:20:05 +04:00
fam_type = & amd64_family_types [ K8_CPUS ] ;
2010-10-01 21:35:38 +04:00
pvt - > ops = & amd64_family_types [ K8_CPUS ] . ops ;
2010-10-01 20:38:19 +04:00
break ;
2011-01-19 20:15:10 +03:00
2010-10-01 20:38:19 +04:00
case 0x10 :
2010-10-01 21:20:05 +04:00
fam_type = & amd64_family_types [ F10_CPUS ] ;
2010-10-01 21:35:38 +04:00
pvt - > ops = & amd64_family_types [ F10_CPUS ] . ops ;
2011-01-19 20:15:10 +03:00
break ;
case 0x15 :
fam_type = & amd64_family_types [ F15_CPUS ] ;
pvt - > ops = & amd64_family_types [ F15_CPUS ] . ops ;
2010-10-01 20:38:19 +04:00
break ;
default :
2010-10-07 20:29:15 +04:00
amd64_err ( " Unsupported family! \n " ) ;
2010-10-01 21:20:05 +04:00
return NULL ;
2010-10-01 20:38:19 +04:00
}
2010-10-01 21:20:05 +04:00
2010-10-01 21:35:38 +04:00
pvt - > ext_model = boot_cpu_data . x86_model > > 4 ;
2011-01-19 20:15:10 +03:00
amd64_info ( " %s %sdetected (node %d). \n " , fam_type - > ctl_name ,
2010-10-01 21:20:05 +04:00
( fam = = 0xf ?
2010-10-07 20:29:15 +04:00
( pvt - > ext_model > = K8_REV_F ? " revF or later "
: " revE or earlier " )
: " " ) , pvt - > mc_node_id ) ;
2010-10-01 21:20:05 +04:00
return fam_type ;
2010-10-01 20:38:19 +04:00
}
2010-10-15 19:44:04 +04:00
static int amd64_init_one_instance ( struct pci_dev * F2 )
2009-04-27 22:01:01 +04:00
{
struct amd64_pvt * pvt = NULL ;
2010-10-01 21:20:05 +04:00
struct amd64_family_type * fam_type = NULL ;
2010-10-15 21:25:38 +04:00
struct mem_ctl_info * mci = NULL ;
2012-04-16 22:03:50 +04:00
struct edac_mc_layer layers [ 2 ] ;
2009-04-27 22:01:01 +04:00
int err = 0 , ret ;
2010-10-15 21:25:38 +04:00
u8 nid = get_node_id ( F2 ) ;
2009-04-27 22:01:01 +04:00
ret = - ENOMEM ;
pvt = kzalloc ( sizeof ( struct amd64_pvt ) , GFP_KERNEL ) ;
if ( ! pvt )
2010-10-15 21:25:38 +04:00
goto err_ret ;
2009-04-27 22:01:01 +04:00
2010-10-15 21:25:38 +04:00
pvt - > mc_node_id = nid ;
2010-10-01 22:11:07 +04:00
pvt - > F2 = F2 ;
2009-04-27 22:01:01 +04:00
2010-10-01 20:38:19 +04:00
ret = - EINVAL ;
2010-10-01 21:20:05 +04:00
fam_type = amd64_per_family_init ( pvt ) ;
if ( ! fam_type )
2010-10-01 20:38:19 +04:00
goto err_free ;
2009-04-27 22:01:01 +04:00
ret = - ENODEV ;
2010-10-15 21:25:38 +04:00
err = reserve_mc_sibling_devs ( pvt , fam_type - > f1_id , fam_type - > f3_id ) ;
2009-04-27 22:01:01 +04:00
if ( err )
goto err_free ;
2010-10-15 21:25:38 +04:00
read_mc_regs ( pvt ) ;
2009-04-27 22:01:01 +04:00
/*
* We need to determine how many memory channels there are . Then use
* that information for calculating the size of the dynamic instance
2010-10-15 21:25:38 +04:00
* tables in the ' mci ' structure .
2009-04-27 22:01:01 +04:00
*/
2010-10-15 21:25:38 +04:00
ret = - EINVAL ;
2009-04-27 22:01:01 +04:00
pvt - > channel_count = pvt - > ops - > early_channel_count ( pvt ) ;
if ( pvt - > channel_count < 0 )
2010-10-15 21:25:38 +04:00
goto err_siblings ;
2009-04-27 22:01:01 +04:00
ret = - ENOMEM ;
2012-04-16 22:03:50 +04:00
layers [ 0 ] . type = EDAC_MC_LAYER_CHIP_SELECT ;
layers [ 0 ] . size = pvt - > csels [ 0 ] . b_cnt ;
layers [ 0 ] . is_virt_csrow = true ;
layers [ 1 ] . type = EDAC_MC_LAYER_CHANNEL ;
layers [ 1 ] . size = pvt - > channel_count ;
layers [ 1 ] . is_virt_csrow = false ;
2012-05-02 21:37:00 +04:00
mci = edac_mc_alloc ( nid , ARRAY_SIZE ( layers ) , layers , 0 ) ;
2009-04-27 22:01:01 +04:00
if ( ! mci )
2010-10-15 21:25:38 +04:00
goto err_siblings ;
2009-04-27 22:01:01 +04:00
mci - > pvt_info = pvt ;
2010-10-01 22:11:07 +04:00
mci - > dev = & pvt - > F2 - > dev ;
2009-04-27 22:01:01 +04:00
2011-01-19 20:15:10 +03:00
setup_mci_misc_attrs ( mci , fam_type ) ;
2010-10-15 21:25:38 +04:00
if ( init_csrows ( mci ) )
2009-04-27 22:01:01 +04:00
mci - > edac_cap = EDAC_FLAG_NONE ;
2010-10-15 21:25:38 +04:00
set_mc_sysfs_attrs ( mci ) ;
2009-04-27 22:01:01 +04:00
ret = - ENODEV ;
if ( edac_mc_add_mc ( mci ) ) {
debugf1 ( " failed edac_mc_add_mc() \n " ) ;
goto err_add_mc ;
}
2009-07-24 15:51:42 +04:00
/* register the error decoder with EDAC MCE */
if ( report_gart_errors )
amd_report_gart_errors ( true ) ;
amd_register_ecc_decoder ( amd64_decode_bus_error ) ;
2010-10-15 21:25:38 +04:00
mcis [ nid ] = mci ;
atomic_inc ( & drv_instances ) ;
2009-04-27 22:01:01 +04:00
return 0 ;
err_add_mc :
edac_mc_free ( mci ) ;
2010-10-15 21:25:38 +04:00
err_siblings :
free_mc_sibling_devs ( pvt ) ;
2009-04-27 22:01:01 +04:00
2010-10-15 21:25:38 +04:00
err_free :
kfree ( pvt ) ;
2009-04-27 22:01:01 +04:00
2010-10-15 21:25:38 +04:00
err_ret :
2009-04-27 22:01:01 +04:00
return ret ;
}
2010-10-15 19:44:04 +04:00
static int __devinit amd64_probe_one_instance ( struct pci_dev * pdev ,
2010-10-01 21:35:38 +04:00
const struct pci_device_id * mc_type )
2009-04-27 22:01:01 +04:00
{
2010-10-14 18:01:30 +04:00
u8 nid = get_node_id ( pdev ) ;
2010-10-15 19:44:04 +04:00
struct pci_dev * F3 = node_to_amd_nb ( nid ) - > misc ;
2010-10-14 18:01:30 +04:00
struct ecc_settings * s ;
2010-10-15 19:44:04 +04:00
int ret = 0 ;
2009-04-27 22:01:01 +04:00
ret = pci_enable_device ( pdev ) ;
2010-10-01 21:35:38 +04:00
if ( ret < 0 ) {
debugf0 ( " ret=%d \n " , ret ) ;
return - EIO ;
}
2009-04-27 22:01:01 +04:00
2010-10-14 18:01:30 +04:00
ret = - ENOMEM ;
s = kzalloc ( sizeof ( struct ecc_settings ) , GFP_KERNEL ) ;
if ( ! s )
2010-10-15 19:44:04 +04:00
goto err_out ;
2010-10-14 18:01:30 +04:00
ecc_stngs [ nid ] = s ;
2010-10-15 19:44:04 +04:00
if ( ! ecc_enabled ( F3 , nid ) ) {
ret = - ENODEV ;
if ( ! ecc_enable_override )
goto err_enable ;
amd64_warn ( " Forcing ECC on! \n " ) ;
if ( ! enable_ecc_error_reporting ( s , nid , F3 ) )
goto err_enable ;
}
ret = amd64_init_one_instance ( pdev ) ;
2010-10-15 21:25:38 +04:00
if ( ret < 0 ) {
2010-10-14 18:01:30 +04:00
amd64_err ( " Error probing instance: %d \n " , nid ) ;
2010-10-15 21:25:38 +04:00
restore_ecc_error_reporting ( s , nid , F3 ) ;
}
2009-04-27 22:01:01 +04:00
return ret ;
2010-10-15 19:44:04 +04:00
err_enable :
kfree ( s ) ;
ecc_stngs [ nid ] = NULL ;
err_out :
return ret ;
2009-04-27 22:01:01 +04:00
}
static void __devexit amd64_remove_one_instance ( struct pci_dev * pdev )
{
struct mem_ctl_info * mci ;
struct amd64_pvt * pvt ;
2010-10-15 21:25:38 +04:00
u8 nid = get_node_id ( pdev ) ;
struct pci_dev * F3 = node_to_amd_nb ( nid ) - > misc ;
struct ecc_settings * s = ecc_stngs [ nid ] ;
2009-04-27 22:01:01 +04:00
/* Remove from EDAC CORE tracking list */
mci = edac_mc_del_mc ( & pdev - > dev ) ;
if ( ! mci )
return ;
pvt = mci - > pvt_info ;
2010-10-15 21:25:38 +04:00
restore_ecc_error_reporting ( s , nid , F3 ) ;
2009-04-27 22:01:01 +04:00
2010-10-15 21:25:38 +04:00
free_mc_sibling_devs ( pvt ) ;
2009-04-27 22:01:01 +04:00
2009-07-24 15:51:42 +04:00
/* unregister from EDAC MCE */
amd_report_gart_errors ( false ) ;
amd_unregister_ecc_decoder ( amd64_decode_bus_error ) ;
2010-10-15 21:25:38 +04:00
kfree ( ecc_stngs [ nid ] ) ;
ecc_stngs [ nid ] = NULL ;
2010-10-14 18:01:30 +04:00
2009-04-27 22:01:01 +04:00
/* Free the EDAC CORE resources */
2009-12-21 17:15:59 +03:00
mci - > pvt_info = NULL ;
2010-10-15 21:25:38 +04:00
mcis [ nid ] = NULL ;
2009-12-21 17:15:59 +03:00
kfree ( pvt ) ;
2009-04-27 22:01:01 +04:00
edac_mc_free ( mci ) ;
}
/*
* This table is part of the interface for loading drivers for PCI devices . The
* PCI core identifies what devices are on a system during boot , and then
* queries this table to see if this driver handles a given device it found .
*/
2012-02-27 10:41:47 +04:00
static DEFINE_PCI_DEVICE_TABLE ( amd64_pci_table ) = {
2009-04-27 22:01:01 +04:00
{
. vendor = PCI_VENDOR_ID_AMD ,
. device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL ,
. subvendor = PCI_ANY_ID ,
. subdevice = PCI_ANY_ID ,
. class = 0 ,
. class_mask = 0 ,
} ,
{
. vendor = PCI_VENDOR_ID_AMD ,
. device = PCI_DEVICE_ID_AMD_10H_NB_DRAM ,
. subvendor = PCI_ANY_ID ,
. subdevice = PCI_ANY_ID ,
. class = 0 ,
. class_mask = 0 ,
} ,
2011-01-19 20:15:10 +03:00
{
. vendor = PCI_VENDOR_ID_AMD ,
. device = PCI_DEVICE_ID_AMD_15H_NB_F2 ,
. subvendor = PCI_ANY_ID ,
. subdevice = PCI_ANY_ID ,
. class = 0 ,
. class_mask = 0 ,
} ,
2009-04-27 22:01:01 +04:00
{ 0 , }
} ;
MODULE_DEVICE_TABLE ( pci , amd64_pci_table ) ;
static struct pci_driver amd64_pci_driver = {
. name = EDAC_MOD_STR ,
2010-10-15 19:44:04 +04:00
. probe = amd64_probe_one_instance ,
2009-04-27 22:01:01 +04:00
. remove = __devexit_p ( amd64_remove_one_instance ) ,
. id_table = amd64_pci_table ,
} ;
2010-10-15 21:25:38 +04:00
static void setup_pci_device ( void )
2009-04-27 22:01:01 +04:00
{
struct mem_ctl_info * mci ;
struct amd64_pvt * pvt ;
if ( amd64_ctl_pci )
return ;
2010-10-13 18:11:59 +04:00
mci = mcis [ 0 ] ;
2009-04-27 22:01:01 +04:00
if ( mci ) {
pvt = mci - > pvt_info ;
amd64_ctl_pci =
2010-10-01 22:11:07 +04:00
edac_pci_create_generic_ctl ( & pvt - > F2 - > dev , EDAC_MOD_STR ) ;
2009-04-27 22:01:01 +04:00
if ( ! amd64_ctl_pci ) {
pr_warning ( " %s(): Unable to create PCI control \n " ,
__func__ ) ;
pr_warning ( " %s(): PCI error report via EDAC not set \n " ,
__func__ ) ;
}
}
}
static int __init amd64_edac_init ( void )
{
2010-10-15 21:25:38 +04:00
int err = - ENODEV ;
2009-04-27 22:01:01 +04:00
2011-01-19 20:15:10 +03:00
printk ( KERN_INFO " AMD64 EDAC driver v%s \n " , EDAC_AMD64_VERSION ) ;
2009-04-27 22:01:01 +04:00
opstate_init ( ) ;
2010-10-29 19:14:31 +04:00
if ( amd_cache_northbridges ( ) < 0 )
2009-12-21 20:13:01 +03:00
goto err_ret ;
2009-04-27 22:01:01 +04:00
2010-10-13 18:11:59 +04:00
err = - ENOMEM ;
2010-10-14 18:01:30 +04:00
mcis = kzalloc ( amd_nb_num ( ) * sizeof ( mcis [ 0 ] ) , GFP_KERNEL ) ;
ecc_stngs = kzalloc ( amd_nb_num ( ) * sizeof ( ecc_stngs [ 0 ] ) , GFP_KERNEL ) ;
2010-10-15 21:25:38 +04:00
if ( ! ( mcis & & ecc_stngs ) )
2011-03-29 20:10:53 +04:00
goto err_free ;
2010-10-13 18:11:59 +04:00
2009-12-11 20:14:40 +03:00
msrs = msrs_alloc ( ) ;
2009-12-21 20:13:01 +03:00
if ( ! msrs )
2010-10-15 21:25:38 +04:00
goto err_free ;
2009-12-11 20:14:40 +03:00
2009-04-27 22:01:01 +04:00
err = pci_register_driver ( & amd64_pci_driver ) ;
if ( err )
2009-12-21 20:13:01 +03:00
goto err_pci ;
2009-04-27 22:01:01 +04:00
2009-12-21 20:13:01 +03:00
err = - ENODEV ;
2010-10-15 21:25:38 +04:00
if ( ! atomic_read ( & drv_instances ) )
goto err_no_instances ;
2009-04-27 22:01:01 +04:00
2010-10-15 21:25:38 +04:00
setup_pci_device ( ) ;
return 0 ;
2009-04-27 22:01:01 +04:00
2010-10-15 21:25:38 +04:00
err_no_instances :
2009-04-27 22:01:01 +04:00
pci_unregister_driver ( & amd64_pci_driver ) ;
2010-10-13 18:11:59 +04:00
2009-12-21 20:13:01 +03:00
err_pci :
msrs_free ( msrs ) ;
msrs = NULL ;
2010-10-13 18:11:59 +04:00
2010-10-15 21:25:38 +04:00
err_free :
kfree ( mcis ) ;
mcis = NULL ;
kfree ( ecc_stngs ) ;
ecc_stngs = NULL ;
2009-12-21 20:13:01 +03:00
err_ret :
2009-04-27 22:01:01 +04:00
return err ;
}
static void __exit amd64_edac_exit ( void )
{
if ( amd64_ctl_pci )
edac_pci_release_generic_ctl ( amd64_ctl_pci ) ;
pci_unregister_driver ( & amd64_pci_driver ) ;
2009-12-11 20:14:40 +03:00
2010-10-14 18:01:30 +04:00
kfree ( ecc_stngs ) ;
ecc_stngs = NULL ;
2010-10-13 18:11:59 +04:00
kfree ( mcis ) ;
mcis = NULL ;
2009-12-11 20:14:40 +03:00
msrs_free ( msrs ) ;
msrs = NULL ;
2009-04-27 22:01:01 +04:00
}
module_init ( amd64_edac_init ) ;
module_exit ( amd64_edac_exit ) ;
MODULE_LICENSE ( " GPL " ) ;
MODULE_AUTHOR ( " SoftwareBitMaker: Doug Thompson, "
" Dave Peterson, Thayne Harbaugh " ) ;
MODULE_DESCRIPTION ( " MC support for AMD64 memory controllers - "
EDAC_AMD64_VERSION ) ;
module_param ( edac_op_state , int , 0444 ) ;
MODULE_PARM_DESC ( edac_op_state , " EDAC Error Reporting state: 0=Poll,1=NMI " ) ;