/*
 * (c) 2005, 2006 Advanced Micro Devices, Inc.
 * Your use of this code is subject to the terms and conditions of the
 * GNU general public license version 2. See "COPYING" or
 * http://www.gnu.org/licenses/gpl.html
 *
 * Written by Jacob Shin - AMD, Inc.
 *
 * Support: jacob.shin@amd.com
 *
 * April 2006
 * - added support for AMD Family 0x10 processors
 *
 * All MC4_MISCi registers are shared between multi-cores
 */
# include <linux/cpu.h>
# include <linux/errno.h>
# include <linux/init.h>
# include <linux/interrupt.h>
# include <linux/kobject.h>
# include <linux/notifier.h>
# include <linux/sched.h>
# include <linux/smp.h>
# include <linux/sysdev.h>
# include <linux/sysfs.h>
# include <asm/apic.h>
# include <asm/mce.h>
# include <asm/msr.h>
# include <asm/percpu.h>
2006-01-11 22:44:36 +01:00
# include <asm/idle.h>
2005-11-05 17:25:53 +01:00
#define PFX               "mce_threshold: "
#define VERSION           "version 1.1.1"

#define NR_BANKS          6		/* MCA banks scanned per CPU */
#define NR_BLOCKS         9		/* max threshold blocks per bank */
#define THRESHOLD_MAX     0xFFF		/* 12-bit error counter maximum */
#define INT_TYPE_APIC     0x00020000	/* IntType field value for APIC */
#define MASK_VALID_HI     0x80000000	/* block-valid bit (bit 31 of high) */
#define MASK_LVTOFF_HI    0x00F00000	/* APIC LVT offset field */
#define MASK_COUNT_EN_HI  0x00080000	/* error counter enable */
#define MASK_INT_TYPE_HI  0x00060000	/* interrupt type field */
#define MASK_OVERFLOW_HI  0x00010000	/* counter overflow flag */
#define MASK_ERR_COUNT_HI 0x00000FFF	/* current error count field */
#define MASK_BLKPTR_LO    0xFF000000	/* pointer to extended block MSRs */
#define MCG_XBLK_ADDR     0xC0000400	/* base MSR of the extended blocks */
2006-06-26 13:58:53 +02:00
/*
 * State for one MCi_MISCj threshold register ("block").  Blocks beyond
 * the first in a bank are chained onto the first block via @miscj.
 */
struct threshold_block {
	unsigned int block;		/* block index within the bank */
	unsigned int bank;		/* MCA bank number */
	unsigned int cpu;		/* CPU that owns the sysfs files */
	u32 address;			/* MSR address of this block */
	u16 interrupt_enable;		/* APIC threshold interrupt on/off */
	u16 threshold_limit;		/* error count that fires the interrupt */
	struct kobject kobj;		/* sysfs directory for this block */
	struct list_head miscj;		/* sibling blocks in the same bank */
};

/* defaults used early on boot */
static struct threshold_block threshold_defaults = {
	.interrupt_enable = 0,
	.threshold_limit = THRESHOLD_MAX,
};

/* One sysfs directory per bank; shared banks are symlinked between cores. */
struct threshold_bank {
	struct kobject kobj;
	struct threshold_block *blocks;	/* head of the block list */
	cpumask_t cpus;			/* CPUs sharing this bank */
};
static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);

#ifdef CONFIG_SMP
/* bank 4 (northbridge) is shared between the cores of a package */
static unsigned char shared_bank[NR_BANKS] = {
	0, 0, 0, 0, 1
};
#endif

static DEFINE_PER_CPU(unsigned char, bank_map);	/* see which banks are on */
/*
* CPU Initialization
*/
/* must be called with correct cpu affinity */
2006-06-26 13:58:53 +02:00
static void threshold_restart_bank ( struct threshold_block * b ,
2005-11-05 17:25:53 +01:00
int reset , u16 old_limit )
{
u32 mci_misc_hi , mci_misc_lo ;
2006-06-26 13:58:53 +02:00
rdmsr ( b - > address , mci_misc_lo , mci_misc_hi ) ;
2005-11-05 17:25:53 +01:00
if ( b - > threshold_limit < ( mci_misc_hi & THRESHOLD_MAX ) )
reset = 1 ; /* limit cannot be lower than err count */
if ( reset ) { /* reset err count and overflow bit */
mci_misc_hi =
( mci_misc_hi & ~ ( MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI ) ) |
( THRESHOLD_MAX - b - > threshold_limit ) ;
} else if ( old_limit ) { /* change limit w/o reset */
int new_count = ( mci_misc_hi & THRESHOLD_MAX ) +
( old_limit - b - > threshold_limit ) ;
mci_misc_hi = ( mci_misc_hi & ~ MASK_ERR_COUNT_HI ) |
( new_count & THRESHOLD_MAX ) ;
}
b - > interrupt_enable ?
( mci_misc_hi = ( mci_misc_hi & ~ MASK_INT_TYPE_HI ) | INT_TYPE_APIC ) :
( mci_misc_hi & = ~ MASK_INT_TYPE_HI ) ;
mci_misc_hi | = MASK_COUNT_EN_HI ;
2006-06-26 13:58:53 +02:00
wrmsr ( b - > address , mci_misc_lo , mci_misc_hi ) ;
2005-11-05 17:25:53 +01:00
}
2006-06-26 13:58:53 +02:00
/* cpu init entry point, called from mce.c with preempt off */
2005-11-05 17:25:53 +01:00
/*
 * Walk every bank/block, record which banks are present in bank_map,
 * and program each valid block's LVT offset and default threshold.
 */
void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
{
	unsigned int bank, block;
	unsigned int cpu = smp_processor_id();
	u32 low = 0, high = 0, address = 0;

	for (bank = 0; bank < NR_BANKS; ++bank) {
		for (block = 0; block < NR_BLOCKS; ++block) {
			/*
			 * Block 0 is at MCi_MISC; block 1's address comes
			 * from the BLKPTR field read on the previous pass;
			 * later blocks are contiguous after block 1.
			 */
			if (block == 0)
				address = MSR_IA32_MC0_MISC + bank * 4;
			else if (block == 1)
				address = MCG_XBLK_ADDR
					+ ((low & MASK_BLKPTR_LO) >> 21);
			else
				++address;

			if (rdmsr_safe(address, &low, &high))
				continue;

			if (!(high & MASK_VALID_HI)) {
				if (block)
					continue;
				else
					break;	/* bank has no blocks */
			}

			/*
			 * Require bit 30 set and bit 29 clear (counter
			 * present / not locked per AMD docs -- verify).
			 */
			if (!(high & MASK_VALID_HI >> 1) ||
			     (high & MASK_VALID_HI >> 2))
				continue;

			if (!block)
				per_cpu(bank_map, cpu) |= (1 << bank);
#ifdef CONFIG_SMP
			/* shared banks are set up by the first core only */
			if (shared_bank[bank] && c->cpu_core_id)
				break;
#endif
			/* route the threshold interrupt via the extended LVT */
			high &= ~MASK_LVTOFF_HI;
			high |= K8_APIC_EXT_LVT_ENTRY_THRESHOLD << 20;
			wrmsr(address, low, high);

			setup_APIC_extened_lvt(K8_APIC_EXT_LVT_ENTRY_THRESHOLD,
					       THRESHOLD_APIC_VECTOR,
					       K8_APIC_EXT_INT_MSG_FIX, 0);

			/* program the boot-time default limit */
			threshold_defaults.address = address;
			threshold_restart_bank(&threshold_defaults, 0, 0);
		}
	}
}
/*
* APIC Interrupt Handler
*/
/*
* threshold interrupt handler will service THRESHOLD_APIC_VECTOR .
* the interrupt goes off when error_count reaches threshold_limit .
* the handler will simply log mcelog w / software defined bank number .
*/
/*
 * Scan all banks/blocks for one with its overflow bit set and log the
 * first match via mce_log() with a software-defined bank number.
 */
asmlinkage void mce_threshold_interrupt(void)
{
	unsigned int bank, block;
	struct mce m;
	u32 low = 0, high = 0, address = 0;

	ack_APIC_irq();
	exit_idle();
	irq_enter();

	memset(&m, 0, sizeof(m));
	rdtscll(m.tsc);
	m.cpu = smp_processor_id();

	/* assume first bank caused it */
	for (bank = 0; bank < NR_BANKS; ++bank) {
		for (block = 0; block < NR_BLOCKS; ++block) {
			/* same address walk as mce_amd_feature_init() */
			if (block == 0)
				address = MSR_IA32_MC0_MISC + bank * 4;
			else if (block == 1)
				address = MCG_XBLK_ADDR
					+ ((low & MASK_BLKPTR_LO) >> 21);
			else
				++address;

			if (rdmsr_safe(address, &low, &high))
				continue;

			if (!(high & MASK_VALID_HI)) {
				if (block)
					continue;
				else
					break;	/* bank has no blocks */
			}

			/* need bit 30 set and bit 29 clear, as in init */
			if (!(high & MASK_VALID_HI >> 1) ||
			     (high & MASK_VALID_HI >> 2))
				continue;

			/* log the first block whose counter overflowed */
			if (high & MASK_OVERFLOW_HI) {
				rdmsrl(address, m.misc);
				rdmsrl(MSR_IA32_MC0_STATUS + bank * 4,
				       m.status);
				m.bank = K8_MCE_THRESHOLD_BASE
				       + bank * NR_BLOCKS
				       + block;
				mce_log(&m);
				goto out;
			}
		}
	}
out:
	irq_exit();
}
/*
* Sysfs Interface
*/
/*
 * Typed sysfs attribute: show/store receive the owning threshold_block
 * rather than a raw kobject (see the show()/store() dispatchers below).
 */
struct threshold_attr {
	struct attribute attr;
	ssize_t (*show)(struct threshold_block *, char *);
	ssize_t (*store)(struct threshold_block *, const char *, size_t count);
};
/* Pin the current task to @cpu; returns the previous mask for restoring. */
static cpumask_t affinity_set(unsigned int cpu)
{
	cpumask_t saved = current->cpus_allowed;
	cpumask_t target = CPU_MASK_NONE;

	cpu_set(cpu, target);
	set_cpus_allowed(current, target);
	return saved;
}
/* Undo affinity_set(): restore the task's original CPU mask. */
static void affinity_restore(cpumask_t oldmask)
{
	set_cpus_allowed(current, oldmask);
}
2006-06-26 13:58:56 +02:00
/*
 * Generate a sysfs "show" handler that prints field @name of a
 * threshold_block in hex.
 */
#define SHOW_FIELDS(name)                                           \
static ssize_t show_ ## name(struct threshold_block * b, char *buf) \
{                                                                   \
        return sprintf(buf, "%lx\n", (unsigned long) b->name);      \
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)
2006-06-26 13:58:53 +02:00
/*
 * sysfs write handler: any non-zero value enables the APIC threshold
 * interrupt for this block, zero disables it.  The MSR is reprogrammed
 * on the block's own CPU.
 */
static ssize_t store_interrupt_enable(struct threshold_block *b,
				      const char *buf, size_t count)
{
	cpumask_t saved;
	char *end;
	unsigned long val = simple_strtoul(buf, &end, 0);

	if (end == buf)
		return -EINVAL;

	b->interrupt_enable = !!val;

	saved = affinity_set(b->cpu);
	threshold_restart_bank(b, 0, 0);
	affinity_restore(saved);

	return end - buf;
}
2006-06-26 13:58:53 +02:00
/*
 * sysfs write handler for the threshold limit.  The value is clamped to
 * the hardware range [1, THRESHOLD_MAX] and the running count is rebased
 * so the counter does not fire spuriously.
 */
static ssize_t store_threshold_limit(struct threshold_block *b,
				     const char *buf, size_t count)
{
	cpumask_t saved;
	u16 old_limit;
	char *end;
	unsigned long val = simple_strtoul(buf, &end, 0);

	if (end == buf)
		return -EINVAL;

	/* clamp to what the 12-bit counter supports */
	if (val > THRESHOLD_MAX)
		val = THRESHOLD_MAX;
	else if (val < 1)
		val = 1;

	old_limit = b->threshold_limit;
	b->threshold_limit = val;

	saved = affinity_set(b->cpu);
	threshold_restart_bank(b, 0, old_limit);
	affinity_restore(saved);

	return end - buf;
}
2006-06-26 13:58:53 +02:00
/*
 * Read the live error count from the block's MSR.  The hardware counter
 * starts at THRESHOLD_MAX - threshold_limit, so subtract that bias to
 * report the number of errors seen.
 */
static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
	u32 high, low;
	cpumask_t oldmask;

	/* MSRs are per-CPU: must read on the block's own CPU */
	oldmask = affinity_set(b->cpu);
	rdmsr(b->address, low, high);
	affinity_restore(oldmask);

	return sprintf(buf, "%x\n",
		       (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit));
}
2006-06-26 13:58:53 +02:00
static ssize_t store_error_count ( struct threshold_block * b ,
2005-11-05 17:25:53 +01:00
const char * buf , size_t count )
{
cpumask_t oldmask ;
oldmask = affinity_set ( b - > cpu ) ;
threshold_restart_bank ( b , 1 , 0 ) ;
affinity_restore ( oldmask ) ;
return 1 ;
}
/* Initializer for a threshold_attr with the given name, mode and handlers. */
#define THRESHOLD_ATTR(_name,_mode,_show,_store) {              \
        .attr = {.name = __stringify(_name), .mode = _mode },  \
        .show = _show,                                          \
        .store = _store,                                        \
};

/* Define a 0644 (rw) attribute wired to show_<name>/store_<name>. */
#define RW_ATTR(name)                                           \
static struct threshold_attr name =                             \
        THRESHOLD_ATTR(name, 0644, show_## name, store_## name)

RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);
RW_ATTR(error_count);

/* files created in every threshold block's sysfs directory */
static struct attribute *default_attrs[] = {
	&interrupt_enable.attr,
	&threshold_limit.attr,
	&error_count.attr,
	NULL
};
2006-06-26 13:58:53 +02:00
/* Map an embedded kobject/attribute pointer back to our wrapper types. */
#define to_block(k) container_of(k, struct threshold_block, kobj)
#define to_attr(a) container_of(a, struct threshold_attr, attr)
2005-11-05 17:25:53 +01:00
static ssize_t show ( struct kobject * kobj , struct attribute * attr , char * buf )
{
2006-06-26 13:58:53 +02:00
struct threshold_block * b = to_block ( kobj ) ;
2005-11-05 17:25:53 +01:00
struct threshold_attr * a = to_attr ( attr ) ;
ssize_t ret ;
ret = a - > show ? a - > show ( b , buf ) : - EIO ;
return ret ;
}
/* sysfs write dispatch: forward to the typed store handler, if any. */
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);

	if (!a->store)
		return -EIO;
	return a->store(b, buf, count);
}
/* Glue: route all kobject sysfs I/O through show()/store() above. */
static struct sysfs_ops threshold_ops = {
	.show = show,
	.store = store,
};

static struct kobj_type threshold_ktype = {
	.sysfs_ops = &threshold_ops,
	.default_attrs = default_attrs,
};
2006-06-26 13:58:53 +02:00
/*
 * Allocate and register a threshold_block for @block of @bank (if the
 * hardware block at @address is present and usable), then recurse to
 * the bank's next block.  Returns 0 on success or a negative errno.
 *
 * Fixes vs. original: low/high are zero-initialized (the recurse path
 * read them uninitialized when rdmsr_safe() failed on block 0), and the
 * redundant memset() after kzalloc() is gone.
 */
static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
					       unsigned int bank,
					       unsigned int block,
					       u32 address)
{
	int err;
	u32 low = 0, high = 0;
	struct threshold_block *b = NULL;

	if ((bank >= NR_BANKS) || (block >= NR_BLOCKS))
		return 0;

	if (rdmsr_safe(address, &low, &high))
		goto recurse;

	if (!(high & MASK_VALID_HI)) {
		if (block)
			goto recurse;
		else
			return 0;	/* bank has no blocks */
	}

	/* need bit 30 set and bit 29 clear (present / not locked) */
	if (!(high & MASK_VALID_HI >> 1) ||
	     (high & MASK_VALID_HI >> 2))
		goto recurse;

	/* kzalloc() already zeroes the allocation */
	b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
	if (!b)
		return -ENOMEM;

	b->block = block;
	b->bank = bank;
	b->cpu = cpu;
	b->address = address;
	b->interrupt_enable = 0;
	b->threshold_limit = THRESHOLD_MAX;

	INIT_LIST_HEAD(&b->miscj);

	/* first block becomes the list head; later blocks chain onto it */
	if (per_cpu(threshold_banks, cpu)[bank]->blocks)
		list_add(&b->miscj,
			 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
	else
		per_cpu(threshold_banks, cpu)[bank]->blocks = b;

	kobject_set_name(&b->kobj, "misc%i", block);
	b->kobj.parent = &per_cpu(threshold_banks, cpu)[bank]->kobj;
	b->kobj.ktype = &threshold_ktype;
	err = kobject_register(&b->kobj);
	if (err)
		goto out_free;
recurse:
	if (!block) {
		/* block 1's address comes from block 0's BLKPTR field */
		address = (low & MASK_BLKPTR_LO) >> 21;
		if (!address)
			return 0;
		address += MCG_XBLK_ADDR;
	} else
		++address;

	err = allocate_threshold_blocks(cpu, bank, ++block, address);
	if (err)
		goto out_free;

	return err;
out_free:
	if (b) {
		kobject_unregister(&b->kobj);
		kfree(b);
	}
	return err;
}
2005-11-05 17:25:53 +01:00
/* symlinks sibling shared banks to first core. first core owns dir/files. */
2006-06-26 13:58:53 +02:00
static __cpuinit int threshold_create_bank ( unsigned int cpu , unsigned int bank )
2005-11-05 17:25:53 +01:00
{
2006-06-26 13:58:53 +02:00
int i , err = 0 ;
2006-01-11 22:45:57 +01:00
struct threshold_bank * b = NULL ;
2006-06-26 13:58:53 +02:00
cpumask_t oldmask = CPU_MASK_NONE ;
char name [ 32 ] ;
sprintf ( name , " threshold_bank%i " , bank ) ;
2005-11-05 17:25:53 +01:00
# ifdef CONFIG_SMP
2006-06-26 13:58:17 +02:00
if ( cpu_data [ cpu ] . cpu_core_id & & shared_bank [ bank ] ) { /* symlink */
2006-06-26 13:58:53 +02:00
i = first_cpu ( cpu_core_map [ cpu ] ) ;
/* first core not up yet */
if ( cpu_data [ i ] . cpu_core_id )
goto out ;
/* already linked */
if ( per_cpu ( threshold_banks , cpu ) [ bank ] )
goto out ;
b = per_cpu ( threshold_banks , i ) [ bank ] ;
2005-11-05 17:25:53 +01:00
if ( ! b )
goto out ;
2006-06-26 13:58:53 +02:00
2006-06-26 13:58:50 +02:00
err = sysfs_create_link ( & per_cpu ( device_mce , cpu ) . kobj ,
2005-11-05 17:25:53 +01:00
& b - > kobj , name ) ;
if ( err )
goto out ;
2006-06-26 13:58:53 +02:00
b - > cpus = cpu_core_map [ cpu ] ;
2005-11-05 17:25:53 +01:00
per_cpu ( threshold_banks , cpu ) [ bank ] = b ;
goto out ;
}
# endif
2006-06-26 13:58:53 +02:00
b = kzalloc ( sizeof ( struct threshold_bank ) , GFP_KERNEL ) ;
2005-11-05 17:25:53 +01:00
if ( ! b ) {
err = - ENOMEM ;
goto out ;
}
memset ( b , 0 , sizeof ( struct threshold_bank ) ) ;
2006-06-26 13:58:50 +02:00
kobject_set_name ( & b - > kobj , " threshold_bank%i " , bank ) ;
b - > kobj . parent = & per_cpu ( device_mce , cpu ) . kobj ;
2006-06-26 13:58:53 +02:00
# ifndef CONFIG_SMP
b - > cpus = CPU_MASK_ALL ;
# else
b - > cpus = cpu_core_map [ cpu ] ;
# endif
2005-11-05 17:25:53 +01:00
err = kobject_register ( & b - > kobj ) ;
2006-06-26 13:58:53 +02:00
if ( err )
goto out_free ;
2005-11-05 17:25:53 +01:00
per_cpu ( threshold_banks , cpu ) [ bank ] = b ;
2006-06-26 13:58:53 +02:00
oldmask = affinity_set ( cpu ) ;
err = allocate_threshold_blocks ( cpu , bank , 0 ,
MSR_IA32_MC0_MISC + bank * 4 ) ;
affinity_restore ( oldmask ) ;
if ( err )
goto out_free ;
for_each_cpu_mask ( i , b - > cpus ) {
if ( i = = cpu )
continue ;
err = sysfs_create_link ( & per_cpu ( device_mce , i ) . kobj ,
& b - > kobj , name ) ;
if ( err )
goto out ;
per_cpu ( threshold_banks , i ) [ bank ] = b ;
}
goto out ;
out_free :
per_cpu ( threshold_banks , cpu ) [ bank ] = NULL ;
kfree ( b ) ;
2006-06-26 13:58:56 +02:00
out :
2005-11-05 17:25:53 +01:00
return err ;
}
/* create dir/files for all valid threshold banks */
static __cpuinit int threshold_create_device(unsigned int cpu)
{
	int err = 0;
	unsigned int bank;

	for (bank = 0; bank < NR_BANKS; ++bank) {
		/* only banks that init recorded in bank_map exist */
		if (!(per_cpu(bank_map, cpu) & 1 << bank))
			continue;
		err = threshold_create_bank(cpu, bank);
		if (err)
			break;
	}

	return err;
}
# ifdef CONFIG_HOTPLUG_CPU
/*
* let ' s be hotplug friendly .
* in case of multiple core processors , the first core always takes ownership
* of shared sysfs dir / files , and rest of the cores will be symlinked to it .
*/
2006-07-30 03:03:37 -07:00
/*
 * Free every threshold_block of @bank on @cpu: chained blocks first,
 * then the list head itself (which is not on the miscj list).
 */
static void deallocate_threshold_block(unsigned int cpu,
				       unsigned int bank)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];

	if (!head)
		return;

	list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
		kobject_unregister(&pos->kobj);
		list_del(&pos->miscj);
		kfree(pos);
	}

	/*
	 * NOTE(review): the head block is only kfree()d here; its kobject
	 * is never unregistered -- verify this is intentional.
	 */
	kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
	per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
}
2006-07-30 03:03:37 -07:00
/*
 * Tear down @bank for @cpu.  A sibling core of a shared bank only drops
 * its symlink; the owning core removes all sibling symlinks first, then
 * frees the blocks and unregisters the bank's kobject.
 */
static void threshold_remove_bank(unsigned int cpu, int bank)
{
	int i = 0;
	struct threshold_bank *b;
	char name[32];

	b = per_cpu(threshold_banks, cpu)[bank];
	if (!b)
		return;
	if (!b->blocks)
		goto free_out;	/* nothing enumerated; just drop the kobject */

	sprintf(name, "threshold_bank%i", bank);

	/* sibling symlink */
	if (shared_bank[bank] && b->blocks->cpu != cpu) {
		sysfs_remove_link(&per_cpu(device_mce, cpu).kobj, name);
		per_cpu(threshold_banks, cpu)[bank] = NULL;
		return;
	}

	/* remove all sibling symlinks before unregistering */
	for_each_cpu_mask(i, b->cpus) {
		if (i == cpu)
			continue;

		sysfs_remove_link(&per_cpu(device_mce, i).kobj, name);
		per_cpu(threshold_banks, i)[bank] = NULL;
	}

	deallocate_threshold_block(cpu, bank);

free_out:
	kobject_unregister(&b->kobj);
	kfree(b);
	per_cpu(threshold_banks, cpu)[bank] = NULL;
}
2006-07-30 03:03:37 -07:00
static void threshold_remove_device ( unsigned int cpu )
2005-11-05 17:25:53 +01:00
{
2006-06-26 13:58:56 +02:00
unsigned int bank ;
2005-11-05 17:25:53 +01:00
for ( bank = 0 ; bank < NR_BANKS ; + + bank ) {
if ( ! ( per_cpu ( bank_map , cpu ) & 1 < < bank ) )
continue ;
threshold_remove_bank ( cpu , bank ) ;
}
}
/* get notified when a cpu comes on/off */
2006-07-30 03:03:37 -07:00
static int threshold_cpu_callback ( struct notifier_block * nfb ,
2005-11-05 17:25:53 +01:00
unsigned long action , void * hcpu )
{
/* cpu was unsigned int to begin with */
unsigned int cpu = ( unsigned long ) hcpu ;
if ( cpu > = NR_CPUS )
goto out ;
switch ( action ) {
case CPU_ONLINE :
threshold_create_device ( cpu ) ;
break ;
case CPU_DEAD :
threshold_remove_device ( cpu ) ;
break ;
default :
break ;
}
out :
return NOTIFY_OK ;
}
2006-07-30 03:03:37 -07:00
static struct notifier_block threshold_cpu_notifier = {
2005-11-05 17:25:53 +01:00
. notifier_call = threshold_cpu_callback ,
} ;
2006-07-30 03:03:37 -07:00
# endif /* CONFIG_HOTPLUG_CPU */
2005-11-05 17:25:53 +01:00
static __init int threshold_init_device ( void )
{
2006-06-26 13:58:56 +02:00
unsigned lcpu = 0 ;
2005-11-05 17:25:53 +01:00
/* to hit CPUs online before the notifier is up */
for_each_online_cpu ( lcpu ) {
2006-06-26 13:58:50 +02:00
int err = threshold_create_device ( lcpu ) ;
2005-11-05 17:25:53 +01:00
if ( err )
2006-06-26 13:58:50 +02:00
return err ;
2005-11-05 17:25:53 +01:00
}
2006-07-30 03:03:37 -07:00
register_hotcpu_notifier ( & threshold_cpu_notifier ) ;
2006-06-26 13:58:50 +02:00
return 0 ;
2005-11-05 17:25:53 +01:00
}
device_initcall ( threshold_init_device ) ;