/*
 *	Routines to identify caches on Intel CPUs.
 *
 *	Changes:
 *	Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 *	Ashok Raj <ashok.raj@intel.com>	: Work with CPU hotplug infrastructure.
 *	Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include <asm/processor.h>
#include <linux/smp.h>
#include <asm/k8.h>
#include <asm/smp.h>
#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5

struct _cache_table {
	unsigned char descriptor;
	char cache_type;
	short size;
};

#define MB(x)	((x) * 1024)
/* All the cache descriptor types we care about (no TLB entries;
   sizes are in KB, K-uops for the trace cache) */
static const struct _cache_table __cpuinitconst cache_table[] =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2-way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x21, LVL_2, 256 },		/* 8-way set assoc, 64 byte line size */
	{ 0x22, LVL_3, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, MB(1) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, MB(2) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, MB(4) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },		/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2, 256 },		/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },		/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },		/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },		/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, MB(1) },		/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, MB(2) },		/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, MB(4) },		/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, MB(8) },		/* 8-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, MB(4) },		/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, MB(6) },		/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, MB(8) },		/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, MB(12) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, MB(16) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2, MB(6) },		/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, MB(1) },		/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, MB(1) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, MB(2) },		/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },		/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },		/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },		/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, MB(1) },		/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, MB(2) },		/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, MB(1) },		/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3, MB(1) },		/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3, MB(2) },		/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3, MB(1) },		/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3, MB(2) },		/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3, MB(4) },		/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3, MB(2) },		/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3, MB(4) },		/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3, MB(8) },		/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3, MB(2) },		/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3, MB(4) },		/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3, MB(8) },		/* 16-way set assoc, 64 byte line size */
	{ 0xea, LVL_3, MB(12) },	/* 24-way set assoc, 64 byte line size */
	{ 0xeb, LVL_3, MB(18) },	/* 24-way set assoc, 64 byte line size */
	{ 0xec, LVL_3, MB(24) },	/* 24-way set assoc, 64 byte line size */
	{ 0x00, 0, 0 }			/* terminator */
};
enum _cache_type {
	CACHE_TYPE_NULL = 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};

union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};

union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};

union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};

struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	bool can_disable;
	unsigned int l3_indices;
	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};

/* subset of above _cpuid4_info w/o shared_cpu_map */
struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	bool can_disable;
	unsigned int l3_indices;
};
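
/*
 * Note on the CPUID(4) layout above: all of the count-type fields
 * (ways, partitions, line size, sets, threads sharing, cores on die)
 * are reported minus one, which is why every consumer in this file
 * adds 1 back before using them.
 */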
unsigned short			num_cache_leaves;

/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc. that are currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy").
   Maybe later */
union l1_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:8;
		unsigned assoc:8;
		unsigned size_in_kb:8;
	};
	unsigned val;
};

union l2_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned size_in_kb:16;
	};
	unsigned val;
};

union l3_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned res:2;
		unsigned size_encoded:14;
	};
	unsigned val;
};

static const unsigned short __cpuinitconst assocs[] = {
	[1] = 1,
	[2] = 2,
	[4] = 4,
	[6] = 8,
	[8] = 16,
	[0xa] = 32,
	[0xb] = 48,
	[0xc] = 64,
	[0xd] = 96,
	[0xe] = 128,
	[0xf] = 0xffff /* fully associative - no way to show this currently */
};

static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
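
/*
 * The unions above mirror the register layout of AMD's extended CPUID
 * leaves: 0x80000005 returns the L1D descriptor in ECX and L1I in EDX,
 * and 0x80000006 returns L2 in ECX and L3 in EDX, as consumed by
 * amd_cpuid4() below.  The 4-bit L2/L3 assoc field is an encoded value,
 * hence the assocs[] lookup table: e.g. an assoc field of 0x6 means
 * 8-way, and 0xf means fully associative.
 */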
static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
	   union _cpuid4_leaf_ebx *ebx,
	   union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
		/* fall through */
	case 0:
		if (!l1->val)
			return;
		assoc = assocs[l1->assoc];
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = assocs[l2.assoc];
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = current_cpu_data.x86_cache_size;
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = assocs[l3.assoc];
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
			size_in_kb = size_in_kb >> 1;
			assoc = assoc >> 1;
		}
		break;
	default:
		return;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;

	if (assoc == 0xffff)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assoc - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}
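
/*
 * Note that the tail of amd_cpuid4() re-encodes the raw AMD values into
 * the Intel leaf-4 minus-one format, so the generic decode path can
 * treat both vendors identically: e.g. a 16-way associativity is stored
 * as 15, and number_of_sets is derived as size / line_size / ways,
 * minus one.
 */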

struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};

#ifdef CONFIG_CPU_SUP_AMD
static unsigned int __cpuinit amd_calc_l3_indices(void)
{
	/*
	 * We're called over smp_call_function_single() and therefore
	 * are on the correct cpu.
	 */
	int cpu = smp_processor_id();
	int node = cpu_to_node(cpu);
	struct pci_dev *dev = node_to_k8_nb_misc(node);
	unsigned int sc0, sc1, sc2, sc3;
	u32 val = 0;

	pci_read_config_dword(dev, 0x1C4, &val);

	/* calculate subcache sizes */
	sc0 = !(val & BIT(0));
	sc1 = !(val & BIT(4));
	sc2 = !(val & BIT(8))  + !(val & BIT(9));
	sc3 = !(val & BIT(12)) + !(val & BIT(13));

	return (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
}
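
/*
 * Worked example for amd_calc_l3_indices(): with the sampled bits of
 * register 0x1C4 all clear, sc0 and sc1 evaluate to 1 and sc2 and sc3
 * to 2, so the function returns (2 << 10) - 1 = 2047 usable L3
 * indices.  As the code reads, a set bit zeroes out the corresponding
 * contribution and shrinks the count.
 */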

static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
{
	if (boot_cpu_data.x86 != 0x10)
		return;

	if (index < 3)
		return;

	/* see errata #382 and #388 */
	if (boot_cpu_data.x86_model < 0x8)
		return;

	if ((boot_cpu_data.x86_model == 0x8 ||
	     boot_cpu_data.x86_model == 0x9)
		&&
	     boot_cpu_data.x86_mask < 0x1)
		return;

	/* not in virtualized environments */
	if (num_k8_northbridges == 0)
		return;

	this_leaf->can_disable = true;
	this_leaf->l3_indices = amd_calc_l3_indices();
}

static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
				  unsigned int index)
{
	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
	int node = amd_get_nb_id(cpu);
	struct pci_dev *dev = node_to_k8_nb_misc(node);
	unsigned int reg = 0;

	if (!this_leaf->can_disable)
		return -EINVAL;

	if (!dev)
		return -EINVAL;

	pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
	return sprintf(buf, "0x%08x\n", reg);
}

#define SHOW_CACHE_DISABLE(index)					\
static ssize_t								\
show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return show_cache_disable(this_leaf, buf, index);		\
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)

static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
	const char *buf, size_t count, unsigned int index)
{
	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
	int node = amd_get_nb_id(cpu);
	struct pci_dev *dev = node_to_k8_nb_misc(node);
	unsigned long val = 0;

#define SUBCACHE_MASK	(3UL << 20)
#define SUBCACHE_INDEX	0xfff

	if (!this_leaf->can_disable)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!dev)
		return -EINVAL;

	if (strict_strtoul(buf, 10, &val) < 0)
		return -EINVAL;

	/* do not allow writes outside of allowed bits */
	if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
	    ((val & SUBCACHE_INDEX) > this_leaf->l3_indices))
		return -EINVAL;

	val |= BIT(30);
	pci_write_config_dword(dev, 0x1BC + index * 4, val);
	/*
	 * We need to WBINVD on a core on the node containing the L3 cache which
	 * indices we disable therefore a simple wbinvd() is not sufficient.
	 */
	wbinvd_on_cpu(cpu);
	pci_write_config_dword(dev, 0x1BC + index * 4, val | BIT(31));
	return count;
}
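
/*
 * These handlers back the cache_disable_0/1 sysfs attributes defined
 * below.  As a usage sketch (the path follows from the kobject layout
 * set up later in this file), disabling an L3 index from userspace
 * would look like:
 *
 *	# echo 42 > /sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0
 *	# cat /sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0
 */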
#define STORE_CACHE_DISABLE(index)					\
static ssize_t								\
store_cache_disable_##index(struct _cpuid4_info *this_leaf,		\
			    const char *buf, size_t count)		\
{									\
	return store_cache_disable(this_leaf, buf, count, index);	\
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)

static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
		show_cache_disable_0, store_cache_disable_0);
static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
		show_cache_disable_1, store_cache_disable_1);

#else	/* CONFIG_CPU_SUP_AMD */
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
{
};
#endif /* CONFIG_CPU_SUP_AMD */

static int
__cpuinit cpuid4_cache_lookup_regs(int index,
				   struct _cpuid4_info_regs *this_leaf)
{
	union _cpuid4_leaf_eax	eax;
	union _cpuid4_leaf_ebx	ebx;
	union _cpuid4_leaf_ecx	ecx;
	unsigned		edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		amd_cpuid4(index, &eax, &ebx, &ecx);
		amd_check_l3_disable(index, this_leaf);
	} else {
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	}

	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets          + 1) *
			  (ebx.split.coherency_line_size     + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity   + 1);
	return 0;
}
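
/*
 * Worked example for the size computation above: a leaf reporting
 * number_of_sets = 511, coherency_line_size = 63,
 * physical_line_partition = 0 and ways_of_associativity = 7 describes
 * 512 sets * 64-byte lines * 1 partition * 8 ways = 262144 bytes,
 * i.e. a 256 KB cache.
 */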

static int __cpuinit find_num_cache_leaves(void)
{
	unsigned int		eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax	cache_eax;
	int			i = -1;

	do {
		++i;
		/* Do cpuid(4) loop to find out num_cache_leaves */
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
	return i;
}
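
/*
 * The enumeration above relies on the architected termination rule for
 * CPUID leaf 4: subleaf indices are probed in order (0, 1, 2, ...) and
 * the first subleaf whose cache type field reads CACHE_TYPE_NULL marks
 * the end of the list, so i ends up as the number of valid leaves.
 */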

unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
	unsigned int cpu = c->cpu_index;
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf;
			int retval;

			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
			if (retval >= 0) {
				switch (this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid >> index_msb;
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(
							num_threads_sharing);
					l3_id = c->apicid >> index_msb;
					break;
				default:
					break;
				}
			}
		}
	}
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2  call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0 ; i < n ; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0 ; j < 3 ; j++)
				if (regs[j] & (1 << 31))
					regs[j] = 0;

			/* Byte 0 is level count, not a descriptor */
			for (j = 1 ; j < 16 ; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;

						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}
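
	/*
	 * Worked example for the cpuid(2) decode above: the four registers
	 * are treated as a 16-byte stream of one-byte descriptors (byte 0
	 * of EAX being the repeat count rather than a descriptor).  If one
	 * of the bytes reads e.g. 0x2c, the cache_table[] lookup resolves
	 * it to a 32 KB L1 data cache and adds 32 to l1d.
	 */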

	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}

#ifdef CONFIG_SYSFS

/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(ici_cpuid4_info, x))[y]))

#ifdef CONFIG_SMP
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i, sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
		for_each_cpu(i, c->llc_shared_map) {
			if (!per_cpu(ici_cpuid4_info, i))
				continue;
			this_leaf = CPUID4_INFO_IDX(i, index);
			for_each_cpu(sibling, c->llc_shared_map) {
				if (!cpu_online(sibling))
					continue;
				set_bit(sibling, this_leaf->shared_cpu_map);
			}
		}
		return;
	}
	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
	else {
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
				cpumask_set_cpu(i,
					to_cpumask(this_leaf->shared_cpu_map));
				if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
					sibling_leaf =
						CPUID4_INFO_IDX(i, index);
					cpumask_set_cpu(cpu, to_cpumask(
						sibling_leaf->shared_cpu_map));
				}
			}
		}
	}
}
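
/*
 * Note on the sharing test above: CPUID(4) reports how many hardware
 * threads share each cache level, and APIC IDs are assigned so that
 * those threads differ only in their low bits.  Two CPUs therefore
 * share the cache exactly when their APIC IDs match after shifting out
 * get_count_order(num_threads_sharing) low bits.  For example, with 2
 * threads sharing, APIC IDs 4 and 5 both become 2 after a 1-bit shift.
 */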

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpumask_clear_cpu(cpu,
				  to_cpumask(sibling_leaf->shared_cpu_map));
	}
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
}
#endif

static void __cpuinit free_cache_attributes(unsigned int cpu)
{
	int i;

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(ici_cpuid4_info, cpu));
	per_cpu(ici_cpuid4_info, cpu) = NULL;
}

static int
__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
	struct _cpuid4_info_regs *leaf_regs =
		(struct _cpuid4_info_regs *)this_leaf;

	return cpuid4_cache_lookup_regs(index, leaf_regs);
}

static void __cpuinit get_cpu_leaves(void *_retval)
{
	int j, *retval = _retval, cpu = smp_processor_id();

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		struct _cpuid4_info *this_leaf;

		this_leaf = CPUID4_INFO_IDX(cpu, j);
		*retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(*retval < 0)) {
			int i;

			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
			break;
		}
		cache_shared_cpu_map_setup(cpu, j);
	}
}

static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	int			retval;

	if (num_cache_leaves == 0)
		return -ENOENT;

	per_cpu(ici_cpuid4_info, cpu) = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return -ENOMEM;

	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
	if (retval) {
		kfree(per_cpu(ici_cpuid4_info, cpu));
		per_cpu(ici_cpuid4_info, cpu) = NULL;
	}

	return retval;
}
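
/*
 * detect_cache_attributes() dispatches get_cpu_leaves() through
 * smp_call_function_single() because the CPUID instruction reports the
 * caches of whichever CPU executes it; the probe must therefore run on
 * the target CPU itself, not on the CPU doing the hotplug bookkeeping.
 */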

#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
#define INDEX_KOBJECT_PTR(x, y)		(&((per_cpu(ici_index_kobject, x))[y]))

#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
}

static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
					int type, char *buf)
{
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
	int n = 0;

	if (len > 1) {
		const struct cpumask *mask;

		mask = to_cpumask(this_leaf->shared_cpu_map);
		n = type ?
			cpulist_scnprintf(buf, len-2, mask) :
			cpumask_scnprintf(buf, len-2, mask);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}

static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 0, buf);
}

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 1, buf);
}

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
{
	switch (this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}

#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);

#define DEFAULT_SYSFS_CACHE_ATTRS	\
	&type.attr,			\
	&level.attr,			\
	&coherency_line_size.attr,	\
	&physical_line_partition.attr,	\
	&ways_of_associativity.attr,	\
	&number_of_sets.attr,		\
	&size.attr,			\
	&shared_cpu_map.attr,		\
	&shared_cpu_list.attr

static struct attribute *default_attrs[] = {
	DEFAULT_SYSFS_CACHE_ATTRS,
	NULL
};

static struct attribute *default_l3_attrs[] = {
	DEFAULT_SYSFS_CACHE_ATTRS,
#ifdef CONFIG_CPU_SUP_AMD
	&cache_disable_0.attr,
	&cache_disable_1.attr,
#endif
	NULL
};
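
/*
 * Each attribute above becomes a read-only file under
 * /sys/devices/system/cpu/cpuN/cache/indexM/ (e.g. "size", "type",
 * "shared_cpu_map"); an L3 leaf that supports index disabling gets the
 * two writable cache_disable_* files as well.  A typical L1 leaf might
 * show:
 *
 *	$ cat /sys/devices/system/cpu/cpu0/cache/index0/size
 *	32K
 */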

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf) :
		0;
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->store ?
		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, count) :
		0;
	return ret;
}

static const struct sysfs_ops sysfs_ops = {
	.show   = show,
	.store  = store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};

static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(per_cpu(ici_cache_kobject, cpu));
	kfree(per_cpu(ici_index_kobject, cpu));
	per_cpu(ici_cache_kobject, cpu) = NULL;
	per_cpu(ici_index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);
}

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
	int err;

	if (num_cache_leaves == 0)
		return -ENOENT;

	err = detect_cache_attributes(cpu);
	if (err)
		return err;

	/* Allocate all required memory */
	per_cpu(ici_cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
		goto err_out;

	per_cpu(ici_index_kobject, cpu) = kzalloc(
		sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static DECLARE_BITMAP(cache_dev_map, NR_CPUS);

/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	struct _cpuid4_info   *this_leaf;
	int retval;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
				      &ktype_percpu_entry,
				      &sys_dev->kobj, "%s", "cache");
	if (retval < 0) {
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;

		this_leaf = CPUID4_INFO_IDX(cpu, i);

		if (this_leaf->can_disable)
			ktype_cache.default_attrs = default_l3_attrs;
		else
			ktype_cache.default_attrs = default_attrs;

		retval = kobject_init_and_add(&(this_object->kobj),
					      &ktype_cache,
					      per_cpu(ici_cache_kobject, cpu),
					      "index%1lu", i);
		if (unlikely(retval)) {
			for (j = 0; j < i; j++)
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			kobject_put(per_cpu(ici_cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);
			return retval;
		}
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
	}
	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));

	kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
	return 0;
}

static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return;
	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
		return;
	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_put(per_cpu(ici_cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);
}

static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
	.notifier_call = cacheinfo_cpu_callback,
};

static int __cpuinit cache_sysfs_init(void)
{
	int i;

	if (num_cache_leaves == 0)
		return 0;

	for_each_online_cpu(i) {
		int err;
		struct sys_device *sys_dev = get_cpu_sysdev(i);

		err = cache_add_dev(sys_dev);
		if (err)
			return err;
	}
	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
	return 0;
}

device_initcall(cache_sysfs_init);

#endif