// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h/17h
 *		processor hardware monitoring
 *
 * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de>
 * Copyright (c) 2020 Guenter Roeck <linux@roeck-us.net>
 */

#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>
#include <asm/processor.h>

MODULE_DESCRIPTION("AMD Family 10h+ CPU core temperature monitor");
MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
MODULE_LICENSE("GPL");

static bool force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "force loading on processors with erratum 319");

/* Provide lock for writing to NB_SMU_IND_ADDR */
static DEFINE_MUTEX(nb_smu_ind_mutex);

#ifndef PCI_DEVICE_ID_AMD_15H_M70H_NB_F3
#define PCI_DEVICE_ID_AMD_15H_M70H_NB_F3	0x15b3
#endif

/* CPUID function 0x80000001, ebx */
#define CPUID_PKGTYPE_MASK	GENMASK(31, 28)
#define CPUID_PKGTYPE_F		0x00000000
#define CPUID_PKGTYPE_AM2R2_AM3	0x10000000

/* DRAM controller (PCI function 2) */
#define REG_DCT0_CONFIG_HIGH		0x094
#define  DDR3_MODE			BIT(8)

/* miscellaneous (PCI function 3) */
#define REG_HARDWARE_THERMAL_CONTROL	0x64
#define  HTC_ENABLE			BIT(0)

#define REG_REPORTED_TEMPERATURE	0xa4

#define REG_NORTHBRIDGE_CAPABILITIES	0xe8
#define  NB_CAP_HTC			BIT(10)

/*
 * For F15h M60h and M70h, REG_HARDWARE_THERMAL_CONTROL
 * and REG_REPORTED_TEMPERATURE have been moved to
 * D0F0xBC_xD820_0C64 [Hardware Temperature Control]
 * D0F0xBC_xD820_0CA4 [Reported Temperature Control]
 */
#define F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET	0xd8200c64
#define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET	0xd8200ca4

/* F17h M01h Access through SMN */
#define F17H_M01H_REPORTED_TEMP_CTRL_OFFSET	0x00059800

#define CUR_TEMP_SHIFT				21
#define CUR_TEMP_RANGE_SEL_MASK			BIT(19)

struct k10temp_data {
	struct pci_dev *pdev;
	void (*read_htcreg)(struct pci_dev *pdev, u32 *regval);
	void (*read_tempreg)(struct pci_dev *pdev, u32 *regval);
	int temp_offset;
	u32 temp_adjust_mask;
	bool show_tdie;
};

struct tctl_offset {
	u8 model;
	char const *id;
	int offset;
};
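
/*
 * On the models listed below, the reported (Tctl) temperature runs a fixed
 * amount above Tdie; the offset is subtracted from the raw reading when
 * reporting Tdie.
 */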
static const struct tctl_offset tctl_offset_table[] = {
	{ 0x17, "AMD Ryzen 5 1600X", 20000 },
	{ 0x17, "AMD Ryzen 7 1700X", 20000 },
	{ 0x17, "AMD Ryzen 7 1800X", 20000 },
	{ 0x17, "AMD Ryzen 7 2700X", 10000 },
	{ 0x17, "AMD Ryzen Threadripper 19", 27000 }, /* 19{00,20,50}X */
	{ 0x17, "AMD Ryzen Threadripper 29", 27000 }, /* 29{20,50,70,90}[W]X */
};

static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval)
{
	pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, regval);
}

static void read_tempreg_pci(struct pci_dev *pdev, u32 *regval)
{
	pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, regval);
}
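
/*
 * Indirect access through the NB SMU index/data register pair: write the
 * register offset to the index register at @base, then read the value from
 * the data register at @base + 4.
 */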
static void amd_nb_index_read(struct pci_dev *pdev, unsigned int devfn,
			      unsigned int base, int offset, u32 *val)
{
	mutex_lock(&nb_smu_ind_mutex);
	pci_bus_write_config_dword(pdev->bus, devfn,
				   base, offset);
	pci_bus_read_config_dword(pdev->bus, devfn,
				  base + 4, val);
	mutex_unlock(&nb_smu_ind_mutex);
}

static void read_htcreg_nb_f15(struct pci_dev *pdev, u32 *regval)
{
	amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
			  F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET, regval);
}

static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
{
	amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
			  F15H_M60H_REPORTED_TEMP_CTRL_OFFSET, regval);
}
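
/*
 * Family 17h exposes the reported temperature register through the System
 * Management Network (SMN) rather than northbridge PCI config space.
 */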
static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval)
{
	amd_smn_read(amd_pci_dev_to_node_id(pdev),
		     F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
}
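
/*
 * CurTmp is reported in units of 0.125 degC; when the range-select bit in
 * temp_adjust_mask is set, the reported range is shifted down by 49 degC.
 */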
static long get_raw_temp(struct k10temp_data *data)
{
	u32 regval;
	long temp;

	data->read_tempreg(data->pdev, &regval);
	temp = (regval >> CUR_TEMP_SHIFT) * 125;
	if (regval & data->temp_adjust_mask)
		temp -= 49000;
	return temp;
}

static const char *k10temp_temp_label[] = {
	"Tdie",
	"Tctl",
};

static int k10temp_read_labels(struct device *dev,
			       enum hwmon_sensor_types type,
			       u32 attr, int channel, const char **str)
{
	*str = k10temp_temp_label[channel];
	return 0;
}
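
/*
 * hwmon ->read callback: temperatures are reported in millidegrees Celsius.
 * Channel 0 is Tdie (raw reading minus any model-specific offset), channel 1
 * is the raw Tctl value; the critical limits come from the HTC register.
 */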
static int k10temp_read(struct device *dev, enum hwmon_sensor_types type,
			u32 attr, int channel, long *val)
{
	struct k10temp_data *data = dev_get_drvdata(dev);
	u32 regval;

	switch (attr) {
	case hwmon_temp_input:
		switch (channel) {
		case 0:		/* Tdie */
			*val = get_raw_temp(data) - data->temp_offset;
			if (*val < 0)
				*val = 0;
			break;
		case 1:		/* Tctl */
			*val = get_raw_temp(data);
			if (*val < 0)
				*val = 0;
			break;
		default:
			return -EOPNOTSUPP;
		}
		break;
	case hwmon_temp_max:
		*val = 70 * 1000;
		break;
	case hwmon_temp_crit:
		data->read_htcreg(data->pdev, &regval);
		*val = ((regval >> 16) & 0x7f) * 500 + 52000;
		break;
	case hwmon_temp_crit_hyst:
		data->read_htcreg(data->pdev, &regval);
		*val = (((regval >> 16) & 0x7f)
			- ((regval >> 24) & 0xf)) * 500 + 52000;
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
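
/*
 * Hide attributes the hardware cannot provide: the second channel and the
 * labels are only shown when show_tdie is set, and the critical limits are
 * only shown when the Hardware Thermal Control (HTC) feature is present
 * and enabled.
 */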
static umode_t k10temp_is_visible(const void *_data,
				  enum hwmon_sensor_types type,
				  u32 attr, int channel)
{
	const struct k10temp_data *data = _data;
	struct pci_dev *pdev = data->pdev;
	u32 reg;

	switch (type) {
	case hwmon_temp:
		switch (attr) {
		case hwmon_temp_input:
			if (channel && !data->show_tdie)
				return 0;
			break;
		case hwmon_temp_max:
			if (channel)
				return 0;
			break;
		case hwmon_temp_crit:
		case hwmon_temp_crit_hyst:
			if (channel || !data->read_htcreg)
				return 0;
			pci_read_config_dword(pdev,
					      REG_NORTHBRIDGE_CAPABILITIES,
					      &reg);
			if (!(reg & NB_CAP_HTC))
				return 0;
			data->read_htcreg(data->pdev, &reg);
			if (!(reg & HTC_ENABLE))
				return 0;
			break;
		case hwmon_temp_label:
			if (!data->show_tdie)
				return 0;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}
	return 0444;
}

static bool has_erratum_319(struct pci_dev *pdev)
{
	u32 pkg_type, reg_dram_cfg;

	if (boot_cpu_data.x86 != 0x10)
		return false;

	/*
	 * Erratum 319: The thermal sensor of Socket F/AM2+ processors
	 * may be unreliable.
	 */
	pkg_type = cpuid_ebx(0x80000001) & CPUID_PKGTYPE_MASK;
	if (pkg_type == CPUID_PKGTYPE_F)
		return true;
	if (pkg_type != CPUID_PKGTYPE_AM2R2_AM3)
		return false;

	/* DDR3 memory implies socket AM3, which is good */
	pci_bus_read_config_dword(pdev->bus,
				  PCI_DEVFN(PCI_SLOT(pdev->devfn), 2),
				  REG_DCT0_CONFIG_HIGH, &reg_dram_cfg);
	if (reg_dram_cfg & DDR3_MODE)
		return false;

	/*
	 * Unfortunately it is possible to run a socket AM3 CPU with DDR2
	 * memory. We blacklist all the cores which do exist in socket AM2+
	 * format. It still isn't perfect, as RB-C2 cores exist in both AM2+
	 * and AM3 formats, but that's the best we can do.
	 */
	return boot_cpu_data.x86_model < 4 ||
	       (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2);
}
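
/*
 * Two temperature channels are registered: channel 0 is Tdie, channel 1 is
 * Tctl. Unsupported attributes are filtered out by k10temp_is_visible().
 */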
static const struct hwmon_channel_info *k10temp_info[] = {
	HWMON_CHANNEL_INFO(temp,
			   HWMON_T_INPUT | HWMON_T_MAX |
			   HWMON_T_CRIT | HWMON_T_CRIT_HYST |
			   HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL),
	NULL
};

static const struct hwmon_ops k10temp_hwmon_ops = {
	.is_visible = k10temp_is_visible,
	.read = k10temp_read,
	.read_string = k10temp_read_labels,
};

static const struct hwmon_chip_info k10temp_chip_info = {
	.ops = &k10temp_hwmon_ops,
	.info = k10temp_info,
};
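
/*
 * Pick the register access method for the running CPU (direct PCI config
 * space, NB SMU indirect access, or SMN), apply any Tctl offset, and
 * register the hwmon device.
 */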
static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int unreliable = has_erratum_319(pdev);
	struct device *dev = &pdev->dev;
	struct k10temp_data *data;
	struct device *hwmon_dev;
	int i;

	if (unreliable) {
		if (!force) {
			dev_err(dev,
				"unreliable CPU thermal sensor; monitoring disabled\n");
			return -ENODEV;
		}
		dev_warn(dev,
			 "unreliable CPU thermal sensor; check erratum 319\n");
	}

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->pdev = pdev;

	if (boot_cpu_data.x86 == 0x15 &&
	    ((boot_cpu_data.x86_model & 0xf0) == 0x60 ||
	     (boot_cpu_data.x86_model & 0xf0) == 0x70)) {
		data->read_htcreg = read_htcreg_nb_f15;
		data->read_tempreg = read_tempreg_nb_f15;
	} else if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
		data->temp_adjust_mask = CUR_TEMP_RANGE_SEL_MASK;
		data->read_tempreg = read_tempreg_nb_f17;
		data->show_tdie = true;
	} else {
		data->read_htcreg = read_htcreg_pci;
		data->read_tempreg = read_tempreg_pci;
	}

	for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) {
		const struct tctl_offset *entry = &tctl_offset_table[i];

		if (boot_cpu_data.x86 == entry->model &&
		    strstr(boot_cpu_data.x86_model_id, entry->id)) {
			data->temp_offset = entry->offset;
			break;
		}
	}

	hwmon_dev = devm_hwmon_device_register_with_info(dev, "k10temp", data,
							 &k10temp_chip_info,
							 NULL);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}

static const struct pci_device_id k10temp_id_table[] = {
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M70H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
	{ PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{}
};
MODULE_DEVICE_TABLE(pci, k10temp_id_table);

static struct pci_driver k10temp_driver = {
	.name = "k10temp",
	.id_table = k10temp_id_table,
	.probe = k10temp_probe,
};

module_pci_driver(k10temp_driver);