2015-04-20 16:55:21 -04:00
/*
* Copyright 2011 Advanced Micro Devices , Inc .
*
* Permission is hereby granted , free of charge , to any person obtaining a
* copy of this software and associated documentation files ( the " Software " ) ,
* to deal in the Software without restriction , including without limitation
* the rights to use , copy , modify , merge , publish , distribute , sublicense ,
* and / or sell copies of the Software , and to permit persons to whom the
* Software is furnished to do so , subject to the following conditions :
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software .
*
* THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR
* IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY ,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT . IN NO EVENT SHALL
* THE COPYRIGHT HOLDER ( S ) OR AUTHOR ( S ) BE LIABLE FOR ANY CLAIM , DAMAGES OR
* OTHER LIABILITY , WHETHER IN AN ACTION OF CONTRACT , TORT OR OTHERWISE ,
* ARISING FROM , OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE .
*
* Authors : Alex Deucher
*/
2017-04-24 13:50:21 +09:00
# include <drm/drmP.h>
2015-04-20 16:55:21 -04:00
# include "amdgpu.h"
# include "amdgpu_atombios.h"
# include "amdgpu_i2c.h"
# include "amdgpu_dpm.h"
# include "atom.h"
2018-06-25 13:07:50 -05:00
# include "amd_pcie.h"
2015-04-20 16:55:21 -04:00
/**
 * amdgpu_dpm_print_class_info - dump an ATOM power state classification
 * @class: first classification dword from the ATOM_PPLIB state header
 * @class2: second classification dword (CLASSIFICATION2 flags)
 *
 * Decodes the UI class (low bits of @class) and every internal
 * classification flag set in @class/@class2 and prints them to the
 * kernel log for debugging.  The UI class line uses printk; the flag
 * list is built up with pr_cont so it ends up on a single line.
 */
void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
	const char *s;

	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		s = "none";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		s = "battery";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		s = "balanced";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		s = "performance";
		break;
	}
	printk("\tui class: %s\n", s);
	printk("\tinternal class:");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		pr_cont(" none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			pr_cont(" boot");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			pr_cont(" thermal");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			pr_cont(" limited_pwr");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			pr_cont(" rest");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			pr_cont(" forced");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			pr_cont(" 3d_perf");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			pr_cont(" ovrdrv");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			pr_cont(" uvd");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			pr_cont(" 3d_low");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			pr_cont(" acpi");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			pr_cont(" uvd_hd2");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			pr_cont(" uvd_hd");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			pr_cont(" uvd_sd");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			pr_cont(" limited_pwr2");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			pr_cont(" ulv");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			pr_cont(" uvd_mvc");
	}
	pr_cont("\n");
}
/**
 * amdgpu_dpm_print_cap_info - dump the capability flags of a power state
 * @caps: ATOM_PPLIB capability bitmask from the state header
 *
 * Prints the set capability flags on one log line for debugging.
 */
void amdgpu_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps:");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		pr_cont(" single_disp");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		pr_cont(" video");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		pr_cont(" no_dc");
	pr_cont("\n");
}
void amdgpu_dpm_print_ps_status ( struct amdgpu_device * adev ,
struct amdgpu_ps * rps )
{
2017-02-27 17:31:03 -08:00
printk ( " \t status: " ) ;
2015-04-20 16:55:21 -04:00
if ( rps = = adev - > pm . dpm . current_ps )
2017-02-27 17:31:03 -08:00
pr_cont ( " c " ) ;
2015-04-20 16:55:21 -04:00
if ( rps = = adev - > pm . dpm . requested_ps )
2017-02-27 17:31:03 -08:00
pr_cont ( " r " ) ;
2015-04-20 16:55:21 -04:00
if ( rps = = adev - > pm . dpm . boot_ps )
2017-02-27 17:31:03 -08:00
pr_cont ( " b " ) ;
pr_cont ( " \n " ) ;
2015-04-20 16:55:21 -04:00
}
2018-03-26 22:08:29 +08:00
void amdgpu_dpm_get_active_displays ( struct amdgpu_device * adev )
{
struct drm_device * ddev = adev - > ddev ;
struct drm_crtc * crtc ;
struct amdgpu_crtc * amdgpu_crtc ;
adev - > pm . dpm . new_active_crtcs = 0 ;
adev - > pm . dpm . new_active_crtc_count = 0 ;
if ( adev - > mode_info . num_crtc & & adev - > mode_info . mode_config_initialized ) {
list_for_each_entry ( crtc ,
& ddev - > mode_config . crtc_list , head ) {
amdgpu_crtc = to_amdgpu_crtc ( crtc ) ;
if ( amdgpu_crtc - > enabled ) {
adev - > pm . dpm . new_active_crtcs | = ( 1 < < amdgpu_crtc - > crtc_id ) ;
adev - > pm . dpm . new_active_crtc_count + + ;
}
}
}
}
2016-10-10 15:57:21 +08:00
2015-04-20 16:55:21 -04:00
u32 amdgpu_dpm_get_vblank_time ( struct amdgpu_device * adev )
{
struct drm_device * dev = adev - > ddev ;
struct drm_crtc * crtc ;
struct amdgpu_crtc * amdgpu_crtc ;
2016-10-10 15:57:21 +08:00
u32 vblank_in_pixels ;
2015-04-20 16:55:21 -04:00
u32 vblank_time_us = 0xffffffff ; /* if the displays are off, vblank time is max */
if ( adev - > mode_info . num_crtc & & adev - > mode_info . mode_config_initialized ) {
list_for_each_entry ( crtc , & dev - > mode_config . crtc_list , head ) {
amdgpu_crtc = to_amdgpu_crtc ( crtc ) ;
if ( crtc - > enabled & & amdgpu_crtc - > enabled & & amdgpu_crtc - > hw_mode . clock ) {
2016-10-10 15:57:21 +08:00
vblank_in_pixels =
amdgpu_crtc - > hw_mode . crtc_htotal *
( amdgpu_crtc - > hw_mode . crtc_vblank_end -
2015-04-20 16:55:21 -04:00
amdgpu_crtc - > hw_mode . crtc_vdisplay +
2016-10-10 15:57:21 +08:00
( amdgpu_crtc - > v_border * 2 ) ) ;
vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc - > hw_mode . clock ;
2015-04-20 16:55:21 -04:00
break ;
}
}
}
return vblank_time_us ;
}
u32 amdgpu_dpm_get_vrefresh ( struct amdgpu_device * adev )
{
struct drm_device * dev = adev - > ddev ;
struct drm_crtc * crtc ;
struct amdgpu_crtc * amdgpu_crtc ;
u32 vrefresh = 0 ;
if ( adev - > mode_info . num_crtc & & adev - > mode_info . mode_config_initialized ) {
list_for_each_entry ( crtc , & dev - > mode_config . crtc_list , head ) {
amdgpu_crtc = to_amdgpu_crtc ( crtc ) ;
if ( crtc - > enabled & & amdgpu_crtc - > enabled & & amdgpu_crtc - > hw_mode . clock ) {
2016-05-02 10:24:41 -04:00
vrefresh = drm_mode_vrefresh ( & amdgpu_crtc - > hw_mode ) ;
2015-04-20 16:55:21 -04:00
break ;
}
}
}
return vrefresh ;
}
/**
 * amdgpu_is_internal_thermal_sensor - is the thermal sensor on-die?
 * @sensor: thermal sensor type parsed from the VBIOS
 *
 * Returns true for sensor types that live in the GPU itself.  The
 * ADT7473/EMC2103 "with internal" combinations pair an internal sensor
 * with an external fan controller and report false so they get special
 * handling elsewhere.  Unknown and purely external types are false.
 */
bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
{
	switch (sensor) {
	/* fully internal, on-die sensors */
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	/* internal sensor driven through an external controller */
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	/* THERMAL_TYPE_NONE, EXTERNAL, EXTERNAL_GPIO and anything else */
	default:
		return false;
	}
}
/*
 * All revisions of the ATOM PowerPlay info table share one BIOS data
 * block; this union lets the parser view it as whichever revision the
 * table header reports.
 */
union power_info {
struct _ATOM_POWERPLAY_INFO info ;
struct _ATOM_POWERPLAY_INFO_V2 info_2 ;
struct _ATOM_POWERPLAY_INFO_V3 info_3 ;
struct _ATOM_PPLIB_POWERPLAYTABLE pplib ;
struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2 ;
struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3 ;
struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4 ;
struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5 ;
} ;
/*
 * Overlay for the revision-dependent ATOM PPLIB fan table; the
 * ucFanTableFormat field selects which member is valid.
 */
union fan_info {
struct _ATOM_PPLIB_FANTABLE fan ;
struct _ATOM_PPLIB_FANTABLE2 fan2 ;
struct _ATOM_PPLIB_FANTABLE3 fan3 ;
} ;
/**
 * amdgpu_parse_clk_voltage_dep_table - copy a clock/voltage table from VBIOS
 * @amdgpu_table: driver-side table to fill (entries are allocated here)
 * @atom_table: packed ATOM table from the BIOS image
 *
 * Allocates amdgpu_table->entries (caller owns it; freed by
 * amdgpu_free_extended_power_table()) and converts each packed
 * little-endian record into the driver representation.
 *
 * Returns 0 on success, -ENOMEM if the allocation fails.
 */
static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	/* kcalloc checks the count * size multiplication for overflow */
	amdgpu_table->entries = kcalloc(atom_table->ucNumEntries,
					sizeof(struct amdgpu_clock_voltage_dependency_entry),
					GFP_KERNEL);
	if (!amdgpu_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		/* clock is split into a le16 low word and a raw high byte */
		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		/* records are packed; step by the record size, not array math */
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	amdgpu_table->count = atom_table->ucNumEntries;

	return 0;
}
int amdgpu_get_platform_caps ( struct amdgpu_device * adev )
{
struct amdgpu_mode_info * mode_info = & adev - > mode_info ;
union power_info * power_info ;
int index = GetIndexIntoMasterTable ( DATA , PowerPlayInfo ) ;
u16 data_offset ;
u8 frev , crev ;
if ( ! amdgpu_atom_parse_data_header ( mode_info - > atom_context , index , NULL ,
& frev , & crev , & data_offset ) )
return - EINVAL ;
power_info = ( union power_info * ) ( mode_info - > atom_context - > bios + data_offset ) ;
adev - > pm . dpm . platform_caps = le32_to_cpu ( power_info - > pplib . ulPlatformCaps ) ;
adev - > pm . dpm . backbias_response_time = le16_to_cpu ( power_info - > pplib . usBackbiasTime ) ;
adev - > pm . dpm . voltage_response_time = le16_to_cpu ( power_info - > pplib . usVoltageTime ) ;
return 0 ;
}
/*
 * sizeof(ATOM_PPLIB_EXTENDEDHEADER) for each header revision; used to
 * decide which optional sub-tables (VCE, UVD, SAMU, PPM, ...) a given
 * VBIOS extended header can legally reference.
 */
# define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
# define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
# define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
# define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
# define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
# define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
# define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
# define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26
int amdgpu_parse_extended_power_table ( struct amdgpu_device * adev )
{
struct amdgpu_mode_info * mode_info = & adev - > mode_info ;
union power_info * power_info ;
union fan_info * fan_info ;
ATOM_PPLIB_Clock_Voltage_Dependency_Table * dep_table ;
int index = GetIndexIntoMasterTable ( DATA , PowerPlayInfo ) ;
u16 data_offset ;
u8 frev , crev ;
int ret , i ;
if ( ! amdgpu_atom_parse_data_header ( mode_info - > atom_context , index , NULL ,
& frev , & crev , & data_offset ) )
return - EINVAL ;
power_info = ( union power_info * ) ( mode_info - > atom_context - > bios + data_offset ) ;
/* fan table */
if ( le16_to_cpu ( power_info - > pplib . usTableSize ) > =
sizeof ( struct _ATOM_PPLIB_POWERPLAYTABLE3 ) ) {
if ( power_info - > pplib3 . usFanTableOffset ) {
fan_info = ( union fan_info * ) ( mode_info - > atom_context - > bios + data_offset +
le16_to_cpu ( power_info - > pplib3 . usFanTableOffset ) ) ;
adev - > pm . dpm . fan . t_hyst = fan_info - > fan . ucTHyst ;
adev - > pm . dpm . fan . t_min = le16_to_cpu ( fan_info - > fan . usTMin ) ;
adev - > pm . dpm . fan . t_med = le16_to_cpu ( fan_info - > fan . usTMed ) ;
adev - > pm . dpm . fan . t_high = le16_to_cpu ( fan_info - > fan . usTHigh ) ;
adev - > pm . dpm . fan . pwm_min = le16_to_cpu ( fan_info - > fan . usPWMMin ) ;
adev - > pm . dpm . fan . pwm_med = le16_to_cpu ( fan_info - > fan . usPWMMed ) ;
adev - > pm . dpm . fan . pwm_high = le16_to_cpu ( fan_info - > fan . usPWMHigh ) ;
if ( fan_info - > fan . ucFanTableFormat > = 2 )
adev - > pm . dpm . fan . t_max = le16_to_cpu ( fan_info - > fan2 . usTMax ) ;
else
adev - > pm . dpm . fan . t_max = 10900 ;
adev - > pm . dpm . fan . cycle_delay = 100000 ;
if ( fan_info - > fan . ucFanTableFormat > = 3 ) {
adev - > pm . dpm . fan . control_mode = fan_info - > fan3 . ucFanControlMode ;
adev - > pm . dpm . fan . default_max_fan_pwm =
le16_to_cpu ( fan_info - > fan3 . usFanPWMMax ) ;
adev - > pm . dpm . fan . default_fan_output_sensitivity = 4836 ;
adev - > pm . dpm . fan . fan_output_sensitivity =
le16_to_cpu ( fan_info - > fan3 . usFanOutputSensitivity ) ;
}
adev - > pm . dpm . fan . ucode_fan_control = true ;
}
}
/* clock dependancy tables, shedding tables */
if ( le16_to_cpu ( power_info - > pplib . usTableSize ) > =
sizeof ( struct _ATOM_PPLIB_POWERPLAYTABLE4 ) ) {
if ( power_info - > pplib4 . usVddcDependencyOnSCLKOffset ) {
dep_table = ( ATOM_PPLIB_Clock_Voltage_Dependency_Table * )
( mode_info - > atom_context - > bios + data_offset +
le16_to_cpu ( power_info - > pplib4 . usVddcDependencyOnSCLKOffset ) ) ;
ret = amdgpu_parse_clk_voltage_dep_table ( & adev - > pm . dpm . dyn_state . vddc_dependency_on_sclk ,
dep_table ) ;
if ( ret ) {
amdgpu_free_extended_power_table ( adev ) ;
return ret ;
}
}
if ( power_info - > pplib4 . usVddciDependencyOnMCLKOffset ) {
dep_table = ( ATOM_PPLIB_Clock_Voltage_Dependency_Table * )
( mode_info - > atom_context - > bios + data_offset +
le16_to_cpu ( power_info - > pplib4 . usVddciDependencyOnMCLKOffset ) ) ;
ret = amdgpu_parse_clk_voltage_dep_table ( & adev - > pm . dpm . dyn_state . vddci_dependency_on_mclk ,
dep_table ) ;
if ( ret ) {
amdgpu_free_extended_power_table ( adev ) ;
return ret ;
}
}
if ( power_info - > pplib4 . usVddcDependencyOnMCLKOffset ) {
dep_table = ( ATOM_PPLIB_Clock_Voltage_Dependency_Table * )
( mode_info - > atom_context - > bios + data_offset +
le16_to_cpu ( power_info - > pplib4 . usVddcDependencyOnMCLKOffset ) ) ;
ret = amdgpu_parse_clk_voltage_dep_table ( & adev - > pm . dpm . dyn_state . vddc_dependency_on_mclk ,
dep_table ) ;
if ( ret ) {
amdgpu_free_extended_power_table ( adev ) ;
return ret ;
}
}
if ( power_info - > pplib4 . usMvddDependencyOnMCLKOffset ) {
dep_table = ( ATOM_PPLIB_Clock_Voltage_Dependency_Table * )
( mode_info - > atom_context - > bios + data_offset +
le16_to_cpu ( power_info - > pplib4 . usMvddDependencyOnMCLKOffset ) ) ;
ret = amdgpu_parse_clk_voltage_dep_table ( & adev - > pm . dpm . dyn_state . mvdd_dependency_on_mclk ,
dep_table ) ;
if ( ret ) {
amdgpu_free_extended_power_table ( adev ) ;
return ret ;
}
}
if ( power_info - > pplib4 . usMaxClockVoltageOnDCOffset ) {
ATOM_PPLIB_Clock_Voltage_Limit_Table * clk_v =
( ATOM_PPLIB_Clock_Voltage_Limit_Table * )
( mode_info - > atom_context - > bios + data_offset +
le16_to_cpu ( power_info - > pplib4 . usMaxClockVoltageOnDCOffset ) ) ;
if ( clk_v - > ucNumEntries ) {
adev - > pm . dpm . dyn_state . max_clock_voltage_on_dc . sclk =
le16_to_cpu ( clk_v - > entries [ 0 ] . usSclkLow ) |
( clk_v - > entries [ 0 ] . ucSclkHigh < < 16 ) ;
adev - > pm . dpm . dyn_state . max_clock_voltage_on_dc . mclk =
le16_to_cpu ( clk_v - > entries [ 0 ] . usMclkLow ) |
( clk_v - > entries [ 0 ] . ucMclkHigh < < 16 ) ;
adev - > pm . dpm . dyn_state . max_clock_voltage_on_dc . vddc =
le16_to_cpu ( clk_v - > entries [ 0 ] . usVddc ) ;
adev - > pm . dpm . dyn_state . max_clock_voltage_on_dc . vddci =
le16_to_cpu ( clk_v - > entries [ 0 ] . usVddci ) ;
}
}
if ( power_info - > pplib4 . usVddcPhaseShedLimitsTableOffset ) {
ATOM_PPLIB_PhaseSheddingLimits_Table * psl =
( ATOM_PPLIB_PhaseSheddingLimits_Table * )
( mode_info - > atom_context - > bios + data_offset +
le16_to_cpu ( power_info - > pplib4 . usVddcPhaseShedLimitsTableOffset ) ) ;
ATOM_PPLIB_PhaseSheddingLimits_Record * entry ;
adev - > pm . dpm . dyn_state . phase_shedding_limits_table . entries =
treewide: kzalloc() -> kcalloc()
The kzalloc() function has a 2-factor argument form, kcalloc(). This
patch replaces cases of:
kzalloc(a * b, gfp)
with:
kcalloc(a * b, gfp)
as well as handling cases of:
kzalloc(a * b * c, gfp)
with:
kzalloc(array3_size(a, b, c), gfp)
as it's slightly less ugly than:
kzalloc_array(array_size(a, b), c, gfp)
This does, however, attempt to ignore constant size factors like:
kzalloc(4 * 1024, gfp)
though any constants defined via macros get caught up in the conversion.
Any factors with a sizeof() of "unsigned char", "char", and "u8" were
dropped, since they're redundant.
The Coccinelle script used for this was:
// Fix redundant parens around sizeof().
@@
type TYPE;
expression THING, E;
@@
(
kzalloc(
- (sizeof(TYPE)) * E
+ sizeof(TYPE) * E
, ...)
|
kzalloc(
- (sizeof(THING)) * E
+ sizeof(THING) * E
, ...)
)
// Drop single-byte sizes and redundant parens.
@@
expression COUNT;
typedef u8;
typedef __u8;
@@
(
kzalloc(
- sizeof(u8) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(__u8) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(char) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(unsigned char) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(u8) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(__u8) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(char) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(unsigned char) * COUNT
+ COUNT
, ...)
)
// 2-factor product with sizeof(type/expression) and identifier or constant.
@@
type TYPE;
expression THING;
identifier COUNT_ID;
constant COUNT_CONST;
@@
(
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (COUNT_ID)
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * COUNT_ID
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (COUNT_CONST)
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * COUNT_CONST
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (COUNT_ID)
+ COUNT_ID, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * COUNT_ID
+ COUNT_ID, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (COUNT_CONST)
+ COUNT_CONST, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * COUNT_CONST
+ COUNT_CONST, sizeof(THING)
, ...)
)
// 2-factor product, only identifiers.
@@
identifier SIZE, COUNT;
@@
- kzalloc
+ kcalloc
(
- SIZE * COUNT
+ COUNT, SIZE
, ...)
// 3-factor product with 1 sizeof(type) or sizeof(expression), with
// redundant parens removed.
@@
expression THING;
identifier STRIDE, COUNT;
type TYPE;
@@
(
kzalloc(
- sizeof(TYPE) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(THING) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
)
// 3-factor product with 2 sizeof(variable), with redundant parens removed.
@@
expression THING1, THING2;
identifier COUNT;
type TYPE1, TYPE2;
@@
(
kzalloc(
- sizeof(TYPE1) * sizeof(TYPE2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kzalloc(
- sizeof(THING1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(THING1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
)
// 3-factor product, only identifiers, with redundant parens removed.
@@
identifier STRIDE, SIZE, COUNT;
@@
(
kzalloc(
- (COUNT) * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
)
// Any remaining multi-factor products, first at least 3-factor products,
// when they're not all constants...
@@
expression E1, E2, E3;
constant C1, C2, C3;
@@
(
kzalloc(C1 * C2 * C3, ...)
|
kzalloc(
- (E1) * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- (E1) * (E2) * E3
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- (E1) * (E2) * (E3)
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- E1 * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
)
// And then all remaining 2 factors products when they're not all constants,
// keeping sizeof() as the second factor argument.
@@
expression THING, E1, E2;
type TYPE;
constant C1, C2, C3;
@@
(
kzalloc(sizeof(THING) * C2, ...)
|
kzalloc(sizeof(TYPE) * C2, ...)
|
kzalloc(C1 * C2 * C3, ...)
|
kzalloc(C1 * C2, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (E2)
+ E2, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * E2
+ E2, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (E2)
+ E2, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * E2
+ E2, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- (E1) * E2
+ E1, E2
, ...)
|
- kzalloc
+ kcalloc
(
- (E1) * (E2)
+ E1, E2
, ...)
|
- kzalloc
+ kcalloc
(
- E1 * E2
+ E1, E2
, ...)
)
Signed-off-by: Kees Cook <keescook@chromium.org>
2018-06-12 14:03:40 -07:00
kcalloc ( psl - > ucNumEntries ,
2015-04-20 16:55:21 -04:00
sizeof ( struct amdgpu_phase_shedding_limits_entry ) ,
GFP_KERNEL ) ;
if ( ! adev - > pm . dpm . dyn_state . phase_shedding_limits_table . entries ) {
amdgpu_free_extended_power_table ( adev ) ;
return - ENOMEM ;
}
entry = & psl - > entries [ 0 ] ;
for ( i = 0 ; i < psl - > ucNumEntries ; i + + ) {
adev - > pm . dpm . dyn_state . phase_shedding_limits_table . entries [ i ] . sclk =
le16_to_cpu ( entry - > usSclkLow ) | ( entry - > ucSclkHigh < < 16 ) ;
adev - > pm . dpm . dyn_state . phase_shedding_limits_table . entries [ i ] . mclk =
le16_to_cpu ( entry - > usMclkLow ) | ( entry - > ucMclkHigh < < 16 ) ;
adev - > pm . dpm . dyn_state . phase_shedding_limits_table . entries [ i ] . voltage =
le16_to_cpu ( entry - > usVoltage ) ;
entry = ( ATOM_PPLIB_PhaseSheddingLimits_Record * )
( ( u8 * ) entry + sizeof ( ATOM_PPLIB_PhaseSheddingLimits_Record ) ) ;
}
adev - > pm . dpm . dyn_state . phase_shedding_limits_table . count =
psl - > ucNumEntries ;
}
}
/* cac data */
if ( le16_to_cpu ( power_info - > pplib . usTableSize ) > =
sizeof ( struct _ATOM_PPLIB_POWERPLAYTABLE5 ) ) {
adev - > pm . dpm . tdp_limit = le32_to_cpu ( power_info - > pplib5 . ulTDPLimit ) ;
adev - > pm . dpm . near_tdp_limit = le32_to_cpu ( power_info - > pplib5 . ulNearTDPLimit ) ;
adev - > pm . dpm . near_tdp_limit_adjusted = adev - > pm . dpm . near_tdp_limit ;
adev - > pm . dpm . tdp_od_limit = le16_to_cpu ( power_info - > pplib5 . usTDPODLimit ) ;
if ( adev - > pm . dpm . tdp_od_limit )
adev - > pm . dpm . power_control = true ;
else
adev - > pm . dpm . power_control = false ;
adev - > pm . dpm . tdp_adjustment = 0 ;
adev - > pm . dpm . sq_ramping_threshold = le32_to_cpu ( power_info - > pplib5 . ulSQRampingThreshold ) ;
adev - > pm . dpm . cac_leakage = le32_to_cpu ( power_info - > pplib5 . ulCACLeakage ) ;
adev - > pm . dpm . load_line_slope = le16_to_cpu ( power_info - > pplib5 . usLoadLineSlope ) ;
if ( power_info - > pplib5 . usCACLeakageTableOffset ) {
ATOM_PPLIB_CAC_Leakage_Table * cac_table =
( ATOM_PPLIB_CAC_Leakage_Table * )
( mode_info - > atom_context - > bios + data_offset +
le16_to_cpu ( power_info - > pplib5 . usCACLeakageTableOffset ) ) ;
ATOM_PPLIB_CAC_Leakage_Record * entry ;
u32 size = cac_table - > ucNumEntries * sizeof ( struct amdgpu_cac_leakage_table ) ;
adev - > pm . dpm . dyn_state . cac_leakage_table . entries = kzalloc ( size , GFP_KERNEL ) ;
if ( ! adev - > pm . dpm . dyn_state . cac_leakage_table . entries ) {
amdgpu_free_extended_power_table ( adev ) ;
return - ENOMEM ;
}
entry = & cac_table - > entries [ 0 ] ;
for ( i = 0 ; i < cac_table - > ucNumEntries ; i + + ) {
if ( adev - > pm . dpm . platform_caps & ATOM_PP_PLATFORM_CAP_EVV ) {
adev - > pm . dpm . dyn_state . cac_leakage_table . entries [ i ] . vddc1 =
le16_to_cpu ( entry - > usVddc1 ) ;
adev - > pm . dpm . dyn_state . cac_leakage_table . entries [ i ] . vddc2 =
le16_to_cpu ( entry - > usVddc2 ) ;
adev - > pm . dpm . dyn_state . cac_leakage_table . entries [ i ] . vddc3 =
le16_to_cpu ( entry - > usVddc3 ) ;
} else {
adev - > pm . dpm . dyn_state . cac_leakage_table . entries [ i ] . vddc =
le16_to_cpu ( entry - > usVddc ) ;
adev - > pm . dpm . dyn_state . cac_leakage_table . entries [ i ] . leakage =
le32_to_cpu ( entry - > ulLeakageValue ) ;
}
entry = ( ATOM_PPLIB_CAC_Leakage_Record * )
( ( u8 * ) entry + sizeof ( ATOM_PPLIB_CAC_Leakage_Record ) ) ;
}
adev - > pm . dpm . dyn_state . cac_leakage_table . count = cac_table - > ucNumEntries ;
}
}
/* ext tables */
if ( le16_to_cpu ( power_info - > pplib . usTableSize ) > =
sizeof ( struct _ATOM_PPLIB_POWERPLAYTABLE3 ) ) {
ATOM_PPLIB_EXTENDEDHEADER * ext_hdr = ( ATOM_PPLIB_EXTENDEDHEADER * )
( mode_info - > atom_context - > bios + data_offset +
le16_to_cpu ( power_info - > pplib3 . usExtendendedHeaderOffset ) ) ;
if ( ( le16_to_cpu ( ext_hdr - > usSize ) > = SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 ) & &
ext_hdr - > usVCETableOffset ) {
VCEClockInfoArray * array = ( VCEClockInfoArray * )
( mode_info - > atom_context - > bios + data_offset +
le16_to_cpu ( ext_hdr - > usVCETableOffset ) + 1 ) ;
ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table * limits =
( ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table * )
( mode_info - > atom_context - > bios + data_offset +
le16_to_cpu ( ext_hdr - > usVCETableOffset ) + 1 +
1 + array - > ucNumEntries * sizeof ( VCEClockInfo ) ) ;
ATOM_PPLIB_VCE_State_Table * states =
( ATOM_PPLIB_VCE_State_Table * )
( mode_info - > atom_context - > bios + data_offset +
le16_to_cpu ( ext_hdr - > usVCETableOffset ) + 1 +
1 + ( array - > ucNumEntries * sizeof ( VCEClockInfo ) ) +
1 + ( limits - > numEntries * sizeof ( ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record ) ) ) ;
ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record * entry ;
ATOM_PPLIB_VCE_State_Record * state_entry ;
VCEClockInfo * vce_clk ;
u32 size = limits - > numEntries *
sizeof ( struct amdgpu_vce_clock_voltage_dependency_entry ) ;
adev - > pm . dpm . dyn_state . vce_clock_voltage_dependency_table . entries =
kzalloc ( size , GFP_KERNEL ) ;
if ( ! adev - > pm . dpm . dyn_state . vce_clock_voltage_dependency_table . entries ) {
amdgpu_free_extended_power_table ( adev ) ;
return - ENOMEM ;
}
adev - > pm . dpm . dyn_state . vce_clock_voltage_dependency_table . count =
limits - > numEntries ;
entry = & limits - > entries [ 0 ] ;
state_entry = & states - > entries [ 0 ] ;
for ( i = 0 ; i < limits - > numEntries ; i + + ) {
vce_clk = ( VCEClockInfo * )
( ( u8 * ) & array - > entries [ 0 ] +
( entry - > ucVCEClockInfoIndex * sizeof ( VCEClockInfo ) ) ) ;
adev - > pm . dpm . dyn_state . vce_clock_voltage_dependency_table . entries [ i ] . evclk =
le16_to_cpu ( vce_clk - > usEVClkLow ) | ( vce_clk - > ucEVClkHigh < < 16 ) ;
adev - > pm . dpm . dyn_state . vce_clock_voltage_dependency_table . entries [ i ] . ecclk =
le16_to_cpu ( vce_clk - > usECClkLow ) | ( vce_clk - > ucECClkHigh < < 16 ) ;
adev - > pm . dpm . dyn_state . vce_clock_voltage_dependency_table . entries [ i ] . v =
le16_to_cpu ( entry - > usVoltage ) ;
entry = ( ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record * )
( ( u8 * ) entry + sizeof ( ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record ) ) ;
}
2016-10-12 15:38:56 +08:00
adev - > pm . dpm . num_of_vce_states =
states - > numEntries > AMD_MAX_VCE_LEVELS ?
AMD_MAX_VCE_LEVELS : states - > numEntries ;
for ( i = 0 ; i < adev - > pm . dpm . num_of_vce_states ; i + + ) {
2015-04-20 16:55:21 -04:00
vce_clk = ( VCEClockInfo * )
( ( u8 * ) & array - > entries [ 0 ] +
( state_entry - > ucVCEClockInfoIndex * sizeof ( VCEClockInfo ) ) ) ;
adev - > pm . dpm . vce_states [ i ] . evclk =
le16_to_cpu ( vce_clk - > usEVClkLow ) | ( vce_clk - > ucEVClkHigh < < 16 ) ;
adev - > pm . dpm . vce_states [ i ] . ecclk =
le16_to_cpu ( vce_clk - > usECClkLow ) | ( vce_clk - > ucECClkHigh < < 16 ) ;
adev - > pm . dpm . vce_states [ i ] . clk_idx =
state_entry - > ucClockInfoIndex & 0x3f ;
adev - > pm . dpm . vce_states [ i ] . pstate =
( state_entry - > ucClockInfoIndex & 0xc0 ) > > 6 ;
state_entry = ( ATOM_PPLIB_VCE_State_Record * )
( ( u8 * ) state_entry + sizeof ( ATOM_PPLIB_VCE_State_Record ) ) ;
}
}
if ( ( le16_to_cpu ( ext_hdr - > usSize ) > = SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 ) & &
ext_hdr - > usUVDTableOffset ) {
UVDClockInfoArray * array = ( UVDClockInfoArray * )
( mode_info - > atom_context - > bios + data_offset +
le16_to_cpu ( ext_hdr - > usUVDTableOffset ) + 1 ) ;
ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table * limits =
( ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table * )
( mode_info - > atom_context - > bios + data_offset +
le16_to_cpu ( ext_hdr - > usUVDTableOffset ) + 1 +
1 + ( array - > ucNumEntries * sizeof ( UVDClockInfo ) ) ) ;
ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record * entry ;
u32 size = limits - > numEntries *
sizeof ( struct amdgpu_uvd_clock_voltage_dependency_entry ) ;
adev - > pm . dpm . dyn_state . uvd_clock_voltage_dependency_table . entries =
kzalloc ( size , GFP_KERNEL ) ;
if ( ! adev - > pm . dpm . dyn_state . uvd_clock_voltage_dependency_table . entries ) {
amdgpu_free_extended_power_table ( adev ) ;
return - ENOMEM ;
}
adev - > pm . dpm . dyn_state . uvd_clock_voltage_dependency_table . count =
limits - > numEntries ;
entry = & limits - > entries [ 0 ] ;
for ( i = 0 ; i < limits - > numEntries ; i + + ) {
UVDClockInfo * uvd_clk = ( UVDClockInfo * )
( ( u8 * ) & array - > entries [ 0 ] +
( entry - > ucUVDClockInfoIndex * sizeof ( UVDClockInfo ) ) ) ;
adev - > pm . dpm . dyn_state . uvd_clock_voltage_dependency_table . entries [ i ] . vclk =
le16_to_cpu ( uvd_clk - > usVClkLow ) | ( uvd_clk - > ucVClkHigh < < 16 ) ;
adev - > pm . dpm . dyn_state . uvd_clock_voltage_dependency_table . entries [ i ] . dclk =
le16_to_cpu ( uvd_clk - > usDClkLow ) | ( uvd_clk - > ucDClkHigh < < 16 ) ;
adev - > pm . dpm . dyn_state . uvd_clock_voltage_dependency_table . entries [ i ] . v =
le16_to_cpu ( entry - > usVoltage ) ;
entry = ( ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record * )
( ( u8 * ) entry + sizeof ( ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record ) ) ;
}
}
if ( ( le16_to_cpu ( ext_hdr - > usSize ) > = SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 ) & &
ext_hdr - > usSAMUTableOffset ) {
ATOM_PPLIB_SAMClk_Voltage_Limit_Table * limits =
( ATOM_PPLIB_SAMClk_Voltage_Limit_Table * )
( mode_info - > atom_context - > bios + data_offset +
le16_to_cpu ( ext_hdr - > usSAMUTableOffset ) + 1 ) ;
ATOM_PPLIB_SAMClk_Voltage_Limit_Record * entry ;
u32 size = limits - > numEntries *
sizeof ( struct amdgpu_clock_voltage_dependency_entry ) ;
adev - > pm . dpm . dyn_state . samu_clock_voltage_dependency_table . entries =
kzalloc ( size , GFP_KERNEL ) ;
if ( ! adev - > pm . dpm . dyn_state . samu_clock_voltage_dependency_table . entries ) {
amdgpu_free_extended_power_table ( adev ) ;
return - ENOMEM ;
}
adev - > pm . dpm . dyn_state . samu_clock_voltage_dependency_table . count =
limits - > numEntries ;
entry = & limits - > entries [ 0 ] ;
for ( i = 0 ; i < limits - > numEntries ; i + + ) {
adev - > pm . dpm . dyn_state . samu_clock_voltage_dependency_table . entries [ i ] . clk =
le16_to_cpu ( entry - > usSAMClockLow ) | ( entry - > ucSAMClockHigh < < 16 ) ;
adev - > pm . dpm . dyn_state . samu_clock_voltage_dependency_table . entries [ i ] . v =
le16_to_cpu ( entry - > usVoltage ) ;
entry = ( ATOM_PPLIB_SAMClk_Voltage_Limit_Record * )
( ( u8 * ) entry + sizeof ( ATOM_PPLIB_SAMClk_Voltage_Limit_Record ) ) ;
}
}
if ( ( le16_to_cpu ( ext_hdr - > usSize ) > = SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 ) & &
ext_hdr - > usPPMTableOffset ) {
ATOM_PPLIB_PPM_Table * ppm = ( ATOM_PPLIB_PPM_Table * )
( mode_info - > atom_context - > bios + data_offset +
le16_to_cpu ( ext_hdr - > usPPMTableOffset ) ) ;
adev - > pm . dpm . dyn_state . ppm_table =
kzalloc ( sizeof ( struct amdgpu_ppm_table ) , GFP_KERNEL ) ;
if ( ! adev - > pm . dpm . dyn_state . ppm_table ) {
amdgpu_free_extended_power_table ( adev ) ;
return - ENOMEM ;
}
adev - > pm . dpm . dyn_state . ppm_table - > ppm_design = ppm - > ucPpmDesign ;
adev - > pm . dpm . dyn_state . ppm_table - > cpu_core_number =
le16_to_cpu ( ppm - > usCpuCoreNumber ) ;
adev - > pm . dpm . dyn_state . ppm_table - > platform_tdp =
le32_to_cpu ( ppm - > ulPlatformTDP ) ;
adev - > pm . dpm . dyn_state . ppm_table - > small_ac_platform_tdp =
le32_to_cpu ( ppm - > ulSmallACPlatformTDP ) ;
adev - > pm . dpm . dyn_state . ppm_table - > platform_tdc =
le32_to_cpu ( ppm - > ulPlatformTDC ) ;
adev - > pm . dpm . dyn_state . ppm_table - > small_ac_platform_tdc =
le32_to_cpu ( ppm - > ulSmallACPlatformTDC ) ;
adev - > pm . dpm . dyn_state . ppm_table - > apu_tdp =
le32_to_cpu ( ppm - > ulApuTDP ) ;
adev - > pm . dpm . dyn_state . ppm_table - > dgpu_tdp =
le32_to_cpu ( ppm - > ulDGpuTDP ) ;
adev - > pm . dpm . dyn_state . ppm_table - > dgpu_ulv_power =
le32_to_cpu ( ppm - > ulDGpuUlvPower ) ;
adev - > pm . dpm . dyn_state . ppm_table - > tj_max =
le32_to_cpu ( ppm - > ulTjmax ) ;
}
if ( ( le16_to_cpu ( ext_hdr - > usSize ) > = SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 ) & &
ext_hdr - > usACPTableOffset ) {
ATOM_PPLIB_ACPClk_Voltage_Limit_Table * limits =
( ATOM_PPLIB_ACPClk_Voltage_Limit_Table * )
( mode_info - > atom_context - > bios + data_offset +
le16_to_cpu ( ext_hdr - > usACPTableOffset ) + 1 ) ;
ATOM_PPLIB_ACPClk_Voltage_Limit_Record * entry ;
u32 size = limits - > numEntries *
sizeof ( struct amdgpu_clock_voltage_dependency_entry ) ;
adev - > pm . dpm . dyn_state . acp_clock_voltage_dependency_table . entries =
kzalloc ( size , GFP_KERNEL ) ;
if ( ! adev - > pm . dpm . dyn_state . acp_clock_voltage_dependency_table . entries ) {
amdgpu_free_extended_power_table ( adev ) ;
return - ENOMEM ;
}
adev - > pm . dpm . dyn_state . acp_clock_voltage_dependency_table . count =
limits - > numEntries ;
entry = & limits - > entries [ 0 ] ;
for ( i = 0 ; i < limits - > numEntries ; i + + ) {
adev - > pm . dpm . dyn_state . acp_clock_voltage_dependency_table . entries [ i ] . clk =
le16_to_cpu ( entry - > usACPClockLow ) | ( entry - > ucACPClockHigh < < 16 ) ;
adev - > pm . dpm . dyn_state . acp_clock_voltage_dependency_table . entries [ i ] . v =
le16_to_cpu ( entry - > usVoltage ) ;
entry = ( ATOM_PPLIB_ACPClk_Voltage_Limit_Record * )
( ( u8 * ) entry + sizeof ( ATOM_PPLIB_ACPClk_Voltage_Limit_Record ) ) ;
}
}
if ( ( le16_to_cpu ( ext_hdr - > usSize ) > = SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 ) & &
ext_hdr - > usPowerTuneTableOffset ) {
u8 rev = * ( u8 * ) ( mode_info - > atom_context - > bios + data_offset +
le16_to_cpu ( ext_hdr - > usPowerTuneTableOffset ) ) ;
ATOM_PowerTune_Table * pt ;
adev - > pm . dpm . dyn_state . cac_tdp_table =
kzalloc ( sizeof ( struct amdgpu_cac_tdp_table ) , GFP_KERNEL ) ;
if ( ! adev - > pm . dpm . dyn_state . cac_tdp_table ) {
amdgpu_free_extended_power_table ( adev ) ;
return - ENOMEM ;
}
if ( rev > 0 ) {
ATOM_PPLIB_POWERTUNE_Table_V1 * ppt = ( ATOM_PPLIB_POWERTUNE_Table_V1 * )
( mode_info - > atom_context - > bios + data_offset +
le16_to_cpu ( ext_hdr - > usPowerTuneTableOffset ) ) ;
adev - > pm . dpm . dyn_state . cac_tdp_table - > maximum_power_delivery_limit =
ppt - > usMaximumPowerDeliveryLimit ;
pt = & ppt - > power_tune_table ;
} else {
ATOM_PPLIB_POWERTUNE_Table * ppt = ( ATOM_PPLIB_POWERTUNE_Table * )
( mode_info - > atom_context - > bios + data_offset +
le16_to_cpu ( ext_hdr - > usPowerTuneTableOffset ) ) ;
adev - > pm . dpm . dyn_state . cac_tdp_table - > maximum_power_delivery_limit = 255 ;
pt = & ppt - > power_tune_table ;
}
adev - > pm . dpm . dyn_state . cac_tdp_table - > tdp = le16_to_cpu ( pt - > usTDP ) ;
adev - > pm . dpm . dyn_state . cac_tdp_table - > configurable_tdp =
le16_to_cpu ( pt - > usConfigurableTDP ) ;
adev - > pm . dpm . dyn_state . cac_tdp_table - > tdc = le16_to_cpu ( pt - > usTDC ) ;
adev - > pm . dpm . dyn_state . cac_tdp_table - > battery_power_limit =
le16_to_cpu ( pt - > usBatteryPowerLimit ) ;
adev - > pm . dpm . dyn_state . cac_tdp_table - > small_power_limit =
le16_to_cpu ( pt - > usSmallPowerLimit ) ;
adev - > pm . dpm . dyn_state . cac_tdp_table - > low_cac_leakage =
le16_to_cpu ( pt - > usLowCACLeakage ) ;
adev - > pm . dpm . dyn_state . cac_tdp_table - > high_cac_leakage =
le16_to_cpu ( pt - > usHighCACLeakage ) ;
}
if ( ( le16_to_cpu ( ext_hdr - > usSize ) > = SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 ) & &
ext_hdr - > usSclkVddgfxTableOffset ) {
dep_table = ( ATOM_PPLIB_Clock_Voltage_Dependency_Table * )
( mode_info - > atom_context - > bios + data_offset +
le16_to_cpu ( ext_hdr - > usSclkVddgfxTableOffset ) ) ;
ret = amdgpu_parse_clk_voltage_dep_table (
& adev - > pm . dpm . dyn_state . vddgfx_dependency_on_sclk ,
dep_table ) ;
if ( ret ) {
kfree ( adev - > pm . dpm . dyn_state . vddgfx_dependency_on_sclk . entries ) ;
return ret ;
}
}
}
return 0 ;
}
void amdgpu_free_extended_power_table ( struct amdgpu_device * adev )
{
struct amdgpu_dpm_dynamic_state * dyn_state = & adev - > pm . dpm . dyn_state ;
kfree ( dyn_state - > vddc_dependency_on_sclk . entries ) ;
kfree ( dyn_state - > vddci_dependency_on_mclk . entries ) ;
kfree ( dyn_state - > vddc_dependency_on_mclk . entries ) ;
kfree ( dyn_state - > mvdd_dependency_on_mclk . entries ) ;
kfree ( dyn_state - > cac_leakage_table . entries ) ;
kfree ( dyn_state - > phase_shedding_limits_table . entries ) ;
kfree ( dyn_state - > ppm_table ) ;
kfree ( dyn_state - > cac_tdp_table ) ;
kfree ( dyn_state - > vce_clock_voltage_dependency_table . entries ) ;
kfree ( dyn_state - > uvd_clock_voltage_dependency_table . entries ) ;
kfree ( dyn_state - > samu_clock_voltage_dependency_table . entries ) ;
kfree ( dyn_state - > acp_clock_voltage_dependency_table . entries ) ;
kfree ( dyn_state - > vddgfx_dependency_on_sclk . entries ) ;
}
static const char * pp_lib_thermal_controller_names [ ] = {
" NONE " ,
" lm63 " ,
" adm1032 " ,
" adm1030 " ,
" max6649 " ,
" lm64 " ,
" f75375 " ,
" RV6xx " ,
" RV770 " ,
" adt7473 " ,
" NONE " ,
" External GPIO " ,
" Evergreen " ,
" emc2103 " ,
" Sumo " ,
" Northern Islands " ,
" Southern Islands " ,
" lm96163 " ,
" Sea Islands " ,
" Kaveri/Kabini " ,
} ;
void amdgpu_add_thermal_controller ( struct amdgpu_device * adev )
{
struct amdgpu_mode_info * mode_info = & adev - > mode_info ;
ATOM_PPLIB_POWERPLAYTABLE * power_table ;
int index = GetIndexIntoMasterTable ( DATA , PowerPlayInfo ) ;
ATOM_PPLIB_THERMALCONTROLLER * controller ;
struct amdgpu_i2c_bus_rec i2c_bus ;
u16 data_offset ;
u8 frev , crev ;
if ( ! amdgpu_atom_parse_data_header ( mode_info - > atom_context , index , NULL ,
& frev , & crev , & data_offset ) )
return ;
power_table = ( ATOM_PPLIB_POWERPLAYTABLE * )
( mode_info - > atom_context - > bios + data_offset ) ;
controller = & power_table - > sThermalController ;
/* add the i2c bus for thermal/fan chip */
if ( controller - > ucType > 0 ) {
if ( controller - > ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN )
adev - > pm . no_fan = true ;
adev - > pm . fan_pulses_per_revolution =
controller - > ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK ;
if ( adev - > pm . fan_pulses_per_revolution ) {
adev - > pm . fan_min_rpm = controller - > ucFanMinRPM ;
adev - > pm . fan_max_rpm = controller - > ucFanMaxRPM ;
}
if ( controller - > ucType = = ATOM_PP_THERMALCONTROLLER_RV6xx ) {
DRM_INFO ( " Internal thermal controller %s fan control \n " ,
( controller - > ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN ) ? " without " : " with " ) ;
adev - > pm . int_thermal_type = THERMAL_TYPE_RV6XX ;
} else if ( controller - > ucType = = ATOM_PP_THERMALCONTROLLER_RV770 ) {
DRM_INFO ( " Internal thermal controller %s fan control \n " ,
( controller - > ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN ) ? " without " : " with " ) ;
adev - > pm . int_thermal_type = THERMAL_TYPE_RV770 ;
} else if ( controller - > ucType = = ATOM_PP_THERMALCONTROLLER_EVERGREEN ) {
DRM_INFO ( " Internal thermal controller %s fan control \n " ,
( controller - > ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN ) ? " without " : " with " ) ;
adev - > pm . int_thermal_type = THERMAL_TYPE_EVERGREEN ;
} else if ( controller - > ucType = = ATOM_PP_THERMALCONTROLLER_SUMO ) {
DRM_INFO ( " Internal thermal controller %s fan control \n " ,
( controller - > ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN ) ? " without " : " with " ) ;
adev - > pm . int_thermal_type = THERMAL_TYPE_SUMO ;
} else if ( controller - > ucType = = ATOM_PP_THERMALCONTROLLER_NISLANDS ) {
DRM_INFO ( " Internal thermal controller %s fan control \n " ,
( controller - > ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN ) ? " without " : " with " ) ;
adev - > pm . int_thermal_type = THERMAL_TYPE_NI ;
} else if ( controller - > ucType = = ATOM_PP_THERMALCONTROLLER_SISLANDS ) {
DRM_INFO ( " Internal thermal controller %s fan control \n " ,
( controller - > ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN ) ? " without " : " with " ) ;
adev - > pm . int_thermal_type = THERMAL_TYPE_SI ;
} else if ( controller - > ucType = = ATOM_PP_THERMALCONTROLLER_CISLANDS ) {
DRM_INFO ( " Internal thermal controller %s fan control \n " ,
( controller - > ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN ) ? " without " : " with " ) ;
adev - > pm . int_thermal_type = THERMAL_TYPE_CI ;
} else if ( controller - > ucType = = ATOM_PP_THERMALCONTROLLER_KAVERI ) {
DRM_INFO ( " Internal thermal controller %s fan control \n " ,
( controller - > ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN ) ? " without " : " with " ) ;
adev - > pm . int_thermal_type = THERMAL_TYPE_KV ;
} else if ( controller - > ucType = = ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO ) {
DRM_INFO ( " External GPIO thermal controller %s fan control \n " ,
( controller - > ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN ) ? " without " : " with " ) ;
adev - > pm . int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO ;
} else if ( controller - > ucType = =
ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL ) {
DRM_INFO ( " ADT7473 with internal thermal controller %s fan control \n " ,
( controller - > ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN ) ? " without " : " with " ) ;
adev - > pm . int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL ;
} else if ( controller - > ucType = =
ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL ) {
DRM_INFO ( " EMC2103 with internal thermal controller %s fan control \n " ,
( controller - > ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN ) ? " without " : " with " ) ;
adev - > pm . int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL ;
} else if ( controller - > ucType < ARRAY_SIZE ( pp_lib_thermal_controller_names ) ) {
DRM_INFO ( " Possible %s thermal controller at 0x%02x %s fan control \n " ,
pp_lib_thermal_controller_names [ controller - > ucType ] ,
controller - > ucI2cAddress > > 1 ,
( controller - > ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN ) ? " without " : " with " ) ;
adev - > pm . int_thermal_type = THERMAL_TYPE_EXTERNAL ;
i2c_bus = amdgpu_atombios_lookup_i2c_gpio ( adev , controller - > ucI2cLine ) ;
adev - > pm . i2c_bus = amdgpu_i2c_lookup ( adev , & i2c_bus ) ;
if ( adev - > pm . i2c_bus ) {
struct i2c_board_info info = { } ;
const char * name = pp_lib_thermal_controller_names [ controller - > ucType ] ;
info . addr = controller - > ucI2cAddress > > 1 ;
strlcpy ( info . type , name , sizeof ( info . type ) ) ;
i2c_new_device ( & adev - > pm . i2c_bus - > adapter , & info ) ;
}
} else {
DRM_INFO ( " Unknown thermal controller type %d at 0x%02x %s fan control \n " ,
controller - > ucType ,
controller - > ucI2cAddress > > 1 ,
( controller - > ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN ) ? " without " : " with " ) ;
}
}
}
/**
 * amdgpu_get_pcie_gen_support - pick the PCIE gen to use
 * @adev: amdgpu device pointer (unused here, kept for API symmetry)
 * @sys_mask: CAIL_PCIE_LINK_SPEED_SUPPORT_* mask of system-supported speeds
 * @asic_gen: gen explicitly requested/forced for the asic, if any
 * @default_gen: gen the caller would prefer when nothing is forced
 *
 * Returns @asic_gen verbatim when it names a specific gen; otherwise
 * returns the fastest gen that both the system mask and @default_gen
 * allow, falling back to GEN1.
 *
 * The unreachable "return AMDGPU_PCIE_GEN1;" that followed the switch
 * (every arm, including default, already returns) has been removed.
 */
enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
						 u32 sys_mask,
						 enum amdgpu_pcie_gen asic_gen,
						 enum amdgpu_pcie_gen default_gen)
{
	switch (asic_gen) {
	case AMDGPU_PCIE_GEN1:
		return AMDGPU_PCIE_GEN1;
	case AMDGPU_PCIE_GEN2:
		return AMDGPU_PCIE_GEN2;
	case AMDGPU_PCIE_GEN3:
		return AMDGPU_PCIE_GEN3;
	default:
		if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
		    (default_gen == AMDGPU_PCIE_GEN3))
			return AMDGPU_PCIE_GEN3;
		else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
			 (default_gen == AMDGPU_PCIE_GEN2))
			return AMDGPU_PCIE_GEN2;
		else
			return AMDGPU_PCIE_GEN1;
	}
}
2016-10-07 12:38:04 -04:00
struct amd_vce_state *
2017-09-06 15:27:59 +08:00
amdgpu_get_vce_clock_state ( void * handle , u32 idx )
2016-10-07 12:38:04 -04:00
{
2017-09-06 15:27:59 +08:00
struct amdgpu_device * adev = ( struct amdgpu_device * ) handle ;
2016-10-07 12:38:04 -04:00
if ( idx < adev - > pm . dpm . num_of_vce_states )
return & adev - > pm . dpm . vce_states [ idx ] ;
return NULL ;
}
2019-01-28 19:12:10 +08:00
/**
 * amdgpu_dpm_get_sclk - query the engine clock
 * @adev: amdgpu device pointer
 * @low: true for the lowest level, false for the highest
 *
 * Dispatches to the software SMU when present, otherwise to the
 * powerplay backend's get_sclk callback.
 */
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	if (is_support_sw_smu(adev))
		return smu_get_sclk(&adev->smu, low);

	return adev->powerplay.pp_funcs->get_sclk(adev->powerplay.pp_handle, low);
}
/**
 * amdgpu_dpm_get_mclk - query the memory clock
 * @adev: amdgpu device pointer
 * @low: true for the lowest level, false for the highest
 *
 * Dispatches to the software SMU when present, otherwise to the
 * powerplay backend's get_mclk callback.
 */
int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	if (is_support_sw_smu(adev))
		return smu_get_mclk(&adev->smu, low);

	return adev->powerplay.pp_funcs->get_mclk(adev->powerplay.pp_handle, low);
}