/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"
#include "r600_dpm.h"
#include "atom.h"

const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
{
	R600_UTC_DFLT_00,
	R600_UTC_DFLT_01,
	R600_UTC_DFLT_02,
	R600_UTC_DFLT_03,
	R600_UTC_DFLT_04,
	R600_UTC_DFLT_05,
	R600_UTC_DFLT_06,
	R600_UTC_DFLT_07,
	R600_UTC_DFLT_08,
	R600_UTC_DFLT_09,
	R600_UTC_DFLT_10,
	R600_UTC_DFLT_11,
	R600_UTC_DFLT_12,
	R600_UTC_DFLT_13,
	R600_UTC_DFLT_14,
};

const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
{
	R600_DTC_DFLT_00,
	R600_DTC_DFLT_01,
	R600_DTC_DFLT_02,
	R600_DTC_DFLT_03,
	R600_DTC_DFLT_04,
	R600_DTC_DFLT_05,
	R600_DTC_DFLT_06,
	R600_DTC_DFLT_07,
	R600_DTC_DFLT_08,
	R600_DTC_DFLT_09,
	R600_DTC_DFLT_10,
	R600_DTC_DFLT_11,
	R600_DTC_DFLT_12,
	R600_DTC_DFLT_13,
	R600_DTC_DFLT_14,
};

void r600_dpm_print_class_info(u32 class, u32 class2)
{
	const char *s;

	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		s = "none";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		s = "battery";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		s = "balanced";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		s = "performance";
		break;
	}
	printk("\tui class: %s\n", s);
	printk("\tinternal class:");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		pr_cont(" none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			pr_cont(" boot");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			pr_cont(" thermal");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			pr_cont(" limited_pwr");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			pr_cont(" rest");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			pr_cont(" forced");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			pr_cont(" 3d_perf");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			pr_cont(" ovrdrv");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			pr_cont(" uvd");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			pr_cont(" 3d_low");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			pr_cont(" acpi");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			pr_cont(" uvd_hd2");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			pr_cont(" uvd_hd");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			pr_cont(" uvd_sd");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			pr_cont(" limited_pwr2");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			pr_cont(" ulv");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			pr_cont(" uvd_mvc");
	}
	pr_cont("\n");
}

void r600_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps:");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		pr_cont(" single_disp");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		pr_cont(" video");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		pr_cont(" no_dc");
	pr_cont("\n");
}

void r600_dpm_print_ps_status(struct radeon_device *rdev,
			      struct radeon_ps *rps)
{
	printk("\tstatus:");
	if (rps == rdev->pm.dpm.current_ps)
		pr_cont(" c");
	if (rps == rdev->pm.dpm.requested_ps)
		pr_cont(" r");
	if (rps == rdev->pm.dpm.boot_ps)
		pr_cont(" b");
	pr_cont("\n");
}
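
/*
 * A worked example of the vblank-time math below (a sketch; the mode
 * timings are illustrative CEA 1080p60 values, not read from hardware):
 * htotal = 2200, vblank_end = 1125, vdisplay = 1080, no borders, and
 * hw_mode.clock = 148500 kHz.  vblank_in_pixels = 2200 * 45 = 99000, so
 * vblank_time_us = 99000 * 1000 / 148500 = 666 us.  The mode clock is
 * in kHz, which is why multiplying pixels by 1000 yields microseconds.
 */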
u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 vblank_in_pixels;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
				vblank_in_pixels =
					radeon_crtc->hw_mode.crtc_htotal *
					(radeon_crtc->hw_mode.crtc_vblank_end -
					 radeon_crtc->hw_mode.crtc_vdisplay +
					 (radeon_crtc->v_border * 2));
				vblank_time_us = vblank_in_pixels * 1000 / radeon_crtc->hw_mode.clock;
				break;
			}
		}
	}

	return vblank_time_us;
}

u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 vrefresh = 0;

	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
				vrefresh = drm_mode_vrefresh(&radeon_crtc->hw_mode);
				break;
			}
		}
	}

	return vrefresh;
}
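
/*
 * r600_calculate_u_and_p() factors an interval count into the (p, u)
 * pair the hardware expects, where the encoded value is roughly
 * p << (2 * u).  A minimal worked example under assumed inputs (not
 * taken from any real clock setup): i = 1000, r_c = 10000, p_b = 4
 * gives i_c = 1000 * 10000 / 100 = 100000; i_c >> 4 = 6250 needs
 * 13 bits, so b_c = 13, *u = (13 + 1) / 2 = 7 and
 * *p = 100000 >> 14 = 6 (6 << 14 = 98304, close to i_c).
 */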
void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
			    u32 *p, u32 *u)
{
	u32 b_c = 0;
	u32 i_c;
	u32 tmp;

	i_c = (i * r_c) / 100;
	tmp = i_c >> p_b;

	while (tmp) {
		b_c++;
		tmp >>= 1;
	}

	*u = (b_c + 1) / 2;
	*p = i_c / (1 << (2 * (*u)));
}
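
/*
 * r600_calculate_at() appears to split the hysteresis window h around
 * the activity target t in proportion to the fh/fl frequency ratio,
 * producing the high and low thresholds *th and *tl.  Tracing the
 * arithmetic with assumed, purely illustrative inputs: t = 3000,
 * h = 400, fh = 800000, fl = 400000 gives k = 200, t1 = 300000,
 * a = 1000 * 340000 / 13000 = 26153 -> (26153 + 5) / 10 = 2615,
 * ah = (2615 * 3000 + 5000) / 10000 = 785, al = 1830, so
 * *th = 2215 and *tl = 4830.
 */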
int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
	u32 k, a, ah, al;
	u32 t1;

	if ((fl == 0) || (fh == 0) || (fl > fh))
		return -EINVAL;

	k = (100 * fh) / fl;
	t1 = (t * (k - 100));
	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
	a = (a + 5) / 10;
	ah = ((a * t) + 5000) / 10000;
	al = a - ah;

	*th = t - ah;
	*tl = t + al;

	return 0;
}

void r600_gfx_clockgating_enable(struct radeon_device *rdev, bool enable)
{
	int i;

	if (enable) {
		WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
	} else {
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);

		WREG32(CG_RLC_REQ_AND_RSP, 0x2);

		for (i = 0; i < rdev->usec_timeout; i++) {
			if (((RREG32(CG_RLC_REQ_AND_RSP) & CG_RLC_RSP_TYPE_MASK) >> CG_RLC_RSP_TYPE_SHIFT) == 1)
				break;
			udelay(1);
		}

		WREG32(CG_RLC_REQ_AND_RSP, 0x0);

		WREG32(GRBM_PWR_CNTL, 0x1);
		RREG32(GRBM_PWR_CNTL);
	}
}

void r600_dynamicpm_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
	else
		WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
}

void r600_enable_thermal_protection(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
	else
		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
}

void r600_enable_acpi_pm(struct radeon_device *rdev)
{
	WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
}

void r600_enable_dynamic_pcie_gen2(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
	else
		WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
}

bool r600_dynamicpm_enabled(struct radeon_device *rdev)
{
	if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN)
		return true;
	else
		return false;
}

void r600_enable_sclk_control(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
	else
		WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
}

void r600_enable_mclk_control(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF);
	else
		WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF);
}

void r600_enable_spll_bypass(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(CG_SPLL_FUNC_CNTL, SPLL_BYPASS_EN, ~SPLL_BYPASS_EN);
	else
		WREG32_P(CG_SPLL_FUNC_CNTL, 0, ~SPLL_BYPASS_EN);
}

void r600_wait_for_spll_change(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(CG_SPLL_FUNC_CNTL) & SPLL_CHG_STATUS)
			break;
		udelay(1);
	}
}

void r600_set_bsp(struct radeon_device *rdev, u32 u, u32 p)
{
	WREG32(CG_BSP, BSP(p) | BSU(u));
}

void r600_set_at(struct radeon_device *rdev,
		 u32 l_to_m, u32 m_to_h,
		 u32 h_to_m, u32 m_to_l)
{
	WREG32(CG_RT, FLS(l_to_m) | FMS(m_to_h));
	WREG32(CG_LT, FHS(h_to_m) | FMS(m_to_l));
}

void r600_set_tc(struct radeon_device *rdev,
		 u32 index, u32 u_t, u32 d_t)
{
	WREG32(CG_FFCT_0 + (index * 4), UTC_0(u_t) | DTC_0(d_t));
}

void r600_select_td(struct radeon_device *rdev,
		    enum r600_td td)
{
	if (td == R600_TD_AUTO)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
	else
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
	if (td == R600_TD_UP)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
	if (td == R600_TD_DOWN)
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
}

void r600_set_vrc(struct radeon_device *rdev, u32 vrv)
{
	WREG32(CG_FTV, vrv);
}

void r600_set_tpu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_TPC, TPU(u), ~TPU_MASK);
}

void r600_set_tpc(struct radeon_device *rdev, u32 c)
{
	WREG32_P(CG_TPC, TPCC(c), ~TPCC_MASK);
}

void r600_set_sstu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_SSP, CG_SSTU(u), ~CG_SSTU_MASK);
}

void r600_set_sst(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_SSP, CG_SST(t), ~CG_SST_MASK);
}

void r600_set_git(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_GIT, CG_GICST(t), ~CG_GICST_MASK);
}

void r600_set_fctu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_FC_T, FC_TU(u), ~FC_TU_MASK);
}

void r600_set_fct(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_FC_T, FC_T(t), ~FC_T_MASK);
}

void r600_set_ctxcgtt3d_rphc(struct radeon_device *rdev, u32 p)
{
	WREG32_P(CG_CTX_CGTT3D_R, PHC(p), ~PHC_MASK);
}

void r600_set_ctxcgtt3d_rsdc(struct radeon_device *rdev, u32 s)
{
	WREG32_P(CG_CTX_CGTT3D_R, SDC(s), ~SDC_MASK);
}

void r600_set_vddc3d_oorsu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_VDDC3D_OOR, SU(u), ~SU_MASK);
}

void r600_set_vddc3d_oorphc(struct radeon_device *rdev, u32 p)
{
	WREG32_P(CG_VDDC3D_OOR, PHC(p), ~PHC_MASK);
}

void r600_set_vddc3d_oorsdc(struct radeon_device *rdev, u32 s)
{
	WREG32_P(CG_VDDC3D_OOR, SDC(s), ~SDC_MASK);
}

void r600_set_mpll_lock_time(struct radeon_device *rdev, u32 lock_time)
{
	WREG32_P(MPLL_TIME, MPLL_LOCK_TIME(lock_time), ~MPLL_LOCK_TIME_MASK);
}

void r600_set_mpll_reset_time(struct radeon_device *rdev, u32 reset_time)
{
	WREG32_P(MPLL_TIME, MPLL_RESET_TIME(reset_time), ~MPLL_RESET_TIME_MASK);
}

void r600_engine_clock_entry_enable(struct radeon_device *rdev,
				    u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_SPLL_ENTRY_VALID, ~STEP_0_SPLL_ENTRY_VALID);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_SPLL_ENTRY_VALID);
}

void r600_engine_clock_entry_enable_pulse_skipping(struct radeon_device *rdev,
						   u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_SPLL_STEP_ENABLE, ~STEP_0_SPLL_STEP_ENABLE);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_SPLL_STEP_ENABLE);
}

void r600_engine_clock_entry_enable_post_divider(struct radeon_device *rdev,
						 u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_POST_DIV_EN, ~STEP_0_POST_DIV_EN);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_POST_DIV_EN);
}

void r600_engine_clock_entry_set_post_divider(struct radeon_device *rdev,
					      u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_POST_DIV(divider), ~STEP_0_SPLL_POST_DIV_MASK);
}

void r600_engine_clock_entry_set_reference_divider(struct radeon_device *rdev,
						   u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_REF_DIV(divider), ~STEP_0_SPLL_REF_DIV_MASK);
}

void r600_engine_clock_entry_set_feedback_divider(struct radeon_device *rdev,
						  u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_FB_DIV(divider), ~STEP_0_SPLL_FB_DIV_MASK);
}

void r600_engine_clock_entry_set_step_time(struct radeon_device *rdev,
					   u32 index, u32 step_time)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_STEP_TIME(step_time), ~STEP_0_SPLL_STEP_TIME_MASK);
}

void r600_vid_rt_set_ssu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(VID_RT, SSTU(u), ~SSTU_MASK);
}

void r600_vid_rt_set_vru(struct radeon_device *rdev, u32 u)
{
	WREG32_P(VID_RT, VID_CRTU(u), ~VID_CRTU_MASK);
}

void r600_vid_rt_set_vrt(struct radeon_device *rdev, u32 rt)
{
	WREG32_P(VID_RT, VID_CRT(rt), ~VID_CRT_MASK);
}

void r600_voltage_control_enable_pins(struct radeon_device *rdev,
				      u64 mask)
{
	WREG32(LOWER_GPIO_ENABLE, mask & 0xffffffff);
	WREG32(UPPER_GPIO_ENABLE, upper_32_bits(mask));
}
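
/*
 * Note on the "ix = 3 - (3 & index)" pattern used below and in the
 * r600_power_level_*() helpers: the per-level CTXSW registers appear
 * to be laid out in reverse order relative to the power-level index,
 * so an index of 0 maps to register slot 3, 1 to slot 2, and so on.
 */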
void r600_voltage_control_program_voltages(struct radeon_device *rdev,
					   enum r600_power_level index, u64 pins)
{
	u32 tmp, mask;
	u32 ix = 3 - (3 & index);

	WREG32(CTXSW_VID_LOWER_GPIO_CNTL + (ix * 4), pins & 0xffffffff);

	mask = 7 << (3 * ix);
	tmp = RREG32(VID_UPPER_GPIO_CNTL);
	tmp = (tmp & ~mask) | ((pins >> (32 - (3 * ix))) & mask);
	WREG32(VID_UPPER_GPIO_CNTL, tmp);
}

void r600_voltage_control_deactivate_static_control(struct radeon_device *rdev,
						    u64 mask)
{
	u32 gpio;

	gpio = RREG32(GPIOPAD_MASK);
	gpio &= ~mask;
	WREG32(GPIOPAD_MASK, gpio);

	gpio = RREG32(GPIOPAD_EN);
	gpio &= ~mask;
	WREG32(GPIOPAD_EN, gpio);

	gpio = RREG32(GPIOPAD_A);
	gpio &= ~mask;
	WREG32(GPIOPAD_A, gpio);
}

void r600_power_level_enable(struct radeon_device *rdev,
			     enum r600_power_level index, bool enable)
{
	u32 ix = 3 - (3 & index);

	if (enable)
		WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), CTXSW_FREQ_STATE_ENABLE,
			 ~CTXSW_FREQ_STATE_ENABLE);
	else
		WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), 0,
			 ~CTXSW_FREQ_STATE_ENABLE);
}

void r600_power_level_set_voltage_index(struct radeon_device *rdev,
					enum r600_power_level index, u32 voltage_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_VIDS_CFG_INDEX(voltage_index), ~CTXSW_FREQ_VIDS_CFG_INDEX_MASK);
}

void r600_power_level_set_mem_clock_index(struct radeon_device *rdev,
					  enum r600_power_level index, u32 mem_clock_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_MCLK_CFG_INDEX(mem_clock_index), ~CTXSW_FREQ_MCLK_CFG_INDEX_MASK);
}

void r600_power_level_set_eng_clock_index(struct radeon_device *rdev,
					  enum r600_power_level index, u32 eng_clock_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_SCLK_CFG_INDEX(eng_clock_index), ~CTXSW_FREQ_SCLK_CFG_INDEX_MASK);
}

void r600_power_level_set_watermark_id(struct radeon_device *rdev,
				       enum r600_power_level index,
				       enum r600_display_watermark watermark_id)
{
	u32 ix = 3 - (3 & index);
	u32 tmp = 0;

	if (watermark_id == R600_DISPLAY_WATERMARK_HIGH)
		tmp = CTXSW_FREQ_DISPLAY_WATERMARK;
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_DISPLAY_WATERMARK);
}

void r600_power_level_set_pcie_gen2(struct radeon_device *rdev,
				    enum r600_power_level index, bool compatible)
{
	u32 ix = 3 - (3 & index);
	u32 tmp = 0;

	if (compatible)
		tmp = CTXSW_FREQ_GEN2PCIE_VOLT;
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_GEN2PCIE_VOLT);
}

enum r600_power_level r600_power_level_get_current_index(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK;
	tmp >>= CURRENT_PROFILE_INDEX_SHIFT;
	return tmp;
}

enum r600_power_level r600_power_level_get_target_index(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_PROFILE_INDEX_MASK;
	tmp >>= TARGET_PROFILE_INDEX_SHIFT;
	return tmp;
}

void r600_power_level_set_enter_index(struct radeon_device *rdev,
				      enum r600_power_level index)
{
	WREG32_P(TARGET_AND_CURRENT_PROFILE_INDEX, DYN_PWR_ENTER_INDEX(index),
		 ~DYN_PWR_ENTER_INDEX_MASK);
}

void r600_wait_for_power_level_unequal(struct radeon_device *rdev,
				       enum r600_power_level index)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_target_index(rdev) != index)
			break;
		udelay(1);
	}

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_current_index(rdev) != index)
			break;
		udelay(1);
	}
}

void r600_wait_for_power_level(struct radeon_device *rdev,
			       enum r600_power_level index)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_target_index(rdev) == index)
			break;
		udelay(1);
	}

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_current_index(rdev) == index)
			break;
		udelay(1);
	}
}
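
/*
 * Bring-up sequence sketch: r600_start_dpm() first parks the sclk/mclk
 * controllers, turns on global dynamic PM, waits for a vblank on both
 * CRTCs so the switch does not land mid-scanout, then toggles the SPLL
 * in and out of bypass twice (presumably to let the PLL relock cleanly)
 * before handing clock control back to the hardware.
 */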
void r600_start_dpm(struct radeon_device *rdev)
{
	r600_enable_sclk_control(rdev, false);
	r600_enable_mclk_control(rdev, false);

	r600_dynamicpm_enable(rdev, true);

	radeon_wait_for_vblank(rdev, 0);
	radeon_wait_for_vblank(rdev, 1);

	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	r600_enable_sclk_control(rdev, true);
	r600_enable_mclk_control(rdev, true);
}

void r600_stop_dpm(struct radeon_device *rdev)
{
	r600_dynamicpm_enable(rdev, false);
}

int r600_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	return 0;
}

void r600_dpm_post_set_power_state(struct radeon_device *rdev)
{

}

bool r600_is_uvd_state(u32 class, u32 class2)
{
	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
		return true;
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
		return true;
	return false;
}
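
/*
 * Temperatures here are in millidegrees C while the DIG_THERM_* register
 * fields take whole degrees, hence the /1000 below.  As an illustration
 * (assuming the usual R600_TEMP_RANGE_MIN/MAX of 90000 and 120000 passed
 * in from r600_dpm_late_enable()), the clamped range stays 90000..120000
 * and the registers are programmed with INTL = 90 and INTH = 120.
 */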
static int r600_set_thermal_temperature_range(struct radeon_device *rdev,
					      int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
	WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}

int r600_dpm_late_enable(struct radeon_device *rdev)
{
	int ret;

	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret)
			return ret;
		rdev->irq.dpm_thermal = true;
		radeon_irq_set(rdev);
	}

	return 0;
}
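
/*
 * The ATOM PowerPlay table exists in several revisions that effectively
 * grow at the tail, which is why a single union can overlay every
 * layout; the parsing code below always checks usTableSize (or the
 * extended header size) before dereferencing fields that only newer
 * revisions define.
 */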
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};

static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table,
					    ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct radeon_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	radeon_table->entries = kzalloc(size, GFP_KERNEL);
	if (!radeon_table->entries)
		return -ENOMEM;

	/* the BIOS records are packed, so walk them with explicit byte arithmetic */
	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		radeon_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		radeon_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}

	radeon_table->count = atom_table->ucNumEntries;

	return 0;
}
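
/*
 * r600_get_platform_caps() below is a light first pass over the same
 * PowerPlay table: it locates the table through the ATOM master data
 * table, then caches the platform caps and the voltage/backbias
 * response times for later use by the per-ASIC dpm code.
 */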
int r600_get_platform_caps(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22

int r600_parse_extended_power_table(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			rdev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			rdev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			rdev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			rdev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			rdev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			rdev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			rdev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				rdev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				rdev->pm.dpm.fan.t_max = 10900;
			rdev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				rdev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				rdev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				rdev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				rdev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			rdev->pm.dpm.fan.ucode_fan_control = true;
		}
	}
	/* clock dependency tables, shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
							       dep_table);
			if (ret)
				return ret;
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kcalloc(psl->ucNumEntries,
					sizeof(struct radeon_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			rdev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}
	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		rdev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		rdev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		rdev->pm.dpm.near_tdp_limit_adjusted = rdev->pm.dpm.near_tdp_limit;
		rdev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (rdev->pm.dpm.tdp_od_limit)
			rdev->pm.dpm.power_control = true;
		else
			rdev->pm.dpm.power_control = false;
		rdev->pm.dpm.tdp_adjustment = 0;
		rdev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		rdev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		rdev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct radeon_cac_leakage_table);

			rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			rdev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}
	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
		    ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct radeon_vce_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			for (i = 0; i < states->numEntries; i++) {
				if (i >= RADEON_MAX_VCE_LEVELS)
					break;
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				rdev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				rdev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				rdev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				rdev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
		    ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_uvd_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
		    ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			rdev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct radeon_ppm_table), GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.ppm_table) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			rdev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			rdev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			rdev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			rdev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			rdev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			rdev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			rdev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
		    ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
		    ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			rdev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct radeon_cac_tdp_table), GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.cac_tdp_table) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					le16_to_cpu(ppt->usMaximumPowerDeliveryLimit);
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			rdev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			rdev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			rdev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			rdev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			rdev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			rdev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			rdev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
	}

	return 0;
}

void r600_free_extended_power_table(struct radeon_device *rdev)
{
	struct radeon_dpm_dynamic_state *dyn_state = &rdev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
}
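
/*
 * Gen selection below: an explicit ASIC setting always wins; only the
 * "unknown" default path consults the system mask, and it only honours
 * default_gen exactly.  Note, for instance, that a GEN3 default on a
 * system whose mask advertises 5.0 GT/s but not 8.0 GT/s falls all the
 * way back to GEN1 rather than degrading to GEN2.
 */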
enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
					       u32 sys_mask,
					       enum radeon_pcie_gen asic_gen,
					       enum radeon_pcie_gen default_gen)
{
	switch (asic_gen) {
	case RADEON_PCIE_GEN1:
		return RADEON_PCIE_GEN1;
	case RADEON_PCIE_GEN2:
		return RADEON_PCIE_GEN2;
	case RADEON_PCIE_GEN3:
		return RADEON_PCIE_GEN3;
	default:
		if ((sys_mask & RADEON_PCIE_SPEED_80) && (default_gen == RADEON_PCIE_GEN3))
			return RADEON_PCIE_GEN3;
		else if ((sys_mask & RADEON_PCIE_SPEED_50) && (default_gen == RADEON_PCIE_GEN2))
			return RADEON_PCIE_GEN2;
		else
			return RADEON_PCIE_GEN1;
	}
	return RADEON_PCIE_GEN1;
}

u16 r600_get_pcie_lane_support(struct radeon_device *rdev,
			       u16 asic_lanes,
			       u16 default_lanes)
{
	switch (asic_lanes) {
	case 0:
	default:
		return default_lanes;
	case 1:
		return 1;
	case 2:
		return 2;
	case 4:
		return 4;
	case 8:
		return 8;
	case 12:
		return 12;
	case 16:
		return 16;
	}
}
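
/*
 * The lookup table below maps a physical lane count to the 3-bit field
 * encoding the hardware uses; only the valid widths get non-zero slots:
 * 1 -> 1, 2 -> 2, 4 -> 3, 8 -> 4, 12 -> 5, 16 -> 6, anything else -> 0.
 * For example, r600_encode_pci_lane_width(8) returns 4.
 */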
u8 r600_encode_pci_lane_width(u32 lanes)
{
	u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };

	if (lanes > 16)
		return 0;

	return encoded_lanes[lanes];
}