2013-08-14 01:01:40 -04:00
/*
* Copyright 2013 Advanced Micro Devices , Inc .
*
* Permission is hereby granted , free of charge , to any person obtaining a
* copy of this software and associated documentation files ( the " Software " ) ,
* to deal in the Software without restriction , including without limitation
* the rights to use , copy , modify , merge , publish , distribute , sublicense ,
* and / or sell copies of the Software , and to permit persons to whom the
* Software is furnished to do so , subject to the following conditions :
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software .
*
* THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR
* IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY ,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT . IN NO EVENT SHALL
* THE COPYRIGHT HOLDER ( S ) OR AUTHOR ( S ) BE LIABLE FOR ANY CLAIM , DAMAGES OR
* OTHER LIABILITY , WHETHER IN AN ACTION OF CONTRACT , TORT OR OTHERWISE ,
* ARISING FROM , OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE .
*
*/
2019-12-03 11:04:02 +01:00
# include <linux/pci.h>
2019-06-08 10:02:41 +02:00
# include <linux/seq_file.h>
2013-08-14 01:01:40 -04:00
# include "cikd.h"
# include "kv_dpm.h"
2019-06-08 10:02:41 +02:00
# include "r600_dpm.h"
# include "radeon.h"
2013-08-13 11:56:53 +02:00
# include "radeon_asic.h"
2013-08-14 01:01:40 -04:00
# define KV_MAX_DEEPSLEEP_DIVIDER_ID 5
# define KV_MINIMUM_ENGINE_CLOCK 800
# define SMC_RAM_END 0x40000
2014-09-18 11:16:31 -04:00
static int kv_enable_nb_dpm ( struct radeon_device * rdev ,
bool enable ) ;
2013-08-14 01:01:40 -04:00
static void kv_init_graphics_levels ( struct radeon_device * rdev ) ;
static int kv_calculate_ds_divider ( struct radeon_device * rdev ) ;
static int kv_calculate_nbps_level_settings ( struct radeon_device * rdev ) ;
static int kv_calculate_dpm_settings ( struct radeon_device * rdev ) ;
static void kv_enable_new_levels ( struct radeon_device * rdev ) ;
static void kv_program_nbps_index_settings ( struct radeon_device * rdev ,
struct radeon_ps * new_rps ) ;
2013-09-04 12:01:28 -04:00
static int kv_set_enabled_level ( struct radeon_device * rdev , u32 level ) ;
2013-08-14 01:01:40 -04:00
static int kv_set_enabled_levels ( struct radeon_device * rdev ) ;
2013-07-18 16:48:46 -04:00
static int kv_force_dpm_highest ( struct radeon_device * rdev ) ;
2013-08-14 01:01:40 -04:00
static int kv_force_dpm_lowest ( struct radeon_device * rdev ) ;
static void kv_apply_state_adjust_rules ( struct radeon_device * rdev ,
struct radeon_ps * new_rps ,
struct radeon_ps * old_rps ) ;
static int kv_set_thermal_temperature_range ( struct radeon_device * rdev ,
int min_temp , int max_temp ) ;
static int kv_init_fps_limits ( struct radeon_device * rdev ) ;
2013-08-09 10:02:40 -04:00
void kv_dpm_powergate_uvd ( struct radeon_device * rdev , bool gate ) ;
2013-08-14 01:01:40 -04:00
static void kv_dpm_powergate_vce ( struct radeon_device * rdev , bool gate ) ;
static void kv_dpm_powergate_samu ( struct radeon_device * rdev , bool gate ) ;
static void kv_dpm_powergate_acp ( struct radeon_device * rdev , bool gate ) ;
extern void cik_enter_rlc_safe_mode ( struct radeon_device * rdev ) ;
extern void cik_exit_rlc_safe_mode ( struct radeon_device * rdev ) ;
extern void cik_update_cg ( struct radeon_device * rdev ,
u32 block , bool enable ) ;
/*
 * DIDT (di/dt current-limit) register programming table, terminated by an
 * entry with offset 0xFFFFFFFF.  Four structurally identical groups — SQ,
 * DB, TD and TCP — each consisting of three byte-wise weight registers,
 * a control register, a min/max power register and an enable register.
 */
static const struct kv_pt_config_reg didt_config_kv[] = {
	/* SQ weights (offsets 0x10-0x12), all zeroed */
	{ 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	/* SQ control / min-max power / enable */
	{ 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	/* DB weights (offsets 0x30-0x32) */
	{ 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	/* DB control / min-max power / enable */
	{ 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	/* TD weights (offsets 0x50-0x52) */
	{ 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	/* TD control / min-max power / enable */
	{ 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	/* TCP weights (offsets 0x70-0x72) */
	{ 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	/* TCP control / min-max power / enable */
	{ 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }		/* terminator */
};
static struct kv_ps * kv_get_ps ( struct radeon_ps * rps )
{
struct kv_ps * ps = rps - > ps_priv ;
return ps ;
}
static struct kv_power_info * kv_get_pi ( struct radeon_device * rdev )
{
struct kv_power_info * pi = rdev - > pm . dpm . priv ;
return pi ;
}
static int kv_program_pt_config_registers ( struct radeon_device * rdev ,
const struct kv_pt_config_reg * cac_config_regs )
{
const struct kv_pt_config_reg * config_regs = cac_config_regs ;
u32 data ;
u32 cache = 0 ;
if ( config_regs = = NULL )
return - EINVAL ;
while ( config_regs - > offset ! = 0xFFFFFFFF ) {
if ( config_regs - > type = = KV_CONFIGREG_CACHE ) {
cache | = ( ( config_regs - > value < < config_regs - > shift ) & config_regs - > mask ) ;
} else {
switch ( config_regs - > type ) {
case KV_CONFIGREG_SMC_IND :
data = RREG32_SMC ( config_regs - > offset ) ;
break ;
case KV_CONFIGREG_DIDT_IND :
data = RREG32_DIDT ( config_regs - > offset ) ;
break ;
default :
data = RREG32 ( config_regs - > offset < < 2 ) ;
break ;
}
data & = ~ config_regs - > mask ;
data | = ( ( config_regs - > value < < config_regs - > shift ) & config_regs - > mask ) ;
data | = cache ;
cache = 0 ;
switch ( config_regs - > type ) {
case KV_CONFIGREG_SMC_IND :
WREG32_SMC ( config_regs - > offset , data ) ;
break ;
case KV_CONFIGREG_DIDT_IND :
WREG32_DIDT ( config_regs - > offset , data ) ;
break ;
default :
WREG32 ( config_regs - > offset < < 2 , data ) ;
break ;
}
}
config_regs + + ;
}
return 0 ;
}
static void kv_do_enable_didt ( struct radeon_device * rdev , bool enable )
{
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
u32 data ;
if ( pi - > caps_sq_ramping ) {
data = RREG32_DIDT ( DIDT_SQ_CTRL0 ) ;
if ( enable )
data | = DIDT_CTRL_EN ;
else
data & = ~ DIDT_CTRL_EN ;
WREG32_DIDT ( DIDT_SQ_CTRL0 , data ) ;
}
if ( pi - > caps_db_ramping ) {
data = RREG32_DIDT ( DIDT_DB_CTRL0 ) ;
if ( enable )
data | = DIDT_CTRL_EN ;
else
data & = ~ DIDT_CTRL_EN ;
WREG32_DIDT ( DIDT_DB_CTRL0 , data ) ;
}
if ( pi - > caps_td_ramping ) {
data = RREG32_DIDT ( DIDT_TD_CTRL0 ) ;
if ( enable )
data | = DIDT_CTRL_EN ;
else
data & = ~ DIDT_CTRL_EN ;
WREG32_DIDT ( DIDT_TD_CTRL0 , data ) ;
}
if ( pi - > caps_tcp_ramping ) {
data = RREG32_DIDT ( DIDT_TCP_CTRL0 ) ;
if ( enable )
data | = DIDT_CTRL_EN ;
else
data & = ~ DIDT_CTRL_EN ;
WREG32_DIDT ( DIDT_TCP_CTRL0 , data ) ;
}
}
/*
 * Enable/disable DIDT.  No-op unless at least one block supports ramping.
 * When enabling, the DIDT config table is programmed first; any failure
 * aborts before the enable bits are touched.
 */
static int kv_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	bool have_ramping = pi->caps_sq_ramping || pi->caps_db_ramping ||
			    pi->caps_td_ramping || pi->caps_tcp_ramping;
	int ret = 0;

	if (!have_ramping)
		return 0;

	/* DIDT registers may only be touched with the RLC in safe mode */
	cik_enter_rlc_safe_mode(rdev);

	if (enable)
		ret = kv_program_pt_config_registers(rdev, didt_config_kv);
	if (ret == 0)
		kv_do_enable_didt(rdev, enable);

	cik_exit_rlc_safe_mode(rdev);
	return ret;
}
/*
 * Enable/disable CAC (capacitance-aware power tracking) in the SMU.
 * Tracks the resulting state in pi->cac_enabled; disabling is
 * best-effort and its SMU return code is ignored, as before.
 */
static int kv_enable_smc_cac(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;

	if (!pi->caps_cac)
		return 0;

	if (enable) {
		ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableCac);
		pi->cac_enabled = (ret == 0);
	} else if (pi->cac_enabled) {
		kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableCac);
		pi->cac_enabled = false;
	}

	return ret;
}
static int kv_process_firmware_header ( struct radeon_device * rdev )
{
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
u32 tmp ;
int ret ;
ret = kv_read_smc_sram_dword ( rdev , SMU7_FIRMWARE_HEADER_LOCATION +
offsetof ( SMU7_Firmware_Header , DpmTable ) ,
& tmp , pi - > sram_end ) ;
if ( ret = = 0 )
pi - > dpm_table_start = tmp ;
ret = kv_read_smc_sram_dword ( rdev , SMU7_FIRMWARE_HEADER_LOCATION +
offsetof ( SMU7_Firmware_Header , SoftRegisters ) ,
& tmp , pi - > sram_end ) ;
if ( ret = = 0 )
pi - > soft_regs_start = tmp ;
return ret ;
}
static int kv_enable_dpm_voltage_scaling ( struct radeon_device * rdev )
{
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
int ret ;
pi - > graphics_voltage_change_enable = 1 ;
ret = kv_copy_bytes_to_smc ( rdev ,
pi - > dpm_table_start +
offsetof ( SMU7_Fusion_DpmTable , GraphicsVoltageChangeEnable ) ,
& pi - > graphics_voltage_change_enable ,
sizeof ( u8 ) , pi - > sram_end ) ;
return ret ;
}
static int kv_set_dpm_interval ( struct radeon_device * rdev )
{
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
int ret ;
pi - > graphics_interval = 1 ;
ret = kv_copy_bytes_to_smc ( rdev ,
pi - > dpm_table_start +
offsetof ( SMU7_Fusion_DpmTable , GraphicsInterval ) ,
& pi - > graphics_interval ,
sizeof ( u8 ) , pi - > sram_end ) ;
return ret ;
}
static int kv_set_dpm_boot_state ( struct radeon_device * rdev )
{
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
int ret ;
ret = kv_copy_bytes_to_smc ( rdev ,
pi - > dpm_table_start +
offsetof ( SMU7_Fusion_DpmTable , GraphicsBootLevel ) ,
& pi - > graphics_boot_level ,
sizeof ( u8 ) , pi - > sram_end ) ;
return ret ;
}
static void kv_program_vc ( struct radeon_device * rdev )
{
2013-09-04 12:01:28 -04:00
WREG32_SMC ( CG_FTV_0 , 0x3FFFC100 ) ;
2013-08-14 01:01:40 -04:00
}
/* Clear the voltage-change FSM thresholds (inverse of kv_program_vc). */
static void kv_clear_vc(struct radeon_device *rdev)
{
	WREG32_SMC(CG_FTV_0, 0);
}
/*
 * Resolve the engine-PLL post divider for @sclk via ATOM and store the
 * divider and (big-endian) frequency into graphics level @index.
 */
static int kv_set_divider_value(struct radeon_device *rdev,
				u32 index, u32 sclk)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct atom_clock_dividers dividers;
	int ret;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     sclk, false, &dividers);
	if (ret)
		return ret;

	pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
	/* SMU tables are big-endian */
	pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);
	return 0;
}
2014-04-30 18:40:52 -04:00
/*
 * Translate a 2-bit VID to its 7-bit encoding.  Prefers the ATOM
 * vddc/sclk dependency table, using the 2-bit VID as a direct index
 * clamped to the last entry; otherwise searches the sumo VID mapping
 * table, falling back to its final entry when no match is found.
 */
static u32 kv_convert_vid2_to_vid7(struct radeon_device *rdev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_2bit)
{
	struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		u32 idx = (vid_2bit < vddc_sclk_table->count) ?
			vid_2bit : vddc_sclk_table->count - 1;
		return vddc_sclk_table->entries[idx].v;
	}

	for (i = 0; i < vid_mapping_table->num_entries; i++) {
		if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
			return vid_mapping_table->entries[i].vid_7bit;
	}
	return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
}
/*
 * Translate a 7-bit VID back to its 2-bit encoding — the inverse of
 * kv_convert_vid2_to_vid7, with the same table preference and the same
 * fall-back to the last entry when no exact match exists.
 */
static u32 kv_convert_vid7_to_vid2(struct radeon_device *rdev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_7bit)
{
	struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		for (i = 0; i < vddc_sclk_table->count; i++) {
			if (vddc_sclk_table->entries[i].v == vid_7bit)
				return i;
		}
		return vddc_sclk_table->count - 1;
	}

	for (i = 0; i < vid_mapping_table->num_entries; i++) {
		if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
			return vid_mapping_table->entries[i].vid_2bit;
	}
	return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
}
2013-08-14 01:01:40 -04:00
/* Decode an 8-bit SVI voltage index: 25-unit steps down from 6200
 * (presumably units of 0.25 mV steps / SVI encoding — confirm against
 * the voltage-regulator spec). */
static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev,
					    u16 voltage)
{
	return 6200 - (voltage * 25);
}
/* Convert a 2-bit VID to an actual voltage by going 2-bit -> 7/8-bit
 * VID, then decoding that index. */
static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev,
					    u32 vid_2bit)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 vid_8bit;

	vid_8bit = kv_convert_vid2_to_vid7(rdev,
					   &pi->sys_info.vid_mapping_table,
					   vid_2bit);
	return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit);
}
/* Record the voltage-drop hysteresis and minimum NB voltage (big-endian)
 * for graphics level @index.  Always succeeds. */
static int kv_set_vid(struct radeon_device *rdev, u32 index, u32 vid)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct SMU7_Fusion_GraphicsLevel *level = &pi->graphics_level[index];

	level->VoltageDownH = (u8)pi->voltage_drop_t;
	level->MinVddNb =
		cpu_to_be32(kv_convert_2bit_index_to_voltage(rdev, vid));
	return 0;
}
/* Store the activity target (big-endian) for graphics level @index. */
static int kv_set_at(struct radeon_device *rdev, u32 index, u32 at)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].AT = cpu_to_be16((u16)at);
	return 0;
}
/* Mark graphics level @index as (in)eligible for activity-based selection. */
static void kv_dpm_power_level_enable(struct radeon_device *rdev,
				      u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
}
/* Set the global power-management enable bit, then start DPM in the SMU. */
static void kv_start_dpm(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

	WREG32_SMC(GENERAL_PWRMGT, tmp | GLOBAL_PWRMGT_EN);
	kv_smc_dpm_enable(rdev, true);
}
/* Ask the SMU to stop dynamic power management. */
static void kv_stop_dpm(struct radeon_device *rdev)
{
	kv_smc_dpm_enable(rdev, false);
}
static void kv_start_am ( struct radeon_device * rdev )
{
u32 sclk_pwrmgt_cntl = RREG32_SMC ( SCLK_PWRMGT_CNTL ) ;
sclk_pwrmgt_cntl & = ~ ( RESET_SCLK_CNT | RESET_BUSY_CNT ) ;
sclk_pwrmgt_cntl | = DYNAMIC_PM_EN ;
WREG32_SMC ( SCLK_PWRMGT_CNTL , sclk_pwrmgt_cntl ) ;
}
static void kv_reset_am ( struct radeon_device * rdev )
{
u32 sclk_pwrmgt_cntl = RREG32_SMC ( SCLK_PWRMGT_CNTL ) ;
sclk_pwrmgt_cntl | = ( RESET_SCLK_CNT | RESET_BUSY_CNT ) ;
WREG32_SMC ( SCLK_PWRMGT_CNTL , sclk_pwrmgt_cntl ) ;
}
/* Freeze or unfreeze sclk DPM level changes via the SMU. */
static int kv_freeze_sclk_dpm(struct radeon_device *rdev, bool freeze)
{
	PPSMC_Msg msg = freeze ? PPSMC_MSG_SCLKDPM_FreezeLevel :
				 PPSMC_MSG_SCLKDPM_UnfreezeLevel;

	return kv_notify_message_to_smu(rdev, msg);
}
/* Alias: forcing the lowest valid level is just forcing the lowest. */
static int kv_force_lowest_valid(struct radeon_device *rdev)
{
	return kv_force_dpm_lowest(rdev);
}
static int kv_unforce_levels ( struct radeon_device * rdev )
{
2014-04-30 18:40:51 -04:00
if ( rdev - > family = = CHIP_KABINI | | rdev - > family = = CHIP_MULLINS )
2013-09-04 12:01:28 -04:00
return kv_notify_message_to_smu ( rdev , PPSMC_MSG_NoForcedLevel ) ;
else
return kv_set_enabled_levels ( rdev ) ;
2013-08-14 01:01:40 -04:00
}
static int kv_update_sclk_t ( struct radeon_device * rdev )
{
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
u32 low_sclk_interrupt_t = 0 ;
int ret = 0 ;
if ( pi - > caps_sclk_throttle_low_notification ) {
low_sclk_interrupt_t = cpu_to_be32 ( pi - > low_sclk_interrupt_t ) ;
ret = kv_copy_bytes_to_smc ( rdev ,
pi - > dpm_table_start +
offsetof ( SMU7_Fusion_DpmTable , LowSclkInterruptT ) ,
( u8 * ) & low_sclk_interrupt_t ,
sizeof ( u32 ) , pi - > sram_end ) ;
}
return ret ;
}
static int kv_program_bootup_state ( struct radeon_device * rdev )
{
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
u32 i ;
struct radeon_clock_voltage_dependency_table * table =
& rdev - > pm . dpm . dyn_state . vddc_dependency_on_sclk ;
if ( table & & table - > count ) {
2013-09-04 12:31:36 +03:00
for ( i = pi - > graphics_dpm_level_count - 1 ; i > 0 ; i - - ) {
if ( table - > entries [ i ] . clk = = pi - > boot_pl . sclk )
2013-08-14 01:01:40 -04:00
break ;
}
pi - > graphics_boot_level = ( u8 ) i ;
kv_dpm_power_level_enable ( rdev , i , true ) ;
} else {
struct sumo_sclk_voltage_mapping_table * table =
& pi - > sys_info . sclk_voltage_mapping_table ;
if ( table - > num_max_dpm_entries = = 0 )
return - EINVAL ;
2013-09-04 12:31:36 +03:00
for ( i = pi - > graphics_dpm_level_count - 1 ; i > 0 ; i - - ) {
if ( table - > entries [ i ] . sclk_frequency = = pi - > boot_pl . sclk )
2013-08-14 01:01:40 -04:00
break ;
}
pi - > graphics_boot_level = ( u8 ) i ;
kv_dpm_power_level_enable ( rdev , i , true ) ;
}
return 0 ;
}
static int kv_enable_auto_thermal_throttling ( struct radeon_device * rdev )
{
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
int ret ;
pi - > graphics_therm_throttle_enable = 1 ;
ret = kv_copy_bytes_to_smc ( rdev ,
pi - > dpm_table_start +
offsetof ( SMU7_Fusion_DpmTable , GraphicsThermThrottleEnable ) ,
& pi - > graphics_therm_throttle_enable ,
sizeof ( u8 ) , pi - > sram_end ) ;
return ret ;
}
static int kv_upload_dpm_settings ( struct radeon_device * rdev )
{
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
int ret ;
ret = kv_copy_bytes_to_smc ( rdev ,
pi - > dpm_table_start +
offsetof ( SMU7_Fusion_DpmTable , GraphicsLevel ) ,
( u8 * ) & pi - > graphics_level ,
sizeof ( SMU7_Fusion_GraphicsLevel ) * SMU7_MAX_LEVELS_GRAPHICS ,
pi - > sram_end ) ;
if ( ret )
return ret ;
ret = kv_copy_bytes_to_smc ( rdev ,
pi - > dpm_table_start +
offsetof ( SMU7_Fusion_DpmTable , GraphicsDpmLevelCount ) ,
& pi - > graphics_dpm_level_count ,
sizeof ( u8 ) , pi - > sram_end ) ;
return ret ;
}
/* Absolute difference between two clock values. */
static u32 kv_get_clock_difference(u32 a, u32 b)
{
	return (a > b) ? (a - b) : (b - a);
}
/*
 * Map a clock near one of the known DFS-bypass frequencies (within 200
 * units) to its bypass control value.  Returns 0 when DFS bypass is
 * unsupported or no frequency matches.
 */
static u32 kv_get_clk_bypass(struct radeon_device *rdev, u32 clk)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (!pi->caps_enable_dfs_bypass)
		return 0;

	if (kv_get_clock_difference(clk, 40000) < 200)
		return 3;
	if (kv_get_clock_difference(clk, 30000) < 200)
		return 2;
	if (kv_get_clock_difference(clk, 20000) < 200)
		return 7;
	if (kv_get_clock_difference(clk, 15000) < 200)
		return 6;
	if (kv_get_clock_difference(clk, 10000) < 200)
		return 8;
	return 0;
}
static int kv_populate_uvd_table ( struct radeon_device * rdev )
{
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
struct radeon_uvd_clock_voltage_dependency_table * table =
& rdev - > pm . dpm . dyn_state . uvd_clock_voltage_dependency_table ;
struct atom_clock_dividers dividers ;
int ret ;
u32 i ;
if ( table = = NULL | | table - > count = = 0 )
return 0 ;
pi - > uvd_level_count = 0 ;
for ( i = 0 ; i < table - > count ; i + + ) {
if ( pi - > high_voltage_t & &
( pi - > high_voltage_t < table - > entries [ i ] . v ) )
break ;
pi - > uvd_level [ i ] . VclkFrequency = cpu_to_be32 ( table - > entries [ i ] . vclk ) ;
pi - > uvd_level [ i ] . DclkFrequency = cpu_to_be32 ( table - > entries [ i ] . dclk ) ;
pi - > uvd_level [ i ] . MinVddNb = cpu_to_be16 ( table - > entries [ i ] . v ) ;
pi - > uvd_level [ i ] . VClkBypassCntl =
( u8 ) kv_get_clk_bypass ( rdev , table - > entries [ i ] . vclk ) ;
pi - > uvd_level [ i ] . DClkBypassCntl =
( u8 ) kv_get_clk_bypass ( rdev , table - > entries [ i ] . dclk ) ;
ret = radeon_atom_get_clock_dividers ( rdev , COMPUTE_ENGINE_PLL_PARAM ,
table - > entries [ i ] . vclk , false , & dividers ) ;
if ( ret )
return ret ;
pi - > uvd_level [ i ] . VclkDivider = ( u8 ) dividers . post_div ;
ret = radeon_atom_get_clock_dividers ( rdev , COMPUTE_ENGINE_PLL_PARAM ,
table - > entries [ i ] . dclk , false , & dividers ) ;
if ( ret )
return ret ;
pi - > uvd_level [ i ] . DclkDivider = ( u8 ) dividers . post_div ;
pi - > uvd_level_count + + ;
}
ret = kv_copy_bytes_to_smc ( rdev ,
pi - > dpm_table_start +
offsetof ( SMU7_Fusion_DpmTable , UvdLevelCount ) ,
( u8 * ) & pi - > uvd_level_count ,
sizeof ( u8 ) , pi - > sram_end ) ;
if ( ret )
return ret ;
pi - > uvd_interval = 1 ;
ret = kv_copy_bytes_to_smc ( rdev ,
pi - > dpm_table_start +
offsetof ( SMU7_Fusion_DpmTable , UVDInterval ) ,
& pi - > uvd_interval ,
sizeof ( u8 ) , pi - > sram_end ) ;
if ( ret )
return ret ;
ret = kv_copy_bytes_to_smc ( rdev ,
pi - > dpm_table_start +
offsetof ( SMU7_Fusion_DpmTable , UvdLevel ) ,
( u8 * ) & pi - > uvd_level ,
sizeof ( SMU7_Fusion_UvdLevel ) * SMU7_MAX_LEVELS_UVD ,
pi - > sram_end ) ;
return ret ;
}
static int kv_populate_vce_table ( struct radeon_device * rdev )
{
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
int ret ;
u32 i ;
struct radeon_vce_clock_voltage_dependency_table * table =
& rdev - > pm . dpm . dyn_state . vce_clock_voltage_dependency_table ;
struct atom_clock_dividers dividers ;
if ( table = = NULL | | table - > count = = 0 )
return 0 ;
pi - > vce_level_count = 0 ;
for ( i = 0 ; i < table - > count ; i + + ) {
if ( pi - > high_voltage_t & &
pi - > high_voltage_t < table - > entries [ i ] . v )
break ;
pi - > vce_level [ i ] . Frequency = cpu_to_be32 ( table - > entries [ i ] . evclk ) ;
pi - > vce_level [ i ] . MinVoltage = cpu_to_be16 ( table - > entries [ i ] . v ) ;
pi - > vce_level [ i ] . ClkBypassCntl =
( u8 ) kv_get_clk_bypass ( rdev , table - > entries [ i ] . evclk ) ;
ret = radeon_atom_get_clock_dividers ( rdev , COMPUTE_ENGINE_PLL_PARAM ,
table - > entries [ i ] . evclk , false , & dividers ) ;
if ( ret )
return ret ;
pi - > vce_level [ i ] . Divider = ( u8 ) dividers . post_div ;
pi - > vce_level_count + + ;
}
ret = kv_copy_bytes_to_smc ( rdev ,
pi - > dpm_table_start +
offsetof ( SMU7_Fusion_DpmTable , VceLevelCount ) ,
( u8 * ) & pi - > vce_level_count ,
sizeof ( u8 ) ,
pi - > sram_end ) ;
if ( ret )
return ret ;
pi - > vce_interval = 1 ;
ret = kv_copy_bytes_to_smc ( rdev ,
pi - > dpm_table_start +
offsetof ( SMU7_Fusion_DpmTable , VCEInterval ) ,
( u8 * ) & pi - > vce_interval ,
sizeof ( u8 ) ,
pi - > sram_end ) ;
if ( ret )
return ret ;
ret = kv_copy_bytes_to_smc ( rdev ,
pi - > dpm_table_start +
offsetof ( SMU7_Fusion_DpmTable , VceLevel ) ,
( u8 * ) & pi - > vce_level ,
sizeof ( SMU7_Fusion_ExtClkLevel ) * SMU7_MAX_LEVELS_VCE ,
pi - > sram_end ) ;
return ret ;
}
static int kv_populate_samu_table ( struct radeon_device * rdev )
{
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
struct radeon_clock_voltage_dependency_table * table =
& rdev - > pm . dpm . dyn_state . samu_clock_voltage_dependency_table ;
struct atom_clock_dividers dividers ;
int ret ;
u32 i ;
if ( table = = NULL | | table - > count = = 0 )
return 0 ;
pi - > samu_level_count = 0 ;
for ( i = 0 ; i < table - > count ; i + + ) {
if ( pi - > high_voltage_t & &
pi - > high_voltage_t < table - > entries [ i ] . v )
break ;
pi - > samu_level [ i ] . Frequency = cpu_to_be32 ( table - > entries [ i ] . clk ) ;
pi - > samu_level [ i ] . MinVoltage = cpu_to_be16 ( table - > entries [ i ] . v ) ;
pi - > samu_level [ i ] . ClkBypassCntl =
( u8 ) kv_get_clk_bypass ( rdev , table - > entries [ i ] . clk ) ;
ret = radeon_atom_get_clock_dividers ( rdev , COMPUTE_ENGINE_PLL_PARAM ,
table - > entries [ i ] . clk , false , & dividers ) ;
if ( ret )
return ret ;
pi - > samu_level [ i ] . Divider = ( u8 ) dividers . post_div ;
pi - > samu_level_count + + ;
}
ret = kv_copy_bytes_to_smc ( rdev ,
pi - > dpm_table_start +
offsetof ( SMU7_Fusion_DpmTable , SamuLevelCount ) ,
( u8 * ) & pi - > samu_level_count ,
sizeof ( u8 ) ,
pi - > sram_end ) ;
if ( ret )
return ret ;
pi - > samu_interval = 1 ;
ret = kv_copy_bytes_to_smc ( rdev ,
pi - > dpm_table_start +
offsetof ( SMU7_Fusion_DpmTable , SAMUInterval ) ,
( u8 * ) & pi - > samu_interval ,
sizeof ( u8 ) ,
pi - > sram_end ) ;
if ( ret )
return ret ;
ret = kv_copy_bytes_to_smc ( rdev ,
pi - > dpm_table_start +
offsetof ( SMU7_Fusion_DpmTable , SamuLevel ) ,
( u8 * ) & pi - > samu_level ,
sizeof ( SMU7_Fusion_ExtClkLevel ) * SMU7_MAX_LEVELS_SAMU ,
pi - > sram_end ) ;
if ( ret )
return ret ;
return ret ;
}
static int kv_populate_acp_table ( struct radeon_device * rdev )
{
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
struct radeon_clock_voltage_dependency_table * table =
& rdev - > pm . dpm . dyn_state . acp_clock_voltage_dependency_table ;
struct atom_clock_dividers dividers ;
int ret ;
u32 i ;
if ( table = = NULL | | table - > count = = 0 )
return 0 ;
pi - > acp_level_count = 0 ;
for ( i = 0 ; i < table - > count ; i + + ) {
pi - > acp_level [ i ] . Frequency = cpu_to_be32 ( table - > entries [ i ] . clk ) ;
pi - > acp_level [ i ] . MinVoltage = cpu_to_be16 ( table - > entries [ i ] . v ) ;
ret = radeon_atom_get_clock_dividers ( rdev , COMPUTE_ENGINE_PLL_PARAM ,
table - > entries [ i ] . clk , false , & dividers ) ;
if ( ret )
return ret ;
pi - > acp_level [ i ] . Divider = ( u8 ) dividers . post_div ;
pi - > acp_level_count + + ;
}
ret = kv_copy_bytes_to_smc ( rdev ,
pi - > dpm_table_start +
offsetof ( SMU7_Fusion_DpmTable , AcpLevelCount ) ,
( u8 * ) & pi - > acp_level_count ,
sizeof ( u8 ) ,
pi - > sram_end ) ;
if ( ret )
return ret ;
pi - > acp_interval = 1 ;
ret = kv_copy_bytes_to_smc ( rdev ,
pi - > dpm_table_start +
offsetof ( SMU7_Fusion_DpmTable , ACPInterval ) ,
( u8 * ) & pi - > acp_interval ,
sizeof ( u8 ) ,
pi - > sram_end ) ;
if ( ret )
return ret ;
ret = kv_copy_bytes_to_smc ( rdev ,
pi - > dpm_table_start +
offsetof ( SMU7_Fusion_DpmTable , AcpLevel ) ,
( u8 * ) & pi - > acp_level ,
sizeof ( SMU7_Fusion_ExtClkLevel ) * SMU7_MAX_LEVELS_ACP ,
pi - > sram_end ) ;
if ( ret )
return ret ;
return ret ;
}
static void kv_calculate_dfs_bypass_settings ( struct radeon_device * rdev )
{
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
u32 i ;
struct radeon_clock_voltage_dependency_table * table =
& rdev - > pm . dpm . dyn_state . vddc_dependency_on_sclk ;
if ( table & & table - > count ) {
for ( i = 0 ; i < pi - > graphics_dpm_level_count ; i + + ) {
if ( pi - > caps_enable_dfs_bypass ) {
if ( kv_get_clock_difference ( table - > entries [ i ] . clk , 40000 ) < 200 )
pi - > graphics_level [ i ] . ClkBypassCntl = 3 ;
else if ( kv_get_clock_difference ( table - > entries [ i ] . clk , 30000 ) < 200 )
pi - > graphics_level [ i ] . ClkBypassCntl = 2 ;
else if ( kv_get_clock_difference ( table - > entries [ i ] . clk , 26600 ) < 200 )
pi - > graphics_level [ i ] . ClkBypassCntl = 7 ;
else if ( kv_get_clock_difference ( table - > entries [ i ] . clk , 20000 ) < 200 )
pi - > graphics_level [ i ] . ClkBypassCntl = 6 ;
else if ( kv_get_clock_difference ( table - > entries [ i ] . clk , 10000 ) < 200 )
pi - > graphics_level [ i ] . ClkBypassCntl = 8 ;
else
pi - > graphics_level [ i ] . ClkBypassCntl = 0 ;
} else {
pi - > graphics_level [ i ] . ClkBypassCntl = 0 ;
}
}
} else {
struct sumo_sclk_voltage_mapping_table * table =
& pi - > sys_info . sclk_voltage_mapping_table ;
for ( i = 0 ; i < pi - > graphics_dpm_level_count ; i + + ) {
if ( pi - > caps_enable_dfs_bypass ) {
if ( kv_get_clock_difference ( table - > entries [ i ] . sclk_frequency , 40000 ) < 200 )
pi - > graphics_level [ i ] . ClkBypassCntl = 3 ;
else if ( kv_get_clock_difference ( table - > entries [ i ] . sclk_frequency , 30000 ) < 200 )
pi - > graphics_level [ i ] . ClkBypassCntl = 2 ;
else if ( kv_get_clock_difference ( table - > entries [ i ] . sclk_frequency , 26600 ) < 200 )
pi - > graphics_level [ i ] . ClkBypassCntl = 7 ;
else if ( kv_get_clock_difference ( table - > entries [ i ] . sclk_frequency , 20000 ) < 200 )
pi - > graphics_level [ i ] . ClkBypassCntl = 6 ;
else if ( kv_get_clock_difference ( table - > entries [ i ] . sclk_frequency , 10000 ) < 200 )
pi - > graphics_level [ i ] . ClkBypassCntl = 8 ;
else
pi - > graphics_level [ i ] . ClkBypassCntl = 0 ;
} else {
pi - > graphics_level [ i ] . ClkBypassCntl = 0 ;
}
}
}
}
/* Enable or disable ULV (ultra-low voltage) via the SMU. */
static int kv_enable_ulv(struct radeon_device *rdev, bool enable)
{
	PPSMC_Msg msg = enable ? PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV;

	return kv_notify_message_to_smu(rdev, msg);
}
2013-09-04 12:01:28 -04:00
static void kv_reset_acp_boot_level ( struct radeon_device * rdev )
{
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
pi - > acp_boot_level = 0xff ;
}
2013-08-14 01:01:40 -04:00
static void kv_update_current_ps ( struct radeon_device * rdev ,
struct radeon_ps * rps )
{
struct kv_ps * new_ps = kv_get_ps ( rps ) ;
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
pi - > current_rps = * rps ;
pi - > current_ps = * new_ps ;
pi - > current_rps . ps_priv = & pi - > current_ps ;
}
static void kv_update_requested_ps ( struct radeon_device * rdev ,
struct radeon_ps * rps )
{
struct kv_ps * new_ps = kv_get_ps ( rps ) ;
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
pi - > requested_rps = * rps ;
pi - > requested_ps = * new_ps ;
pi - > requested_rps . ps_priv = & pi - > requested_ps ;
}
2013-09-09 19:33:08 -04:00
/* Toggle BAPM (bidirectional application power management) in the SMU,
 * but only when BAPM support was detected at init. */
void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (!pi->bapm_enable)
		return;

	if (kv_smc_bapm_enable(rdev, enable))
		DRM_ERROR("kv_smc_bapm_enable failed\n");
}
2015-02-06 12:53:27 -05:00
/* Gate both the high- and low-temperature threshold interrupts. */
static void kv_enable_thermal_int(struct radeon_device *rdev, bool enable)
{
	u32 ctrl = RREG32_SMC(CG_THERMAL_INT_CTRL);

	if (enable)
		ctrl |= THERM_INTH_MASK | THERM_INTL_MASK;
	else
		ctrl &= ~(THERM_INTH_MASK | THERM_INTL_MASK);
	WREG32_SMC(CG_THERMAL_INT_CTRL, ctrl);
}
2013-08-14 01:01:40 -04:00
/*
 * Bring up dynamic power management: parse the SMU firmware header, load
 * the DPM tables (graphics, UVD, VCE, SAMU, ACP), program the bootup
 * state, then start the automatic/DPM machinery and its auxiliary
 * features (thermal throttling, voltage scaling, ULV, DIDT, CAC).
 * Each failing step aborts and returns its error.
 */
int kv_dpm_enable(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	ret = kv_process_firmware_header(rdev);
	if (ret) {
		DRM_ERROR("kv_process_firmware_header failed\n");
		return ret;
	}
	kv_init_fps_limits(rdev);
	kv_init_graphics_levels(rdev);
	ret = kv_program_bootup_state(rdev);
	if (ret) {
		DRM_ERROR("kv_program_bootup_state failed\n");
		return ret;
	}
	kv_calculate_dfs_bypass_settings(rdev);
	ret = kv_upload_dpm_settings(rdev);
	if (ret) {
		DRM_ERROR("kv_upload_dpm_settings failed\n");
		return ret;
	}
	ret = kv_populate_uvd_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_uvd_table failed\n");
		return ret;
	}
	ret = kv_populate_vce_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_vce_table failed\n");
		return ret;
	}
	ret = kv_populate_samu_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_samu_table failed\n");
		return ret;
	}
	ret = kv_populate_acp_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_acp_table failed\n");
		return ret;
	}
	kv_program_vc(rdev);

	kv_start_am(rdev);
	if (pi->enable_auto_thermal_throttling) {
		ret = kv_enable_auto_thermal_throttling(rdev);
		if (ret) {
			DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
			return ret;
		}
	}
	ret = kv_enable_dpm_voltage_scaling(rdev);
	if (ret) {
		DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
		return ret;
	}
	ret = kv_set_dpm_interval(rdev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_interval failed\n");
		return ret;
	}
	ret = kv_set_dpm_boot_state(rdev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_boot_state failed\n");
		return ret;
	}
	ret = kv_enable_ulv(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_ulv failed\n");
		return ret;
	}
	kv_start_dpm(rdev);
	ret = kv_enable_didt(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_didt failed\n");
		return ret;
	}
	ret = kv_enable_smc_cac(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_smc_cac failed\n");
		return ret;
	}

	kv_reset_acp_boot_level(rdev);

	/* BAPM starts off; kv_dpm_enable_bapm()/set_power_state manage it */
	ret = kv_smc_bapm_enable(rdev, false);
	if (ret) {
		DRM_ERROR("kv_smc_bapm_enable failed\n");
		return ret;
	}

	kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);

	return ret;
}
2013-12-19 14:03:36 -05:00
/*
 * Late DPM init, run once interrupts are available: program the thermal
 * trip range and unmask the thermal interrupt (only if an internal
 * sensor is present), then gate the multimedia blocks until they are
 * actually needed.
 */
int kv_dpm_late_enable(struct radeon_device *rdev)
{
	int ret = 0;

	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret) {
			DRM_ERROR("kv_set_thermal_temperature_range failed\n");
			return ret;
		}
		kv_enable_thermal_int(rdev, true);
	}

	/* powerdown unused blocks for now */
	kv_dpm_powergate_acp(rdev, true);
	kv_dpm_powergate_samu(rdev, true);
	kv_dpm_powergate_vce(rdev, true);
	kv_dpm_powergate_uvd(rdev, true);

	return ret;
}
2013-08-14 01:01:40 -04:00
/*
 * Tear down DPM: switch BAPM (and NB DPM on Mullins) off, power the
 * multimedia blocks back up so they are in a known state, then unwind
 * the enable sequence (CAC, DIDT, VC, DPM, ULV, AM) and mask the thermal
 * interrupt.  Finishes by restoring the boot state as current.
 */
void kv_dpm_disable(struct radeon_device *rdev)
{
	kv_smc_bapm_enable(rdev, false);

	if (rdev->family == CHIP_MULLINS)
		kv_enable_nb_dpm(rdev, false);

	/* powerup blocks */
	kv_dpm_powergate_acp(rdev, false);
	kv_dpm_powergate_samu(rdev, false);
	kv_dpm_powergate_vce(rdev, false);
	kv_dpm_powergate_uvd(rdev, false);

	kv_enable_smc_cac(rdev, false);
	kv_enable_didt(rdev, false);
	kv_clear_vc(rdev);
	kv_stop_dpm(rdev);
	kv_enable_ulv(rdev, false);
	kv_reset_am(rdev);

	kv_enable_thermal_int(rdev, false);

	kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
}
/* Reset the cached low-sclk interrupt threshold to zero. */
static void kv_init_sclk_t(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->low_sclk_interrupt_t = 0;
}
static int kv_init_fps_limits ( struct radeon_device * rdev )
{
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
int ret = 0 ;
if ( pi - > caps_fps ) {
u16 tmp ;
tmp = 45 ;
pi - > fps_high_t = cpu_to_be16 ( tmp ) ;
ret = kv_copy_bytes_to_smc ( rdev ,
pi - > dpm_table_start +
offsetof ( SMU7_Fusion_DpmTable , FpsHighT ) ,
( u8 * ) & pi - > fps_high_t ,
sizeof ( u16 ) , pi - > sram_end ) ;
tmp = 30 ;
pi - > fps_low_t = cpu_to_be16 ( tmp ) ;
ret = kv_copy_bytes_to_smc ( rdev ,
pi - > dpm_table_start +
offsetof ( SMU7_Fusion_DpmTable , FpsLowT ) ,
( u8 * ) & pi - > fps_low_t ,
sizeof ( u16 ) , pi - > sram_end ) ;
}
return ret ;
}
/* Mark all multimedia blocks as ungated so the first gate call takes effect. */
static void kv_init_powergate_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->uvd_power_gated = false;
	pi->vce_power_gated = false;
	pi->samu_power_gated = false;
	pi->acp_power_gated = false;
}
/* Enable or disable UVD DPM through the SMU message interface. */
static int kv_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
{
	if (enable)
		return kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDDPM_Enable);

	return kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDDPM_Disable);
}
/* Enable or disable VCE DPM through the SMU message interface. */
static int kv_enable_vce_dpm(struct radeon_device *rdev, bool enable)
{
	if (enable)
		return kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEDPM_Enable);

	return kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEDPM_Disable);
}
/* Enable or disable SAMU DPM through the SMU message interface. */
static int kv_enable_samu_dpm(struct radeon_device *rdev, bool enable)
{
	if (enable)
		return kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMUDPM_Enable);

	return kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMUDPM_Disable);
}
/* Enable or disable ACP DPM through the SMU message interface. */
static int kv_enable_acp_dpm(struct radeon_device *rdev, bool enable)
{
	if (enable)
		return kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPDPM_Enable);

	return kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPDPM_Disable);
}
/*
 * Reconfigure UVD DPM around a gate change.  When ungating, pick the UVD
 * boot level from the dependency table, write it into the SMU DPM table
 * and program the enabled-level mask; in both directions UVD DPM itself
 * is finally enabled (!gate) or disabled (gate).
 */
static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_uvd_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	int ret;
	u32 mask;

	if (!gate) {
		/* boot from the highest table entry if one exists */
		if (table->count)
			pi->uvd_boot_level = table->count - 1;
		else
			pi->uvd_boot_level = 0;

		if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {
			/* restrict UVD to the boot level only */
			mask = 1 << pi->uvd_boot_level;
		} else {
			mask = 0x1f;
		}

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
					   (uint8_t *)&pi->uvd_boot_level,
					   sizeof(u8), pi->sram_end);
		if (ret)
			return ret;

		kv_send_msg_to_smc_with_parameter(rdev,
						  PPSMC_MSG_UVDDPM_SetEnabledMask,
						  mask);
	}

	return kv_enable_uvd_dpm(rdev, !gate);
}
2014-08-14 01:22:31 -04:00
/*
 * Return the index of the first VCE dependency-table entry whose evclk
 * covers the requested @evclk (table->count when no entry qualifies).
 */
static u8 kv_get_vce_boot_level(struct radeon_device *rdev, u32 evclk)
{
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	u8 idx = 0;

	while (idx < table->count && table->entries[idx].evclk < evclk)
		idx++;

	return idx;
}
/*
 * Handle VCE DPM across a state transition.  On an encode start (evclk
 * going 0 -> nonzero) ungate VCE, turn its clocks on, program the VCE
 * boot level into the SMU table and enable VCE DPM; on an encode stop
 * (nonzero -> 0) do the reverse in the opposite order.
 */
static int kv_update_vce_dpm(struct radeon_device *rdev,
			     struct radeon_ps *radeon_new_state,
			     struct radeon_ps *radeon_current_state)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	int ret;

	if (radeon_new_state->evclk > 0 && radeon_current_state->evclk == 0) {
		kv_dpm_powergate_vce(rdev, false);
		/* turn the clocks on when encoding */
		cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);
		if (pi->caps_stable_p_state)
			pi->vce_boot_level = table->count - 1;
		else
			pi->vce_boot_level = kv_get_vce_boot_level(rdev, radeon_new_state->evclk);

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
					   (u8 *)&pi->vce_boot_level,
					   sizeof(u8),
					   pi->sram_end);
		if (ret)
			return ret;

		/* in stable-p-state mode only the boot level is enabled */
		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_VCEDPM_SetEnabledMask,
							  (1 << pi->vce_boot_level));

		kv_enable_vce_dpm(rdev, true);
	} else if (radeon_new_state->evclk == 0 && radeon_current_state->evclk > 0) {
		kv_enable_vce_dpm(rdev, false);
		/* turn the clocks off when not encoding */
		cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);
		kv_dpm_powergate_vce(rdev, true);
	}

	return 0;
}
/*
 * Reconfigure SAMU DPM around a gate change.  When ungating, select the
 * SAMU boot level (highest table entry in stable-p-state mode, else 0),
 * write it to the SMU table and restrict the enabled mask if needed;
 * SAMU DPM itself is then enabled (!gate) or disabled (gate).
 */
static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->samu_boot_level = table->count - 1;
		else
			pi->samu_boot_level = 0;

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
					   (u8 *)&pi->samu_boot_level,
					   sizeof(u8),
					   pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_SAMUDPM_SetEnabledMask,
							  (1 << pi->samu_boot_level));
	}

	return kv_enable_samu_dpm(rdev, !gate);
}
2013-09-04 12:01:28 -04:00
/*
 * Pick an ACP boot level from the ACP dependency table.
 *
 * NOTE(review): the `>= 0` comparison (marked XXX upstream) is trivially
 * true for an unsigned clk, so this currently always returns 0 when the
 * table has entries -- the real selection criterion looks unfinished;
 * confirm against the intended ACP clock policy.
 */
static u8 kv_get_acp_boot_level(struct radeon_device *rdev)
{
	u8 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].clk >= 0) /* XXX */
			break;
	}

	/* clamp to the last entry if nothing matched */
	if (i >= table->count)
		i = table->count - 1;

	return i;
}
static void kv_update_acp_boot_level ( struct radeon_device * rdev )
{
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
u8 acp_boot_level ;
if ( ! pi - > caps_stable_p_state ) {
acp_boot_level = kv_get_acp_boot_level ( rdev ) ;
if ( acp_boot_level ! = pi - > acp_boot_level ) {
pi - > acp_boot_level = acp_boot_level ;
kv_send_msg_to_smc_with_parameter ( rdev ,
PPSMC_MSG_ACPDPM_SetEnabledMask ,
( 1 < < pi - > acp_boot_level ) ) ;
}
}
}
2013-08-14 01:01:40 -04:00
/*
 * Reconfigure ACP DPM around a gate change.  When ungating, choose the
 * ACP boot level (highest entry in stable-p-state mode, otherwise via
 * kv_get_acp_boot_level()), write it to the SMU table and restrict the
 * enabled mask if needed; ACP DPM itself is then enabled or disabled.
 */
static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->acp_boot_level = table->count - 1;
		else
			pi->acp_boot_level = kv_get_acp_boot_level(rdev);

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
					   (u8 *)&pi->acp_boot_level,
					   sizeof(u8),
					   pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_ACPDPM_SetEnabledMask,
							  (1 << pi->acp_boot_level));
	}

	return kv_enable_acp_dpm(rdev, !gate);
}
2013-08-09 10:02:40 -04:00
/*
 * Gate or ungate UVD power.  Gating stops the UVD block and its clock
 * gating before asking the SMU to cut power; ungating powers the block
 * on first, then resumes and restarts it.  UVD DPM is reconfigured in
 * both directions via kv_update_uvd_dpm().
 */
void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->uvd_power_gated == gate)
		return;

	pi->uvd_power_gated = gate;

	if (gate) {
		if (pi->caps_uvd_pg) {
			/* stop the block before removing power */
			uvd_v1_0_stop(rdev);
			cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
		}
		kv_update_uvd_dpm(rdev, gate);
		if (pi->caps_uvd_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerOFF);
	} else {
		if (pi->caps_uvd_pg) {
			/* restore power before touching the block */
			kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerON);
			uvd_v4_2_resume(rdev);
			uvd_v1_0_start(rdev);
			cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
		}
		kv_update_uvd_dpm(rdev, gate);
	}
}
/*
 * Gate or ungate VCE power via SMU messages.  Ungating also resumes and
 * restarts the VCE block after power returns.  Clock gating and VCE DPM
 * are handled separately in kv_update_vce_dpm().
 */
static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->vce_power_gated == gate)
		return;

	pi->vce_power_gated = gate;

	if (gate) {
		if (pi->caps_vce_pg) {
			/* XXX do we need a vce_v1_0_stop() ? */
			kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerOFF);
		}
	} else {
		if (pi->caps_vce_pg) {
			kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerON);
			vce_v2_0_resume(rdev);
			vce_v1_0_start(rdev);
		}
	}
}
/*
 * Gate or ungate SAMU power.  Gating disables SAMU DPM before cutting
 * power; ungating restores power before re-enabling DPM.
 */
static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->samu_power_gated == gate)
		return;

	pi->samu_power_gated = gate;

	if (gate) {
		kv_update_samu_dpm(rdev, true);
		if (pi->caps_samu_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerOFF);
	} else {
		if (pi->caps_samu_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerON);
		kv_update_samu_dpm(rdev, false);
	}
}
/*
 * Gate or ungate ACP power.  Skipped entirely on Kabini/Mullins.  As
 * with SAMU: DPM is stopped before power-off and restarted after
 * power-on.
 */
static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->acp_power_gated == gate)
		return;

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return;

	pi->acp_power_gated = gate;

	if (gate) {
		kv_update_acp_dpm(rdev, true);
		if (pi->caps_acp_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerOFF);
	} else {
		if (pi->caps_acp_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerON);
		kv_update_acp_dpm(rdev, false);
	}
}
/*
 * Compute pi->lowest_valid / pi->highest_valid: the range of graphics
 * DPM levels bracketing the new state's lowest and highest sclk.  Uses
 * the vddc-vs-sclk dependency table when it has entries, otherwise the
 * sysinfo sclk/voltage mapping table.  If the bounds cross, the range
 * collapses to whichever single level is closer in clock.
 */
static void kv_set_valid_clock_range(struct radeon_device *rdev,
				     struct radeon_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		/* first level whose clk covers the state's minimum sclk */
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
			    (i == (pi->graphics_dpm_level_count - 1))) {
				pi->lowest_valid = i;
				break;
			}
		}

		/* highest level not above the state's maximum sclk */
		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
				break;
		}
		pi->highest_valid = i;

		if (pi->lowest_valid > pi->highest_valid) {
			/* crossed bounds: keep the closer one */
			if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
			    (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
			if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
			    i == (int)(pi->graphics_dpm_level_count - 1)) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].sclk_frequency <=
			    new_ps->levels[new_ps->num_levels - 1].sclk)
				break;
		}
		pi->highest_valid = i;

		if (pi->lowest_valid > pi->highest_valid) {
			/* crossed bounds: keep the closer one */
			if ((new_ps->levels[0].sclk -
			     table->entries[pi->highest_valid].sclk_frequency) >
			    (table->entries[pi->lowest_valid].sclk_frequency -
			     new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	}
}
static int kv_update_dfs_bypass_settings ( struct radeon_device * rdev ,
struct radeon_ps * new_rps )
{
struct kv_ps * new_ps = kv_get_ps ( new_rps ) ;
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
int ret = 0 ;
u8 clk_bypass_cntl ;
if ( pi - > caps_enable_dfs_bypass ) {
clk_bypass_cntl = new_ps - > need_dfs_bypass ?
pi - > graphics_level [ pi - > graphics_boot_level ] . ClkBypassCntl : 0 ;
ret = kv_copy_bytes_to_smc ( rdev ,
( pi - > dpm_table_start +
offsetof ( SMU7_Fusion_DpmTable , GraphicsLevel ) +
( pi - > graphics_boot_level * sizeof ( SMU7_Fusion_GraphicsLevel ) ) +
offsetof ( SMU7_Fusion_GraphicsLevel , ClkBypassCntl ) ) ,
& clk_bypass_cntl ,
sizeof ( u8 ) , pi - > sram_end ) ;
}
return ret ;
}
2014-09-18 11:16:31 -04:00
/*
 * Enable or disable northbridge DPM via SMU message, tracking the state
 * in pi->nb_dpm_enabled so repeated calls are no-ops.  Does nothing when
 * NB DPM is not supported on this configuration.
 */
static int kv_enable_nb_dpm(struct radeon_device *rdev,
			    bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;

	if (!pi->enable_nb_dpm)
		return 0;

	if (enable && !pi->nb_dpm_enabled) {
		ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
		if (!ret)
			pi->nb_dpm_enabled = true;
	} else if (!enable && pi->nb_dpm_enabled) {
		ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Disable);
		if (!ret)
			pi->nb_dpm_enabled = false;
	}

	return ret;
}
2013-07-18 16:48:46 -04:00
int kv_dpm_force_performance_level ( struct radeon_device * rdev ,
enum radeon_dpm_forced_level level )
{
int ret ;
if ( level = = RADEON_DPM_FORCED_LEVEL_HIGH ) {
ret = kv_force_dpm_highest ( rdev ) ;
if ( ret )
return ret ;
} else if ( level = = RADEON_DPM_FORCED_LEVEL_LOW ) {
ret = kv_force_dpm_lowest ( rdev ) ;
if ( ret )
return ret ;
} else if ( level = = RADEON_DPM_FORCED_LEVEL_AUTO ) {
ret = kv_unforce_levels ( rdev ) ;
if ( ret )
return ret ;
}
rdev - > pm . dpm . forced_level = level ;
return 0 ;
}
2013-08-14 01:01:40 -04:00
/*
 * Stage the requested power state: copy it into pi->requested_rps and
 * run the KV adjustment rules against the current state before the
 * hardware is programmed by kv_dpm_set_power_state().
 */
int kv_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
	struct radeon_ps *new_ps = &requested_ps;

	kv_update_requested_ps(rdev, new_ps);

	kv_apply_state_adjust_rules(rdev,
				    &pi->requested_rps,
				    &pi->current_rps);

	return 0;
}
/*
 * Program the hardware for the previously staged requested state.  BAPM
 * (when enabled) is first synced with the AC/DC power source; the table
 * reprogramming sequence then differs by family: Kabini/Mullins pin the
 * lowest level around the update, other parts freeze sclk DPM instead.
 */
int kv_dpm_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;
	struct radeon_ps *old_ps = &pi->current_rps;
	int ret;

	if (pi->bapm_enable) {
		/* BAPM policy follows the current power source */
		ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power);
		if (ret) {
			DRM_ERROR("kv_smc_bapm_enable failed\n");
			return ret;
		}
	}

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(rdev, new_ps);
			kv_update_dfs_bypass_settings(rdev, new_ps);
			ret = kv_calculate_ds_divider(rdev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(rdev);
			kv_calculate_dpm_settings(rdev);
			/* pin to the lowest level while the tables change */
			kv_force_lowest_valid(rdev);
			kv_enable_new_levels(rdev);
			kv_upload_dpm_settings(rdev);
			kv_program_nbps_index_settings(rdev, new_ps);
			kv_unforce_levels(rdev);
			kv_set_enabled_levels(rdev);
			kv_force_lowest_valid(rdev);
			kv_unforce_levels(rdev);

			ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
			kv_update_sclk_t(rdev);
			if (rdev->family == CHIP_MULLINS)
				kv_enable_nb_dpm(rdev, true);
		}
	} else {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(rdev, new_ps);
			kv_update_dfs_bypass_settings(rdev, new_ps);
			ret = kv_calculate_ds_divider(rdev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(rdev);
			kv_calculate_dpm_settings(rdev);
			/* freeze sclk DPM while the tables are rewritten */
			kv_freeze_sclk_dpm(rdev, true);
			kv_upload_dpm_settings(rdev);
			kv_program_nbps_index_settings(rdev, new_ps);
			kv_freeze_sclk_dpm(rdev, false);
			kv_set_enabled_levels(rdev);
			ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
			kv_update_acp_boot_level(rdev);
			kv_update_sclk_t(rdev);
			kv_enable_nb_dpm(rdev, true);
		}
	}

	return 0;
}
/* Commit the requested state as the current state after programming. */
void kv_dpm_post_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;

	kv_update_current_ps(rdev, new_ps);
}
/*
 * Early ASIC setup for DPM: hand clock control to the SMU and reset the
 * powergating bookkeeping and the low-sclk threshold.
 */
void kv_dpm_setup_asic(struct radeon_device *rdev)
{
	sumo_take_smu_control(rdev, true);
	kv_init_powergate_state(rdev);
	kv_init_sclk_t(rdev);
}
//XXX use sumo_dpm_display_configuration_changed
/*
 * Fill @table with the maximum limits: sclk and vddc from the last
 * (highest-index) sclk/voltage mapping entry, mclk from NB p-state
 * memory clock entry 0.
 */
static void kv_construct_max_power_limits_table(struct radeon_device *rdev,
						struct radeon_clock_and_voltage_limits *table)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
		int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;

		table->sclk =
			pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
		table->vddc =
			kv_convert_2bit_index_to_voltage(rdev,
							 pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
	}

	table->mclk = pi->sys_info.nbp_memory_clock[0];
}
static void kv_patch_voltage_values ( struct radeon_device * rdev )
{
int i ;
2014-04-30 18:40:52 -04:00
struct radeon_uvd_clock_voltage_dependency_table * uvd_table =
2013-08-14 01:01:40 -04:00
& rdev - > pm . dpm . dyn_state . uvd_clock_voltage_dependency_table ;
2014-04-30 18:40:52 -04:00
struct radeon_vce_clock_voltage_dependency_table * vce_table =
& rdev - > pm . dpm . dyn_state . vce_clock_voltage_dependency_table ;
struct radeon_clock_voltage_dependency_table * samu_table =
& rdev - > pm . dpm . dyn_state . samu_clock_voltage_dependency_table ;
struct radeon_clock_voltage_dependency_table * acp_table =
& rdev - > pm . dpm . dyn_state . acp_clock_voltage_dependency_table ;
if ( uvd_table - > count ) {
for ( i = 0 ; i < uvd_table - > count ; i + + )
uvd_table - > entries [ i ] . v =
kv_convert_8bit_index_to_voltage ( rdev ,
uvd_table - > entries [ i ] . v ) ;
}
if ( vce_table - > count ) {
for ( i = 0 ; i < vce_table - > count ; i + + )
vce_table - > entries [ i ] . v =
kv_convert_8bit_index_to_voltage ( rdev ,
vce_table - > entries [ i ] . v ) ;
}
2013-08-14 01:01:40 -04:00
2014-04-30 18:40:52 -04:00
if ( samu_table - > count ) {
for ( i = 0 ; i < samu_table - > count ; i + + )
samu_table - > entries [ i ] . v =
2013-08-14 01:01:40 -04:00
kv_convert_8bit_index_to_voltage ( rdev ,
2014-04-30 18:40:52 -04:00
samu_table - > entries [ i ] . v ) ;
}
if ( acp_table - > count ) {
for ( i = 0 ; i < acp_table - > count ; i + + )
acp_table - > entries [ i ] . v =
kv_convert_8bit_index_to_voltage ( rdev ,
acp_table - > entries [ i ] . v ) ;
2013-08-14 01:01:40 -04:00
}
}
/* Seed the boot power level from the bootup clock/voltage in sysinfo. */
static void kv_construct_boot_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
	pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
	pi->boot_pl.ds_divider_index = 0;
	pi->boot_pl.ss_divider_index = 0;
	pi->boot_pl.allow_gnb_slow = 1;
	pi->boot_pl.force_nbp_state = 0;
	pi->boot_pl.display_wm = 0;
	pi->boot_pl.vce_wm = 0;
}
2013-07-18 16:48:46 -04:00
/*
 * Force the highest enabled graphics DPM level: scan the enable mask
 * from the top for the highest set bit, then either send DPM_ForceState
 * (Kabini/Mullins) or restrict the enabled-level set (other parts).
 */
static int kv_force_dpm_highest(struct radeon_device *rdev)
{
	int ret;
	u32 enable_mask, i;

	ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
	if (ret)
		return ret;

	for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
		if (enable_mask & (1 << i))
			break;
	}

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
	else
		return kv_set_enabled_level(rdev, i);
}
2013-08-14 01:01:40 -04:00
/*
 * Force the lowest enabled graphics DPM level: scan the enable mask
 * from the bottom for the lowest set bit, then apply it the same way as
 * kv_force_dpm_highest().
 */
static int kv_force_dpm_lowest(struct radeon_device *rdev)
{
	int ret;
	u32 enable_mask, i;

	ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
	if (ret)
		return ret;

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
		if (enable_mask & (1 << i))
			break;
	}

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
	else
		return kv_set_enabled_level(rdev, i);
}
/*
 * Select the deepest sleep divider id whose divided sclk still stays at
 * or above the effective minimum clock.  Returns 0 (no deep sleep) when
 * the clock is already at/below the minimum or deep sleep is
 * unsupported.
 */
static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
					     u32 sclk, u32 min_sclk_in_sr)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 floor = (min_sclk_in_sr > KV_MINIMUM_ENGINE_CLOCK) ?
		min_sclk_in_sr : KV_MINIMUM_ENGINE_CLOCK;
	u32 id;

	if (sclk < floor)
		return 0;

	if (!pi->caps_sclk_ds)
		return 0;

	for (id = KV_MAX_DEEPSLEEP_DIVIDER_ID; id > 0; id--) {
		if (sclk / sumo_get_sleep_divider_from_id(id) >= floor)
			break;
	}

	return (u8)id;
}
/*
 * Find the highest table index whose voltage does not exceed
 * pi->high_voltage_t, scanning from the top of either the vddc-vs-sclk
 * dependency table or the sysinfo mapping table.  Writes 0 to *limit
 * when no threshold is set or no entry qualifies.  Always returns 0.
 */
static int kv_get_high_voltage_limit(struct radeon_device *rdev, int *limit)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	int i;

	if (table && table->count) {
		for (i = table->count - 1; i >= 0; i--) {
			if (pi->high_voltage_t &&
			    (kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v) <=
			     pi->high_voltage_t)) {
				*limit = i;
				return 0;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
			if (pi->high_voltage_t &&
			    (kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit) <=
			     pi->high_voltage_t)) {
				*limit = i;
				return 0;
			}
		}
	}

	*limit = 0;
	return 0;
}
/*
 * Clamp and patch the requested state before programming: pull in the
 * VCE clocks for the active VCE level, enforce a minimum (or stable
 * p-state) sclk on every level, cap levels whose voltage exceeds the
 * high-voltage threshold, and choose the NB p-state ranges per family.
 */
static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *new_rps,
					struct radeon_ps *old_rps)
{
	struct kv_ps *ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 min_sclk = 10000; /* ??? */
	u32 sclk, mclk = 0;
	int i, limit;
	bool force_high;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 stable_p_state_sclk = 0;
	struct radeon_clock_and_voltage_limits *max_limits =
		&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	/* take the clocks of the current VCE level, or zero them */
	if (new_rps->vce_active) {
		new_rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
		new_rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
	} else {
		new_rps->evclk = 0;
		new_rps->ecclk = 0;
	}

	mclk = max_limits->mclk;
	sclk = min_sclk;

	if (pi->caps_stable_p_state) {
		/* target 75% of max sclk, snapped down to a table entry */
		stable_p_state_sclk = (max_limits->sclk * 75) / 100;

		for (i = table->count - 1; i >= 0; i--) {
			if (stable_p_state_sclk >= table->entries[i].clk) {
				stable_p_state_sclk = table->entries[i].clk;
				break;
			}
		}

		/*
		 * NOTE(review): this overwrites the snapped value with the
		 * lowest entry whenever the loop broke at i > 0; a "no entry
		 * matched" fallback would test i < 0 instead -- confirm the
		 * intended semantics before changing.
		 */
		if (i > 0)
			stable_p_state_sclk = table->entries[0].clk;

		sclk = stable_p_state_sclk;
	}

	/* VCE needs at least its own sclk */
	if (new_rps->vce_active) {
		if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
			sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
	}

	ps->need_dfs_bypass = true;

	/* raise every level to at least the minimum computed above */
	for (i = 0; i < ps->num_levels; i++) {
		if (ps->levels[i].sclk < sclk)
			ps->levels[i].sclk = sclk;
	}

	/* cap levels whose voltage exceeds the high-voltage threshold */
	if (table && table->count) {
		for (i = 0; i < ps->num_levels; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
				kv_get_high_voltage_limit(rdev, &limit);
				ps->levels[i].sclk = table->entries[limit].clk;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = 0; i < ps->num_levels; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
				kv_get_high_voltage_limit(rdev, &limit);
				ps->levels[i].sclk = table->entries[limit].sclk_frequency;
			}
		}
	}

	/* stable p-state pins every level to the same sclk */
	if (pi->caps_stable_p_state) {
		for (i = 0; i < ps->num_levels; i++) {
			ps->levels[i].sclk = stable_p_state_sclk;
		}
	}

	pi->video_start = new_rps->dclk || new_rps->vclk ||
		new_rps->evclk || new_rps->ecclk;

	if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
	    ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	/* NB p-state ranges: fixed on Kabini/Mullins, dynamic elsewhere */
	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
		ps->dpm0_pg_nb_ps_lo = 0x1;
		ps->dpm0_pg_nb_ps_hi = 0x0;
		ps->dpmx_nb_ps_lo = 0x1;
		ps->dpmx_nb_ps_hi = 0x0;
	} else {
		ps->dpm0_pg_nb_ps_lo = 0x3;
		ps->dpm0_pg_nb_ps_hi = 0x0;
		ps->dpmx_nb_ps_lo = 0x3;
		ps->dpmx_nb_ps_hi = 0x0;

		if (pi->sys_info.nb_dpm_enable) {
			force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
				pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) ||
				pi->disable_nb_ps3_in_battery;
			ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
			ps->dpm0_pg_nb_ps_hi = 0x2;
			ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
			ps->dpmx_nb_ps_hi = 0x2;
		}
	}
}
/* Mark whether graphics level @index participates in thermal throttling. */
static void kv_dpm_power_level_enabled_for_throttle(struct radeon_device *rdev,
						    u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
}
static int kv_calculate_ds_divider ( struct radeon_device * rdev )
{
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
u32 sclk_in_sr = 10000 ; /* ??? */
u32 i ;
if ( pi - > lowest_valid > pi - > highest_valid )
return - EINVAL ;
for ( i = pi - > lowest_valid ; i < = pi - > highest_valid ; i + + ) {
pi - > graphics_level [ i ] . DeepSleepDivId =
kv_get_sleep_divider_id_from_clock ( rdev ,
be32_to_cpu ( pi - > graphics_level [ i ] . SclkFrequency ) ,
sclk_in_sr ) ;
}
return 0 ;
}
static int kv_calculate_nbps_level_settings ( struct radeon_device * rdev )
{
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
u32 i ;
bool force_high ;
struct radeon_clock_and_voltage_limits * max_limits =
& rdev - > pm . dpm . dyn_state . max_clock_voltage_on_ac ;
u32 mclk = max_limits - > mclk ;
if ( pi - > lowest_valid > pi - > highest_valid )
return - EINVAL ;
2014-04-30 18:40:51 -04:00
if ( rdev - > family = = CHIP_KABINI | | rdev - > family = = CHIP_MULLINS ) {
2013-08-14 01:01:40 -04:00
for ( i = pi - > lowest_valid ; i < = pi - > highest_valid ; i + + ) {
pi - > graphics_level [ i ] . GnbSlow = 1 ;
pi - > graphics_level [ i ] . ForceNbPs1 = 0 ;
pi - > graphics_level [ i ] . UpH = 0 ;
}
if ( ! pi - > sys_info . nb_dpm_enable )
return 0 ;
force_high = ( ( mclk > = pi - > sys_info . nbp_memory_clock [ 3 ] ) | |
( rdev - > pm . dpm . new_active_crtc_count > = 3 ) | | pi - > video_start ) ;
if ( force_high ) {
for ( i = pi - > lowest_valid ; i < = pi - > highest_valid ; i + + )
pi - > graphics_level [ i ] . GnbSlow = 0 ;
} else {
if ( pi - > battery_state )
pi - > graphics_level [ 0 ] . ForceNbPs1 = 1 ;
pi - > graphics_level [ 1 ] . GnbSlow = 0 ;
pi - > graphics_level [ 2 ] . GnbSlow = 0 ;
pi - > graphics_level [ 3 ] . GnbSlow = 0 ;
pi - > graphics_level [ 4 ] . GnbSlow = 0 ;
}
} else {
for ( i = pi - > lowest_valid ; i < = pi - > highest_valid ; i + + ) {
pi - > graphics_level [ i ] . GnbSlow = 1 ;
pi - > graphics_level [ i ] . ForceNbPs1 = 0 ;
pi - > graphics_level [ i ] . UpH = 0 ;
}
if ( pi - > sys_info . nb_dpm_enable & & pi - > battery_state ) {
pi - > graphics_level [ pi - > lowest_valid ] . UpH = 0x28 ;
pi - > graphics_level [ pi - > lowest_valid ] . GnbSlow = 0 ;
if ( pi - > lowest_valid ! = pi - > highest_valid )
pi - > graphics_level [ pi - > lowest_valid ] . ForceNbPs1 = 1 ;
}
}
return 0 ;
}
static int kv_calculate_dpm_settings ( struct radeon_device * rdev )
{
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
u32 i ;
if ( pi - > lowest_valid > pi - > highest_valid )
return - EINVAL ;
for ( i = pi - > lowest_valid ; i < = pi - > highest_valid ; i + + )
pi - > graphics_level [ i ] . DisplayWatermark = ( i = = pi - > highest_valid ) ? 1 : 0 ;
return 0 ;
}
static void kv_init_graphics_levels ( struct radeon_device * rdev )
{
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
u32 i ;
struct radeon_clock_voltage_dependency_table * table =
& rdev - > pm . dpm . dyn_state . vddc_dependency_on_sclk ;
if ( table & & table - > count ) {
u32 vid_2bit ;
pi - > graphics_dpm_level_count = 0 ;
for ( i = 0 ; i < table - > count ; i + + ) {
if ( pi - > high_voltage_t & &
( pi - > high_voltage_t <
kv_convert_8bit_index_to_voltage ( rdev , table - > entries [ i ] . v ) ) )
break ;
kv_set_divider_value ( rdev , i , table - > entries [ i ] . clk ) ;
2014-04-30 18:40:52 -04:00
vid_2bit = kv_convert_vid7_to_vid2 ( rdev ,
& pi - > sys_info . vid_mapping_table ,
table - > entries [ i ] . v ) ;
2013-08-14 01:01:40 -04:00
kv_set_vid ( rdev , i , vid_2bit ) ;
kv_set_at ( rdev , i , pi - > at [ i ] ) ;
kv_dpm_power_level_enabled_for_throttle ( rdev , i , true ) ;
pi - > graphics_dpm_level_count + + ;
}
} else {
struct sumo_sclk_voltage_mapping_table * table =
& pi - > sys_info . sclk_voltage_mapping_table ;
pi - > graphics_dpm_level_count = 0 ;
for ( i = 0 ; i < table - > num_max_dpm_entries ; i + + ) {
if ( pi - > high_voltage_t & &
pi - > high_voltage_t <
kv_convert_2bit_index_to_voltage ( rdev , table - > entries [ i ] . vid_2bit ) )
break ;
kv_set_divider_value ( rdev , i , table - > entries [ i ] . sclk_frequency ) ;
kv_set_vid ( rdev , i , table - > entries [ i ] . vid_2bit ) ;
kv_set_at ( rdev , i , pi - > at [ i ] ) ;
kv_dpm_power_level_enabled_for_throttle ( rdev , i , true ) ;
pi - > graphics_dpm_level_count + + ;
}
}
for ( i = 0 ; i < SMU7_MAX_LEVELS_GRAPHICS ; i + + )
kv_dpm_power_level_enable ( rdev , i , false ) ;
}
static void kv_enable_new_levels ( struct radeon_device * rdev )
{
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
u32 i ;
for ( i = 0 ; i < SMU7_MAX_LEVELS_GRAPHICS ; i + + ) {
if ( i > = pi - > lowest_valid & & i < = pi - > highest_valid )
kv_dpm_power_level_enable ( rdev , i , true ) ;
}
}
2013-09-04 12:01:28 -04:00
static int kv_set_enabled_level ( struct radeon_device * rdev , u32 level )
{
u32 new_mask = ( 1 < < level ) ;
return kv_send_msg_to_smc_with_parameter ( rdev ,
PPSMC_MSG_SCLKDPM_SetEnabledMask ,
new_mask ) ;
}
2013-08-14 01:01:40 -04:00
static int kv_set_enabled_levels ( struct radeon_device * rdev )
{
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
u32 i , new_mask = 0 ;
for ( i = pi - > lowest_valid ; i < = pi - > highest_valid ; i + + )
new_mask | = ( 1 < < i ) ;
return kv_send_msg_to_smc_with_parameter ( rdev ,
PPSMC_MSG_SCLKDPM_SetEnabledMask ,
new_mask ) ;
}
static void kv_program_nbps_index_settings ( struct radeon_device * rdev ,
struct radeon_ps * new_rps )
{
struct kv_ps * new_ps = kv_get_ps ( new_rps ) ;
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
u32 nbdpmconfig1 ;
2014-04-30 18:40:51 -04:00
if ( rdev - > family = = CHIP_KABINI | | rdev - > family = = CHIP_MULLINS )
2013-08-14 01:01:40 -04:00
return ;
if ( pi - > sys_info . nb_dpm_enable ) {
nbdpmconfig1 = RREG32_SMC ( NB_DPM_CONFIG_1 ) ;
nbdpmconfig1 & = ~ ( Dpm0PgNbPsLo_MASK | Dpm0PgNbPsHi_MASK |
DpmXNbPsLo_MASK | DpmXNbPsHi_MASK ) ;
nbdpmconfig1 | = ( Dpm0PgNbPsLo ( new_ps - > dpm0_pg_nb_ps_lo ) |
Dpm0PgNbPsHi ( new_ps - > dpm0_pg_nb_ps_hi ) |
DpmXNbPsLo ( new_ps - > dpmx_nb_ps_lo ) |
DpmXNbPsHi ( new_ps - > dpmx_nb_ps_hi ) ) ;
WREG32_SMC ( NB_DPM_CONFIG_1 , nbdpmconfig1 ) ;
}
}
static int kv_set_thermal_temperature_range ( struct radeon_device * rdev ,
int min_temp , int max_temp )
{
int low_temp = 0 * 1000 ;
int high_temp = 255 * 1000 ;
u32 tmp ;
if ( low_temp < min_temp )
low_temp = min_temp ;
if ( high_temp > max_temp )
high_temp = max_temp ;
if ( high_temp < low_temp ) {
DRM_ERROR ( " invalid thermal range: %d - %d \n " , low_temp , high_temp ) ;
return - EINVAL ;
}
tmp = RREG32_SMC ( CG_THERMAL_INT_CTRL ) ;
tmp & = ~ ( DIG_THERM_INTH_MASK | DIG_THERM_INTL_MASK ) ;
tmp | = ( DIG_THERM_INTH ( 49 + ( high_temp / 1000 ) ) |
DIG_THERM_INTL ( 49 + ( low_temp / 1000 ) ) ) ;
WREG32_SMC ( CG_THERMAL_INT_CTRL , tmp ) ;
rdev - > pm . dpm . thermal . min_temp = low_temp ;
rdev - > pm . dpm . thermal . max_temp = high_temp ;
return 0 ;
}
/*
 * Overlay of the known ATOM IntegratedSystemInfo table revisions.
 * kv_parse_sys_info_table() only consumes the v1.8 (info_8) layout.
 */
union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
};
static int kv_parse_sys_info_table ( struct radeon_device * rdev )
{
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
struct radeon_mode_info * mode_info = & rdev - > mode_info ;
int index = GetIndexIntoMasterTable ( DATA , IntegratedSystemInfo ) ;
union igp_info * igp_info ;
u8 frev , crev ;
u16 data_offset ;
int i ;
if ( atom_parse_data_header ( mode_info - > atom_context , index , NULL ,
& frev , & crev , & data_offset ) ) {
igp_info = ( union igp_info * ) ( mode_info - > atom_context - > bios +
data_offset ) ;
if ( crev ! = 8 ) {
DRM_ERROR ( " Unsupported IGP table: %d %d \n " , frev , crev ) ;
return - EINVAL ;
}
pi - > sys_info . bootup_sclk = le32_to_cpu ( igp_info - > info_8 . ulBootUpEngineClock ) ;
pi - > sys_info . bootup_uma_clk = le32_to_cpu ( igp_info - > info_8 . ulBootUpUMAClock ) ;
pi - > sys_info . bootup_nb_voltage_index =
le16_to_cpu ( igp_info - > info_8 . usBootUpNBVoltage ) ;
if ( igp_info - > info_8 . ucHtcTmpLmt = = 0 )
pi - > sys_info . htc_tmp_lmt = 203 ;
else
pi - > sys_info . htc_tmp_lmt = igp_info - > info_8 . ucHtcTmpLmt ;
if ( igp_info - > info_8 . ucHtcHystLmt = = 0 )
pi - > sys_info . htc_hyst_lmt = 5 ;
else
pi - > sys_info . htc_hyst_lmt = igp_info - > info_8 . ucHtcHystLmt ;
if ( pi - > sys_info . htc_tmp_lmt < = pi - > sys_info . htc_hyst_lmt ) {
DRM_ERROR ( " The htcTmpLmt should be larger than htcHystLmt. \n " ) ;
}
if ( le32_to_cpu ( igp_info - > info_8 . ulSystemConfig ) & ( 1 < < 3 ) )
pi - > sys_info . nb_dpm_enable = true ;
else
pi - > sys_info . nb_dpm_enable = false ;
for ( i = 0 ; i < KV_NUM_NBPSTATES ; i + + ) {
pi - > sys_info . nbp_memory_clock [ i ] =
le32_to_cpu ( igp_info - > info_8 . ulNbpStateMemclkFreq [ i ] ) ;
pi - > sys_info . nbp_n_clock [ i ] =
le32_to_cpu ( igp_info - > info_8 . ulNbpStateNClkFreq [ i ] ) ;
}
if ( le32_to_cpu ( igp_info - > info_8 . ulGPUCapInfo ) &
SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS )
pi - > caps_enable_dfs_bypass = true ;
sumo_construct_sclk_voltage_mapping_table ( rdev ,
& pi - > sys_info . sclk_voltage_mapping_table ,
igp_info - > info_8 . sAvail_SCLK ) ;
sumo_construct_vid_mapping_table ( rdev ,
& pi - > sys_info . vid_mapping_table ,
igp_info - > info_8 . sAvail_SCLK ) ;
kv_construct_max_power_limits_table ( rdev ,
& rdev - > pm . dpm . dyn_state . max_clock_voltage_on_ac ) ;
}
return 0 ;
}
/* Overlay of the ATOM PowerPlay table revisions found in the VBIOS. */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

/* Per-asic clock-info layouts; KV uses the sumo variant. */
union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
};

/* PPLib power-state entry revisions; kv_parse_power_table() reads v2. */
union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};
static void kv_patch_boot_state ( struct radeon_device * rdev ,
struct kv_ps * ps )
{
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
ps - > num_levels = 1 ;
ps - > levels [ 0 ] = pi - > boot_pl ;
}
/*
 * Copy the PPLib non-clock info (caps, classification, UVD clocks) into
 * @rps and register boot/UVD states with the DPM core.
 */
static void kv_parse_pplib_non_clock_info(struct radeon_device *rdev,
					  struct radeon_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	struct kv_ps *ps = kv_get_ps(rps);

	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	/* VCLK/DCLK fields only exist past table revision 1 */
	if (table_rev > ATOM_PPLIB_NONCLOCKINFO_VER1) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		rdev->pm.dpm.boot_ps = rps;
		kv_patch_boot_state(rdev, ps);
	}
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		rdev->pm.dpm.uvd_ps = rps;
}
static void kv_parse_pplib_clock_info ( struct radeon_device * rdev ,
struct radeon_ps * rps , int index ,
union pplib_clock_info * clock_info )
{
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
struct kv_ps * ps = kv_get_ps ( rps ) ;
struct kv_pl * pl = & ps - > levels [ index ] ;
u32 sclk ;
sclk = le16_to_cpu ( clock_info - > sumo . usEngineClockLow ) ;
sclk | = clock_info - > sumo . ucEngineClockHigh < < 16 ;
pl - > sclk = sclk ;
pl - > vddc_index = clock_info - > sumo . vddcIndex ;
ps - > num_levels = index + 1 ;
if ( pi - > caps_sclk_ds ) {
pl - > ds_divider_index = 5 ;
pl - > ss_divider_index = 5 ;
}
}
static int kv_parse_power_table ( struct radeon_device * rdev )
{
struct radeon_mode_info * mode_info = & rdev - > mode_info ;
struct _ATOM_PPLIB_NONCLOCK_INFO * non_clock_info ;
union pplib_power_state * power_state ;
int i , j , k , non_clock_array_index , clock_array_index ;
union pplib_clock_info * clock_info ;
struct _StateArray * state_array ;
struct _ClockInfoArray * clock_info_array ;
struct _NonClockInfoArray * non_clock_info_array ;
union power_info * power_info ;
int index = GetIndexIntoMasterTable ( DATA , PowerPlayInfo ) ;
2016-03-16 12:56:45 +01:00
u16 data_offset ;
2013-08-14 01:01:40 -04:00
u8 frev , crev ;
u8 * power_state_offset ;
struct kv_ps * ps ;
if ( ! atom_parse_data_header ( mode_info - > atom_context , index , NULL ,
& frev , & crev , & data_offset ) )
return - EINVAL ;
power_info = ( union power_info * ) ( mode_info - > atom_context - > bios + data_offset ) ;
state_array = ( struct _StateArray * )
( mode_info - > atom_context - > bios + data_offset +
le16_to_cpu ( power_info - > pplib . usStateArrayOffset ) ) ;
clock_info_array = ( struct _ClockInfoArray * )
( mode_info - > atom_context - > bios + data_offset +
le16_to_cpu ( power_info - > pplib . usClockInfoArrayOffset ) ) ;
non_clock_info_array = ( struct _NonClockInfoArray * )
( mode_info - > atom_context - > bios + data_offset +
le16_to_cpu ( power_info - > pplib . usNonClockInfoArrayOffset ) ) ;
treewide: kzalloc() -> kcalloc()
The kzalloc() function has a 2-factor argument form, kcalloc(). This
patch replaces cases of:
kzalloc(a * b, gfp)
with:
kcalloc(a * b, gfp)
as well as handling cases of:
kzalloc(a * b * c, gfp)
with:
kzalloc(array3_size(a, b, c), gfp)
as it's slightly less ugly than:
kzalloc_array(array_size(a, b), c, gfp)
This does, however, attempt to ignore constant size factors like:
kzalloc(4 * 1024, gfp)
though any constants defined via macros get caught up in the conversion.
Any factors with a sizeof() of "unsigned char", "char", and "u8" were
dropped, since they're redundant.
The Coccinelle script used for this was:
// Fix redundant parens around sizeof().
@@
type TYPE;
expression THING, E;
@@
(
kzalloc(
- (sizeof(TYPE)) * E
+ sizeof(TYPE) * E
, ...)
|
kzalloc(
- (sizeof(THING)) * E
+ sizeof(THING) * E
, ...)
)
// Drop single-byte sizes and redundant parens.
@@
expression COUNT;
typedef u8;
typedef __u8;
@@
(
kzalloc(
- sizeof(u8) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(__u8) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(char) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(unsigned char) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(u8) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(__u8) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(char) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(unsigned char) * COUNT
+ COUNT
, ...)
)
// 2-factor product with sizeof(type/expression) and identifier or constant.
@@
type TYPE;
expression THING;
identifier COUNT_ID;
constant COUNT_CONST;
@@
(
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (COUNT_ID)
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * COUNT_ID
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (COUNT_CONST)
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * COUNT_CONST
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (COUNT_ID)
+ COUNT_ID, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * COUNT_ID
+ COUNT_ID, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (COUNT_CONST)
+ COUNT_CONST, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * COUNT_CONST
+ COUNT_CONST, sizeof(THING)
, ...)
)
// 2-factor product, only identifiers.
@@
identifier SIZE, COUNT;
@@
- kzalloc
+ kcalloc
(
- SIZE * COUNT
+ COUNT, SIZE
, ...)
// 3-factor product with 1 sizeof(type) or sizeof(expression), with
// redundant parens removed.
@@
expression THING;
identifier STRIDE, COUNT;
type TYPE;
@@
(
kzalloc(
- sizeof(TYPE) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(THING) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
)
// 3-factor product with 2 sizeof(variable), with redundant parens removed.
@@
expression THING1, THING2;
identifier COUNT;
type TYPE1, TYPE2;
@@
(
kzalloc(
- sizeof(TYPE1) * sizeof(TYPE2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kzalloc(
- sizeof(THING1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(THING1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
)
// 3-factor product, only identifiers, with redundant parens removed.
@@
identifier STRIDE, SIZE, COUNT;
@@
(
kzalloc(
- (COUNT) * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
)
// Any remaining multi-factor products, first at least 3-factor products,
// when they're not all constants...
@@
expression E1, E2, E3;
constant C1, C2, C3;
@@
(
kzalloc(C1 * C2 * C3, ...)
|
kzalloc(
- (E1) * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- (E1) * (E2) * E3
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- (E1) * (E2) * (E3)
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- E1 * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
)
// And then all remaining 2 factors products when they're not all constants,
// keeping sizeof() as the second factor argument.
@@
expression THING, E1, E2;
type TYPE;
constant C1, C2, C3;
@@
(
kzalloc(sizeof(THING) * C2, ...)
|
kzalloc(sizeof(TYPE) * C2, ...)
|
kzalloc(C1 * C2 * C3, ...)
|
kzalloc(C1 * C2, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (E2)
+ E2, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * E2
+ E2, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (E2)
+ E2, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * E2
+ E2, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- (E1) * E2
+ E1, E2
, ...)
|
- kzalloc
+ kcalloc
(
- (E1) * (E2)
+ E1, E2
, ...)
|
- kzalloc
+ kcalloc
(
- E1 * E2
+ E1, E2
, ...)
)
Signed-off-by: Kees Cook <keescook@chromium.org>
2018-06-12 14:03:40 -07:00
rdev - > pm . dpm . ps = kcalloc ( state_array - > ucNumEntries ,
sizeof ( struct radeon_ps ) ,
GFP_KERNEL ) ;
2013-08-14 01:01:40 -04:00
if ( ! rdev - > pm . dpm . ps )
return - ENOMEM ;
power_state_offset = ( u8 * ) state_array - > states ;
for ( i = 0 ; i < state_array - > ucNumEntries ; i + + ) {
2013-08-20 19:09:54 -04:00
u8 * idx ;
2013-08-14 01:01:40 -04:00
power_state = ( union pplib_power_state * ) power_state_offset ;
non_clock_array_index = power_state - > v2 . nonClockInfoIndex ;
non_clock_info = ( struct _ATOM_PPLIB_NONCLOCK_INFO * )
& non_clock_info_array - > nonClockInfo [ non_clock_array_index ] ;
if ( ! rdev - > pm . power_state [ i ] . clock_info )
return - EINVAL ;
ps = kzalloc ( sizeof ( struct kv_ps ) , GFP_KERNEL ) ;
if ( ps = = NULL ) {
kfree ( rdev - > pm . dpm . ps ) ;
return - ENOMEM ;
}
rdev - > pm . dpm . ps [ i ] . ps_priv = ps ;
k = 0 ;
2013-08-20 19:09:54 -04:00
idx = ( u8 * ) & power_state - > v2 . clockInfoIndex [ 0 ] ;
2013-08-14 01:01:40 -04:00
for ( j = 0 ; j < power_state - > v2 . ucNumDPMLevels ; j + + ) {
2013-08-20 19:09:54 -04:00
clock_array_index = idx [ j ] ;
2013-08-14 01:01:40 -04:00
if ( clock_array_index > = clock_info_array - > ucNumEntries )
continue ;
if ( k > = SUMO_MAX_HARDWARE_POWERLEVELS )
break ;
clock_info = ( union pplib_clock_info * )
2013-08-20 19:09:54 -04:00
( ( u8 * ) & clock_info_array - > clockInfo [ 0 ] +
( clock_array_index * clock_info_array - > ucEntrySize ) ) ;
2013-08-14 01:01:40 -04:00
kv_parse_pplib_clock_info ( rdev ,
& rdev - > pm . dpm . ps [ i ] , k ,
clock_info ) ;
k + + ;
}
kv_parse_pplib_non_clock_info ( rdev , & rdev - > pm . dpm . ps [ i ] ,
non_clock_info ,
non_clock_info_array - > ucEntrySize ) ;
power_state_offset + = 2 + power_state - > v2 . ucNumDPMLevels ;
}
rdev - > pm . dpm . num_ps = state_array - > ucNumEntries ;
2013-09-04 16:17:07 -04:00
/* fill in the vce power states */
for ( i = 0 ; i < RADEON_MAX_VCE_LEVELS ; i + + ) {
u32 sclk ;
clock_array_index = rdev - > pm . dpm . vce_states [ i ] . clk_idx ;
clock_info = ( union pplib_clock_info * )
& clock_info_array - > clockInfo [ clock_array_index * clock_info_array - > ucEntrySize ] ;
sclk = le16_to_cpu ( clock_info - > sumo . usEngineClockLow ) ;
sclk | = clock_info - > sumo . ucEngineClockHigh < < 16 ;
rdev - > pm . dpm . vce_states [ i ] . sclk = sclk ;
rdev - > pm . dpm . vce_states [ i ] . mclk = 0 ;
}
2013-08-14 01:01:40 -04:00
return 0 ;
}
int kv_dpm_init ( struct radeon_device * rdev )
{
struct kv_power_info * pi ;
int ret , i ;
pi = kzalloc ( sizeof ( struct kv_power_info ) , GFP_KERNEL ) ;
if ( pi = = NULL )
return - ENOMEM ;
rdev - > pm . dpm . priv = pi ;
2013-08-21 10:02:32 -04:00
ret = r600_get_platform_caps ( rdev ) ;
if ( ret )
return ret ;
2013-08-14 01:01:40 -04:00
ret = r600_parse_extended_power_table ( rdev ) ;
if ( ret )
return ret ;
for ( i = 0 ; i < SUMO_MAX_HARDWARE_POWERLEVELS ; i + + )
pi - > at [ i ] = TRINITY_AT_DFLT ;
2016-03-16 12:56:45 +01:00
pi - > sram_end = SMC_RAM_END ;
2013-08-14 01:01:40 -04:00
2014-10-26 15:10:21 -04:00
/* Enabling nb dpm on an asrock system prevents dpm from working */
if ( rdev - > pdev - > subsystem_vendor = = 0x1849 )
pi - > enable_nb_dpm = false ;
else
pi - > enable_nb_dpm = true ;
2013-08-14 01:01:40 -04:00
pi - > caps_power_containment = true ;
pi - > caps_cac = true ;
pi - > enable_didt = false ;
if ( pi - > enable_didt ) {
pi - > caps_sq_ramping = true ;
pi - > caps_db_ramping = true ;
pi - > caps_td_ramping = true ;
pi - > caps_tcp_ramping = true ;
}
pi - > caps_sclk_ds = true ;
pi - > enable_auto_thermal_throttling = true ;
pi - > disable_nb_ps3_in_battery = false ;
2014-10-26 15:10:21 -04:00
if ( radeon_bapm = = - 1 ) {
2014-12-15 17:24:19 -05:00
/* only enable bapm on KB, ML by default */
if ( rdev - > family = = CHIP_KABINI | | rdev - > family = = CHIP_MULLINS )
2014-10-26 15:10:21 -04:00
pi - > bapm_enable = true ;
2014-12-15 17:24:19 -05:00
else
pi - > bapm_enable = false ;
2014-10-26 15:10:21 -04:00
} else if ( radeon_bapm = = 0 ) {
2014-08-07 09:28:31 -04:00
pi - > bapm_enable = false ;
2014-10-26 15:10:21 -04:00
} else {
2014-08-07 09:28:31 -04:00
pi - > bapm_enable = true ;
2014-10-26 15:10:21 -04:00
}
2013-08-14 01:01:40 -04:00
pi - > voltage_drop_t = 0 ;
pi - > caps_sclk_throttle_low_notification = false ;
pi - > caps_fps = false ; /* true? */
2013-08-09 10:02:40 -04:00
pi - > caps_uvd_pg = true ;
2013-08-14 01:01:40 -04:00
pi - > caps_uvd_dpm = true ;
2013-09-04 16:17:07 -04:00
pi - > caps_vce_pg = false ; /* XXX true */
2013-08-14 01:01:40 -04:00
pi - > caps_samu_pg = false ;
pi - > caps_acp_pg = false ;
pi - > caps_stable_p_state = false ;
ret = kv_parse_sys_info_table ( rdev ) ;
if ( ret )
return ret ;
kv_patch_voltage_values ( rdev ) ;
kv_construct_boot_state ( rdev ) ;
ret = kv_parse_power_table ( rdev ) ;
if ( ret )
return ret ;
pi - > enable_dpm = true ;
return 0 ;
}
2013-07-18 16:39:53 -04:00
void kv_dpm_debugfs_print_current_performance_level ( struct radeon_device * rdev ,
struct seq_file * m )
{
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
u32 current_index =
( RREG32_SMC ( TARGET_AND_CURRENT_PROFILE_INDEX ) & CURR_SCLK_INDEX_MASK ) > >
CURR_SCLK_INDEX_SHIFT ;
u32 sclk , tmp ;
u16 vddc ;
if ( current_index > = SMU__NUM_SCLK_DPM_STATE ) {
seq_printf ( m , " invalid dpm profile %d \n " , current_index ) ;
} else {
sclk = be32_to_cpu ( pi - > graphics_level [ current_index ] . SclkFrequency ) ;
tmp = ( RREG32_SMC ( SMU_VOLTAGE_STATUS ) & SMU_VOLTAGE_CURRENT_LEVEL_MASK ) > >
SMU_VOLTAGE_CURRENT_LEVEL_SHIFT ;
vddc = kv_convert_8bit_index_to_voltage ( rdev , ( u16 ) tmp ) ;
2014-10-02 10:37:21 -04:00
seq_printf ( m , " uvd %sabled \n " , pi - > uvd_power_gated ? " dis " : " en " ) ;
seq_printf ( m , " vce %sabled \n " , pi - > vce_power_gated ? " dis " : " en " ) ;
2013-07-18 16:39:53 -04:00
seq_printf ( m , " power level %d sclk: %u vddc: %u \n " ,
current_index , sclk , vddc ) ;
}
}
2014-09-30 11:21:23 -04:00
u32 kv_dpm_get_current_sclk ( struct radeon_device * rdev )
{
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
u32 current_index =
( RREG32_SMC ( TARGET_AND_CURRENT_PROFILE_INDEX ) & CURR_SCLK_INDEX_MASK ) > >
CURR_SCLK_INDEX_SHIFT ;
u32 sclk ;
if ( current_index > = SMU__NUM_SCLK_DPM_STATE ) {
return 0 ;
} else {
sclk = be32_to_cpu ( pi - > graphics_level [ current_index ] . SclkFrequency ) ;
return sclk ;
}
}
u32 kv_dpm_get_current_mclk ( struct radeon_device * rdev )
{
struct kv_power_info * pi = kv_get_pi ( rdev ) ;
return pi - > sys_info . bootup_uma_clk ;
}
2013-08-14 01:01:40 -04:00
void kv_dpm_print_power_state ( struct radeon_device * rdev ,
struct radeon_ps * rps )
{
int i ;
struct kv_ps * ps = kv_get_ps ( rps ) ;
r600_dpm_print_class_info ( rps - > class , rps - > class2 ) ;
r600_dpm_print_cap_info ( rps - > caps ) ;
printk ( " \t uvd vclk: %d dclk: %d \n " , rps - > vclk , rps - > dclk ) ;
for ( i = 0 ; i < ps - > num_levels ; i + + ) {
struct kv_pl * pl = & ps - > levels [ i ] ;
printk ( " \t \t power level %d sclk: %u vddc: %u \n " ,
i , pl - > sclk ,
kv_convert_8bit_index_to_voltage ( rdev , pl - > vddc_index ) ) ;
}
r600_dpm_print_ps_status ( rdev , rps ) ;
}
void kv_dpm_fini ( struct radeon_device * rdev )
{
int i ;
for ( i = 0 ; i < rdev - > pm . dpm . num_ps ; i + + ) {
kfree ( rdev - > pm . dpm . ps [ i ] . ps_priv ) ;
}
kfree ( rdev - > pm . dpm . ps ) ;
kfree ( rdev - > pm . dpm . priv ) ;
r600_free_extended_power_table ( rdev ) ;
}
void kv_dpm_display_configuration_changed ( struct radeon_device * rdev )
{
}
/*
 * Return the lowest (@low true) or highest level sclk of the currently
 * requested power state.
 */
u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct kv_ps *req = kv_get_ps(&pi->requested_rps);

	return low ? req->levels[0].sclk :
		     req->levels[req->num_levels - 1].sclk;
}
/* The memory clock is fixed: always the bootup UMA clock (@low ignored). */
u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	return pi->sys_info.bootup_uma_clk;
}