Merge branch 'drm-next-4.9' of git://people.freedesktop.org/~agd5f/linux into drm-next
More radeon and amdgpu changes for 4.9. Highlights:
- Initial SI support for amdgpu (controlled by a Kconfig option)
- misc ttm cleanups
- runtimepm fixes
- S3/S4 fixes
- power improvements
- lots of code cleanups and optimizations

* 'drm-next-4.9' of git://people.freedesktop.org/~agd5f/linux: (151 commits)
  drm/ttm: remove cpu_address member from ttm_tt
  drm/radeon/radeon_device: remove unused function
  drm/amdgpu: clean function declarations in amdgpu_ttm.c up
  drm/amdgpu: use the new ring ib and dma frame size callbacks (v2)
  drm/amdgpu/vce3: add ring callbacks for ib and dma frame size
  drm/amdgpu/vce2: add ring callbacks for ib and dma frame size
  drm/amdgpu/vce: add common ring callbacks for ib and dma frame size
  drm/amdgpu/uvd6: add ring callbacks for ib and dma frame size
  drm/amdgpu/uvd5: add ring callbacks for ib and dma frame size
  drm/amdgpu/uvd4.2: add ring callbacks for ib and dma frame size
  drm/amdgpu/sdma3: add ring callbacks for ib and dma frame size
  drm/amdgpu/sdma2.4: add ring callbacks for ib and dma frame size
  drm/amdgpu/cik_sdma: add ring callbacks for ib and dma frame size
  drm/amdgpu/si_dma: add ring callbacks for ib and dma frame size
  drm/amdgpu/gfx8: add ring callbacks for ib and dma frame size
  drm/amdgpu/gfx7: add ring callbacks for ib and dma frame size
  drm/amdgpu/gfx6: add ring callbacks for ib and dma frame size
  drm/amdgpu/ring: add an interface to get dma frame and ib size
  drm/amdgpu/sdma3: drop unused functions
  drm/amdgpu/gfx6: drop gds_switch callback
  ...
commit bd4a68da19
@@ -1,3 +1,10 @@
config DRM_AMDGPU_SI
    bool "Enable amdgpu support for SI parts"
    depends on DRM_AMDGPU
    help
      Choose this option if you want to enable experimental support
      for SI asics.

config DRM_AMDGPU_CIK
    bool "Enable amdgpu support for CIK parts"
    depends on DRM_AMDGPU
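
Everything SI-specific in this series is compiled conditionally on the new option; enable it with CONFIG_DRM_AMDGPU_SI=y (it depends on DRM_AMDGPU). The guard pattern, sketched here with a placeholder body, recurs throughout the hunks below:

    #ifdef CONFIG_DRM_AMDGPU_SI
        /* SI-only code: IP-block setup, PCI IDs, extra irq sources, ... */
    #endif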

@@ -30,6 +30,8 @@ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
    ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
    amdgpu_amdkfd_gfx_v7.o

amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o

amdgpu-y += \
    vi.o

@@ -64,6 +64,7 @@
extern int amdgpu_modeset;
extern int amdgpu_vram_limit;
extern int amdgpu_gart_size;
extern int amdgpu_moverate;
extern int amdgpu_benchmarking;
extern int amdgpu_testing;
extern int amdgpu_audio;
@@ -94,6 +95,7 @@ extern unsigned amdgpu_pg_mask;
extern char *amdgpu_disable_cu;
extern int amdgpu_sclk_deep_sleep_en;
extern char *amdgpu_virtual_display;
extern unsigned amdgpu_pp_feature_mask;

#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
@@ -108,7 +110,7 @@ extern char *amdgpu_virtual_display;
#define AMDGPU_MAX_RINGS 16
#define AMDGPU_MAX_GFX_RINGS 1
#define AMDGPU_MAX_COMPUTE_RINGS 8
#define AMDGPU_MAX_VCE_RINGS 2
#define AMDGPU_MAX_VCE_RINGS 3

/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES 2
@@ -318,6 +320,10 @@ struct amdgpu_ring_funcs {
    /* note usage for clock and power gating */
    void (*begin_use)(struct amdgpu_ring *ring);
    void (*end_use)(struct amdgpu_ring *ring);
    void (*emit_switch_buffer) (struct amdgpu_ring *ring);
    void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
    unsigned (*get_emit_ib_size) (struct amdgpu_ring *ring);
    unsigned (*get_dma_frame_size) (struct amdgpu_ring *ring);
};

/*
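
The two new callbacks report worst-case sizes in dwords, letting the core size ring allocations exactly instead of the old flat 256 dwords per IB (see the amdgpu_ib.c hunk at the end of this diff). A minimal sketch of a per-engine implementation — the function names and constants below are illustrative placeholders, not taken from this series:

    /* hypothetical engine "foo": worst-case sizes in dwords */
    static unsigned foo_ring_get_emit_ib_size(struct amdgpu_ring *ring)
    {
        return 4; /* dwords emitted by foo's emit_ib() */
    }

    static unsigned foo_ring_get_dma_frame_size(struct amdgpu_ring *ring)
    {
        return 32; /* fence, HDP flush/invalidate, VM flush, padding */
    }

These are wired up through the two new amdgpu_ring_funcs members and consumed via the amdgpu_ring_get_emit_ib_size()/amdgpu_ring_get_dma_frame_size() macros added later in this header.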

@@ -618,6 +624,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
             int pages, struct page **pagelist,
             dma_addr_t *dma_addr, uint32_t flags);
int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);

/*
 * GPU MC structures, functions & helpers
@@ -963,6 +970,7 @@ struct amdgpu_ctx {
    spinlock_t ring_lock;
    struct fence **fences;
    struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
    bool preamble_presented;
};

struct amdgpu_ctx_mgr {
@@ -1222,11 +1230,16 @@ struct amdgpu_cs_parser {
    struct fence *fence;
    uint64_t bytes_moved_threshold;
    uint64_t bytes_moved;
    struct amdgpu_bo_list_entry *evictable;

    /* user fence */
    struct amdgpu_bo_list_entry uf_entry;
};

#define AMDGPU_PREAMBLE_IB_PRESENT (1 << 0) /* bit set means command submit involves a preamble IB */
#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST (1 << 1) /* bit set means preamble IB is first presented in belonging context */
#define AMDGPU_HAVE_CTX_SWITCH (1 << 2) /* bit set means context switch occurred */

struct amdgpu_job {
    struct amd_sched_job base;
    struct amdgpu_device *adev;
@@ -1235,9 +1248,10 @@ struct amdgpu_job {
    struct amdgpu_sync sync;
    struct amdgpu_ib *ibs;
    struct fence *fence; /* the hw fence */
    uint32_t preamble_status;
    uint32_t num_ibs;
    void *owner;
    uint64_t ctx;
    uint64_t fence_ctx; /* the fence_context this job uses */
    bool vm_needs_flush;
    unsigned vm_id;
    uint64_t vm_pd_addr;
@@ -1686,6 +1700,7 @@ struct amdgpu_vce {
    unsigned harvest_config;
    struct amd_sched_entity entity;
    uint32_t srbm_soft_reset;
    unsigned num_rings;
};

/*
@@ -1703,6 +1718,10 @@ struct amdgpu_sdma_instance {

struct amdgpu_sdma {
    struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
#ifdef CONFIG_DRM_AMDGPU_SI
    //SI DMA has a different trap irq number for the second engine
    struct amdgpu_irq_src trap_irq_1;
#endif
    struct amdgpu_irq_src trap_irq;
    struct amdgpu_irq_src illegal_inst_irq;
    int num_instances;
@@ -1819,6 +1838,9 @@ struct amdgpu_asic_funcs {
    int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
    /* query virtual capabilities */
    u32 (*get_virtual_caps)(struct amdgpu_device *adev);
    /* static power management */
    int (*get_pcie_lanes)(struct amdgpu_device *adev);
    void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
};

/*
@@ -1993,6 +2015,8 @@ struct amdgpu_device {
    spinlock_t pcie_idx_lock;
    amdgpu_rreg_t pcie_rreg;
    amdgpu_wreg_t pcie_wreg;
    amdgpu_rreg_t pciep_rreg;
    amdgpu_wreg_t pciep_wreg;
    /* protects concurrent UVD register access */
    spinlock_t uvd_ctx_idx_lock;
    amdgpu_rreg_t uvd_ctx_rreg;
@@ -2033,6 +2057,14 @@ struct amdgpu_device {
    atomic64_t num_evictions;
    atomic_t gpu_reset_counter;

    /* data for buffer migration throttling */
    struct {
        spinlock_t lock;
        s64 last_update_us;
        s64 accum_us; /* accumulated microseconds */
        u32 log2_max_MBps;
    } mm_stats;

    /* display */
    bool enable_virtual_display;
    struct amdgpu_mode_info mode_info;
@@ -2101,6 +2133,10 @@ struct amdgpu_device {
    /* link all shadow bo */
    struct list_head shadow_list;
    struct mutex shadow_list_lock;
    /* link all gtt */
    spinlock_t gtt_list_lock;
    struct list_head gtt_list;

};

bool amdgpu_device_is_px(struct drm_device *dev);
@@ -2133,6 +2169,8 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
#define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
#define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v))
#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
@@ -2223,6 +2261,9 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
#define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev)))
#define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev))
#define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l))
#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
@@ -2244,9 +2285,13 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
#define amdgpu_ring_get_emit_ib_size(r) (r)->funcs->get_emit_ib_size((r))
#define amdgpu_ring_get_dma_frame_size(r) (r)->funcs->get_dma_frame_size((r))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
@@ -2402,6 +2447,8 @@ void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev);
int amdgpu_ttm_global_init(struct amdgpu_device *adev);
int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
                      const u32 *registers,
                      const u32 array_size);
@@ -2434,8 +2481,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
                 struct drm_file *file_priv);
void amdgpu_driver_preclose_kms(struct drm_device *dev,
                struct drm_file *file_priv);
int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
@@ -2481,6 +2528,7 @@ static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
struct amdgpu_bo_va_mapping *
amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
               uint64_t addr, struct amdgpu_bo **bo);
int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser);

#include "amdgpu_object.h"
#endif

@@ -978,6 +978,48 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
        return -EINVAL;

    switch (crev) {
    case 2:
    case 3:
    case 5:
        /* r6xx, r7xx, evergreen, ni, si.
         * TODO: add support for asic_type <= CHIP_RV770*/
        if (clock_type == COMPUTE_ENGINE_PLL_PARAM) {
            args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock);

            amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);

            dividers->post_div = args.v3.ucPostDiv;
            dividers->enable_post_div = (args.v3.ucCntlFlag &
                    ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
            dividers->enable_dithen = (args.v3.ucCntlFlag &
                    ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
            dividers->whole_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv);
            dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac);
            dividers->ref_div = args.v3.ucRefDiv;
            dividers->vco_mode = (args.v3.ucCntlFlag &
                    ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
        } else {
            /* for SI we use ComputeMemoryClockParam for memory plls */
            if (adev->asic_type >= CHIP_TAHITI)
                return -EINVAL;
            args.v5.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
            if (strobe_mode)
                args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;

            amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);

            dividers->post_div = args.v5.ucPostDiv;
            dividers->enable_post_div = (args.v5.ucCntlFlag &
                    ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
            dividers->enable_dithen = (args.v5.ucCntlFlag &
                    ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
            dividers->whole_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDiv);
            dividers->frac_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDivFrac);
            dividers->ref_div = args.v5.ucRefDiv;
            dividers->vco_mode = (args.v5.ucCntlFlag &
                    ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
        }
        break;
    case 4:
        /* fusion */
        args.v4.ulClock = cpu_to_le32(clock);  /* 10 khz */
@@ -1122,6 +1164,32 @@ void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
    amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}

void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev,
                      u16 *vddc, u16 *vddci, u16 *mvdd)
{
    struct amdgpu_mode_info *mode_info = &adev->mode_info;
    int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
    u8 frev, crev;
    u16 data_offset;
    union firmware_info *firmware_info;

    *vddc = 0;
    *vddci = 0;
    *mvdd = 0;

    if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
                      &frev, &crev, &data_offset)) {
        firmware_info =
            (union firmware_info *)(mode_info->atom_context->bios +
                        data_offset);
        *vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
        if ((frev == 2) && (crev >= 2)) {
            *vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage);
            *mvdd = le16_to_cpu(firmware_info->info_22.usBootUpMVDDCVoltage);
        }
    }
}

union set_voltage {
    struct _SET_VOLTAGE_PS_ALLOCATION alloc;
    struct _SET_VOLTAGE_PARAMETERS v1;
@@ -1129,6 +1197,52 @@ union set_voltage {
    struct _SET_VOLTAGE_PARAMETERS_V1_3 v3;
};

int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
                 u16 voltage_id, u16 *voltage)
{
    union set_voltage args;
    int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
    u8 frev, crev;

    if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
        return -EINVAL;

    switch (crev) {
    case 1:
        return -EINVAL;
    case 2:
        args.v2.ucVoltageType = SET_VOLTAGE_GET_MAX_VOLTAGE;
        args.v2.ucVoltageMode = 0;
        args.v2.usVoltageLevel = 0;

        amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);

        *voltage = le16_to_cpu(args.v2.usVoltageLevel);
        break;
    case 3:
        args.v3.ucVoltageType = voltage_type;
        args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL;
        args.v3.usVoltageLevel = cpu_to_le16(voltage_id);

        amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);

        *voltage = le16_to_cpu(args.v3.usVoltageLevel);
        break;
    default:
        DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
        return -EINVAL;
    }

    return 0;
}

int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *adev,
                              u16 *voltage,
                              u16 leakage_idx)
{
    return amdgpu_atombios_get_max_vddc(adev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
}

void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
                 u16 voltage_level,
                 u8 voltage_type)
@@ -1349,6 +1463,50 @@ static ATOM_VOLTAGE_OBJECT_V3 *amdgpu_atombios_lookup_voltage_object_v3(ATOM_VOL
    return NULL;
}

int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev,
                  u8 voltage_type,
                  u8 *svd_gpio_id, u8 *svc_gpio_id)
{
    int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
    u8 frev, crev;
    u16 data_offset, size;
    union voltage_object_info *voltage_info;
    union voltage_object *voltage_object = NULL;

    if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
                      &frev, &crev, &data_offset)) {
        voltage_info = (union voltage_object_info *)
            (adev->mode_info.atom_context->bios + data_offset);

        switch (frev) {
        case 3:
            switch (crev) {
            case 1:
                voltage_object = (union voltage_object *)
                    amdgpu_atombios_lookup_voltage_object_v3(&voltage_info->v3,
                                  voltage_type,
                                  VOLTAGE_OBJ_SVID2);
                if (voltage_object) {
                    *svd_gpio_id = voltage_object->v3.asSVID2Obj.ucSVDGpioId;
                    *svc_gpio_id = voltage_object->v3.asSVID2Obj.ucSVCGpioId;
                } else {
                    return -EINVAL;
                }
                break;
            default:
                DRM_ERROR("unknown voltage object table\n");
                return -EINVAL;
            }
            break;
        default:
            DRM_ERROR("unknown voltage object table\n");
            return -EINVAL;
        }

    }
    return 0;
}

bool
amdgpu_atombios_is_voltage_gpio(struct amdgpu_device *adev,
                u8 voltage_type, u8 voltage_mode)

@@ -208,5 +208,19 @@ void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev);

void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);

int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
                 u16 voltage_id, u16 *voltage);
int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *adev,
                              u16 *voltage,
                              u16 leakage_idx);
void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev,
                      u16 *vddc, u16 *vddci, u16 *mvdd);
int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
                       u8 clock_type,
                       u32 clock,
                       bool strobe_mode,
                       struct atom_clock_dividers *dividers);
int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev,
                  u8 voltage_type,
                  u8 *svd_gpio_id, u8 *svc_gpio_id);
#endif

@@ -616,7 +616,7 @@ static int amdgpu_cgs_irq_put(struct cgs_device *cgs_device, unsigned src_id, un
    return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
}

int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
                     enum amd_ip_block_type block_type,
                     enum amd_clockgating_state state)
{
@@ -637,7 +637,7 @@ int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
    return r;
}

int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
                     enum amd_ip_block_type block_type,
                     enum amd_powergating_state state)
{
@@ -848,6 +848,12 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
    case CGS_SYSTEM_INFO_GFX_SE_INFO:
        sys_info->value = adev->gfx.config.max_shader_engines;
        break;
    case CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID:
        sys_info->value = adev->pdev->subsystem_device;
        break;
    case CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID:
        sys_info->value = adev->pdev->subsystem_vendor;
        break;
    default:
        return -ENODEV;
    }

@@ -91,6 +91,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
                      uint32_t *offset)
{
    struct drm_gem_object *gobj;
    unsigned long size;

    gobj = drm_gem_object_lookup(p->filp, data->handle);
    if (gobj == NULL)
@@ -101,6 +102,11 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
    p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
    p->uf_entry.tv.shared = true;
    p->uf_entry.user_pages = NULL;

    size = amdgpu_bo_size(p->uf_entry.robj);
    if (size != PAGE_SIZE || (data->offset + 8) > size)
        return -EINVAL;

    *offset = data->offset;

    drm_gem_object_unreference_unlocked(gobj);
@@ -235,56 +241,115 @@ free_chunk:
    return ret;
}

/* Returns how many bytes TTM can move per IB.
/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
    if (us <= 0 || !adev->mm_stats.log2_max_MBps)
        return 0;

    /* Since accum_us is incremented by a million per second, just
     * multiply it by the number of MB/s to get the number of bytes.
     */
    return us << adev->mm_stats.log2_max_MBps;
}

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
    if (!adev->mm_stats.log2_max_MBps)
        return 0;

    return bytes >> adev->mm_stats.log2_max_MBps;
}

/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
 */
static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
{
    u64 real_vram_size = adev->mc.real_vram_size;
    u64 vram_usage = atomic64_read(&adev->vram_usage);
    s64 time_us, increment_us;
    u64 max_bytes;
    u64 free_vram, total_vram, used_vram;

    /* This function is based on the current VRAM usage.
    /* Allow a maximum of 200 accumulated ms. This is basically per-IB
     * throttling.
     *
     * - If all of VRAM is free, allow relocating the number of bytes that
     *   is equal to 1/4 of the size of VRAM for this IB.
     *
     * - If more than one half of VRAM is occupied, only allow relocating
     *   1 MB of data for this IB.
     *
     * - From 0 to one half of used VRAM, the threshold decreases
     *   linearly.
     *        __________________
     * 1/4 of -|\               |
     * VRAM    | \              |
     *         |  \             |
     *         |   \            |
     *         |    \           |
     *         |     \          |
     *         |      \         |
     *         |       \________|1 MB
     *         |----------------|
     *    VRAM 0 %             100 %
     *         used            used
     *
     * Note: It's a threshold, not a limit. The threshold must be crossed
     * for buffer relocations to stop, so any buffer of an arbitrary size
     * can be moved as long as the threshold isn't crossed before
     * the relocation takes place. We don't want to disable buffer
     * relocations completely.
     *
     * The idea is that buffers should be placed in VRAM at creation time
     * and TTM should only do a minimum number of relocations during
     * command submission. In practice, you need to submit at least
     * a dozen IBs to move all buffers to VRAM if they are in GTT.
     *
     * Also, things can get pretty crazy under memory pressure and actual
     * VRAM usage can change a lot, so playing safe even at 50% does
     * consistently increase performance.
     * It means that in order to get full max MBps, at least 5 IBs per
     * second must be submitted and not more than 200ms apart from each
     * other.
     */
    const s64 us_upper_bound = 200000;

    u64 half_vram = real_vram_size >> 1;
    u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
    u64 bytes_moved_threshold = half_free_vram >> 1;
    return max(bytes_moved_threshold, 1024*1024ull);
    if (!adev->mm_stats.log2_max_MBps)
        return 0;

    total_vram = adev->mc.real_vram_size - adev->vram_pin_size;
    used_vram = atomic64_read(&adev->vram_usage);
    free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

    spin_lock(&adev->mm_stats.lock);

    /* Increase the amount of accumulated us. */
    time_us = ktime_to_us(ktime_get());
    increment_us = time_us - adev->mm_stats.last_update_us;
    adev->mm_stats.last_update_us = time_us;
    adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
                      us_upper_bound);

    /* This prevents the short period of low performance when the VRAM
     * usage is low and the driver is in debt or doesn't have enough
     * accumulated us to fill VRAM quickly.
     *
     * The situation can occur in these cases:
     * - a lot of VRAM is freed by userspace
     * - the presence of a big buffer causes a lot of evictions
     *   (solution: split buffers into smaller ones)
     *
     * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
     * accum_us to a positive number.
     */
    if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
        s64 min_us;

        /* Be more aggressive on dGPUs. Try to fill a portion of free
         * VRAM now.
         */
        if (!(adev->flags & AMD_IS_APU))
            min_us = bytes_to_us(adev, free_vram / 4);
        else
            min_us = 0; /* Reset accum_us on APUs. */

        adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
    }

    /* This returns 0 if the driver is in debt to disallow (optional)
     * buffer moves.
     */
    max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

    spin_unlock(&adev->mm_stats.lock);
    return max_bytes;
}

/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev,
                     u64 num_bytes)
{
    spin_lock(&adev->mm_stats.lock);
    adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
    spin_unlock(&adev->mm_stats.lock);
}
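
A quick worked example of the new accounting, with numbers assumed for illustration: the default moverate of 8 MB/s gives log2_max_MBps = 3, so once the 200 ms accumulation cap is reached a submission may move us_to_bytes(200000) = 200000 << 3 = 1,600,000 bytes, i.e. 8 MB/s * 0.2 s. That is where the "at least 5 IBs per second" remark in the comment above comes from: any longer gap between submissions still saturates at the same 200 ms allowance.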

static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
@@ -297,15 +362,10 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
    if (bo->pin_count)
        return 0;

    /* Avoid moving this one if we have moved too many buffers
     * for this IB already.
     *
     * Note that this allows moving at least one buffer of
     * any size, because it doesn't take the current "bo"
     * into account. We don't want to disallow buffer moves
     * completely.
    /* Don't move this buffer if we have depleted our allowance
     * to move it. Don't move anything if the threshold is zero.
     */
    if (p->bytes_moved <= p->bytes_moved_threshold)
    if (p->bytes_moved < p->bytes_moved_threshold)
        domain = bo->prefered_domains;
    else
        domain = bo->allowed_domains;
@@ -317,17 +377,67 @@ retry:
    p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
        initial_bytes_moved;

    if (unlikely(r)) {
        if (r != -ERESTARTSYS && domain != bo->allowed_domains) {
            domain = bo->allowed_domains;
            goto retry;
        }
    if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
        domain = bo->allowed_domains;
        goto retry;
    }

    return r;
}

int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
/* Last resort, try to evict something from the current working set */
static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
                struct amdgpu_bo_list_entry *lobj)
{
    uint32_t domain = lobj->robj->allowed_domains;
    int r;

    if (!p->evictable)
        return false;

    for (;&p->evictable->tv.head != &p->validated;
         p->evictable = list_prev_entry(p->evictable, tv.head)) {

        struct amdgpu_bo_list_entry *candidate = p->evictable;
        struct amdgpu_bo *bo = candidate->robj;
        u64 initial_bytes_moved;
        uint32_t other;

        /* If we reached our current BO we can forget it */
        if (candidate == lobj)
            break;

        other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);

        /* Check if this BO is in one of the domains we need space for */
        if (!(other & domain))
            continue;

        /* Check if we can move this BO somewhere else */
        other = bo->allowed_domains & ~domain;
        if (!other)
            continue;

        /* Good we can try to move this BO somewhere else */
        amdgpu_ttm_placement_from_domain(bo, other);
        initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
        r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
        p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
            initial_bytes_moved;

        if (unlikely(r))
            break;

        p->evictable = list_prev_entry(p->evictable, tv.head);
        list_move(&candidate->tv.head, &p->validated);

        return true;
    }

    return false;
}

static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
                   struct list_head *validated)
{
    struct amdgpu_bo_list_entry *lobj;
@@ -351,9 +461,15 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
            binding_userptr = true;
        }

        r = amdgpu_cs_bo_validate(p, bo);
        if (p->evictable == lobj)
            p->evictable = NULL;

        do {
            r = amdgpu_cs_bo_validate(p, bo);
        } while (r == -ENOMEM && amdgpu_cs_try_evict(p, lobj));
        if (r)
            return r;

        if (bo->shadow) {
            r = amdgpu_cs_bo_validate(p, bo);
            if (r)
@@ -481,6 +597,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,

    p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
    p->bytes_moved = 0;
    p->evictable = list_last_entry(&p->validated,
                       struct amdgpu_bo_list_entry,
                       tv.head);

    r = amdgpu_cs_list_validate(p, &duplicates);
    if (r) {
@@ -494,6 +613,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
        goto error_validate;
    }

    amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved);

    fpriv->vm.last_eviction_counter =
        atomic64_read(&p->adev->num_evictions);

@@ -524,8 +645,12 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
        }
    }

    if (p->uf_entry.robj)
        p->job->uf_addr += amdgpu_bo_gpu_offset(p->uf_entry.robj);
    if (!r && p->uf_entry.robj) {
        struct amdgpu_bo *uf = p->uf_entry.robj;

        r = amdgpu_ttm_bind(uf->tbo.ttm, &uf->tbo.mem);
        p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
    }

error_validate:
    if (r) {
@@ -735,6 +860,14 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
        if (r)
            return r;

        if (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) {
            parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
            if (!parser->ctx->preamble_presented) {
                parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
                parser->ctx->preamble_presented = true;
            }
        }

        if (parser->job->ring && parser->job->ring != ring)
            return -EINVAL;

@@ -874,7 +1007,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
    }

    job->owner = p->filp;
    job->ctx = entity->fence_context;
    job->fence_ctx = entity->fence_context;
    p->fence = fence_get(&job->base.s_fence->finished);
    cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
    job->uf_sequence = cs->out.handle;
@@ -1040,3 +1173,29 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,

    return NULL;
}

/**
 * amdgpu_cs_sysvm_access_required - make BOs accessible by the system VM
 *
 * @parser: command submission parser context
 *
 * Helper for UVD/VCE VM emulation, make sure BOs are accessible by the system VM.
 */
int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
{
    unsigned i;
    int r;

    if (!parser->bo_list)
        return 0;

    for (i = 0; i < parser->bo_list->num_entries; i++) {
        struct amdgpu_bo *bo = parser->bo_list->array[i].robj;

        r = amdgpu_ttm_bind(bo->tbo.ttm, &bo->tbo.mem);
        if (unlikely(r))
            return r;
    }

    return 0;
}

@@ -41,6 +41,9 @@
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
@@ -52,6 +55,11 @@ static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);

static const char *amdgpu_asic_name[] = {
    "TAHITI",
    "PITCAIRN",
    "VERDE",
    "OLAND",
    "HAINAN",
    "BONAIRE",
    "KAVERI",
    "KABINI",
@@ -1027,7 +1035,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
        /* don't suspend or resume card normally */
        dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

        amdgpu_resume_kms(dev, true, true);
        amdgpu_device_resume(dev, true, true);

        dev->pdev->d3_delay = d3_delay;

@@ -1037,7 +1045,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
        printk(KERN_INFO "amdgpu: switched off\n");
        drm_kms_helper_poll_disable(dev);
        dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
        amdgpu_suspend_kms(dev, true, true);
        amdgpu_device_suspend(dev, true, true);
        dev->switch_power_state = DRM_SWITCH_POWER_OFF;
    }
}
@@ -1231,6 +1239,18 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
        if (r)
            return r;
        break;
#ifdef CONFIG_DRM_AMDGPU_SI
    case CHIP_VERDE:
    case CHIP_TAHITI:
    case CHIP_PITCAIRN:
    case CHIP_OLAND:
    case CHIP_HAINAN:
        adev->family = AMDGPU_FAMILY_SI;
        r = si_set_ip_blocks(adev);
        if (r)
            return r;
        break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
    case CHIP_BONAIRE:
    case CHIP_HAWAII:
@@ -1347,6 +1367,9 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
    for (i = 0; i < adev->num_ip_blocks; i++) {
        if (!adev->ip_block_status[i].valid)
            continue;
        if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_UVD ||
            adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_VCE)
            continue;
        /* enable clockgating to save power */
        r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
                                AMD_CG_STATE_GATE);
@@ -1490,6 +1513,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
{
    int r, i;
    bool runtime = false;
    u32 max_MBps;

    adev->shutdown = false;
    adev->dev = &pdev->dev;
@@ -1513,6 +1537,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
    adev->smc_wreg = &amdgpu_invalid_wreg;
    adev->pcie_rreg = &amdgpu_invalid_rreg;
    adev->pcie_wreg = &amdgpu_invalid_wreg;
    adev->pciep_rreg = &amdgpu_invalid_rreg;
    adev->pciep_wreg = &amdgpu_invalid_wreg;
    adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
    adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
    adev->didt_rreg = &amdgpu_invalid_rreg;
@@ -1549,12 +1575,22 @@ int amdgpu_device_init(struct amdgpu_device *adev,
    spin_lock_init(&adev->didt_idx_lock);
    spin_lock_init(&adev->gc_cac_idx_lock);
    spin_lock_init(&adev->audio_endpt_idx_lock);
    spin_lock_init(&adev->mm_stats.lock);

    INIT_LIST_HEAD(&adev->shadow_list);
    mutex_init(&adev->shadow_list_lock);

    adev->rmmio_base = pci_resource_start(adev->pdev, 5);
    adev->rmmio_size = pci_resource_len(adev->pdev, 5);
    INIT_LIST_HEAD(&adev->gtt_list);
    spin_lock_init(&adev->gtt_list_lock);

    if (adev->asic_type >= CHIP_BONAIRE) {
        adev->rmmio_base = pci_resource_start(adev->pdev, 5);
        adev->rmmio_size = pci_resource_len(adev->pdev, 5);
    } else {
        adev->rmmio_base = pci_resource_start(adev->pdev, 2);
        adev->rmmio_size = pci_resource_len(adev->pdev, 2);
    }

    adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
    if (adev->rmmio == NULL) {
        return -ENOMEM;
@@ -1562,8 +1598,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
    DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
    DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);

    /* doorbell bar mapping */
    amdgpu_doorbell_init(adev);
    if (adev->asic_type >= CHIP_BONAIRE)
        /* doorbell bar mapping */
        amdgpu_doorbell_init(adev);

    /* io port mapping */
    for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
@@ -1660,6 +1697,14 @@ int amdgpu_device_init(struct amdgpu_device *adev,

    adev->accel_working = true;

    /* Initialize the buffer migration limit. */
    if (amdgpu_moverate >= 0)
        max_MBps = amdgpu_moverate;
    else
        max_MBps = 8; /* Allow 8 MB/s. */
    /* Get a log2 for easy divisions. */
    adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));

    amdgpu_fbdev_init(adev);

    r = amdgpu_ib_pool_init(adev);
@@ -1764,7 +1809,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
    adev->rio_mem = NULL;
    iounmap(adev->rmmio);
    adev->rmmio = NULL;
    amdgpu_doorbell_fini(adev);
    if (adev->asic_type >= CHIP_BONAIRE)
        amdgpu_doorbell_fini(adev);
    amdgpu_debugfs_regs_cleanup(adev);
    amdgpu_debugfs_remove_files(adev);
}
@@ -1774,7 +1820,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 * Suspend & resume.
 */
/**
 * amdgpu_suspend_kms - initiate device suspend
 * amdgpu_device_suspend - initiate device suspend
 *
 * @pdev: drm dev pointer
 * @state: suspend state
@@ -1783,7 +1829,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
{
    struct amdgpu_device *adev;
    struct drm_crtc *crtc;
@@ -1796,7 +1842,8 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)

    adev = dev->dev_private;

    if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
    if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
        dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
        return 0;

    drm_kms_helper_poll_disable(dev);
@@ -1851,6 +1898,10 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
        /* Shut down the device */
        pci_disable_device(dev->pdev);
        pci_set_power_state(dev->pdev, PCI_D3hot);
    } else {
        r = amdgpu_asic_reset(adev);
        if (r)
            DRM_ERROR("amdgpu asic reset failed\n");
    }

    if (fbcon) {
@@ -1862,7 +1913,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
}

/**
 * amdgpu_resume_kms - initiate device resume
 * amdgpu_device_resume - initiate device resume
 *
 * @pdev: drm dev pointer
 *
@@ -1870,32 +1921,37 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
{
    struct drm_connector *connector;
    struct amdgpu_device *adev = dev->dev_private;
    struct drm_crtc *crtc;
    int r;

    if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
    if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
        dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
        return 0;

    if (fbcon) {
    if (fbcon)
        console_lock();
    }

    if (resume) {
        pci_set_power_state(dev->pdev, PCI_D0);
        pci_restore_state(dev->pdev);
        if (pci_enable_device(dev->pdev)) {
        r = pci_enable_device(dev->pdev);
        if (r) {
            if (fbcon)
                console_unlock();
            return -1;
            return r;
        }
    }

    /* post card */
    if (!amdgpu_card_posted(adev))
        amdgpu_atom_asic_init(adev->mode_info.atom_context);
    if (!amdgpu_card_posted(adev) || !resume) {
        r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
        if (r)
            DRM_ERROR("amdgpu asic init failed\n");
    }

    r = amdgpu_resume(adev);
    if (r)
@@ -2163,6 +2219,11 @@ retry:
    }
    if (!r) {
        amdgpu_irq_gpu_reset_resume_helper(adev);
        if (need_full_reset && amdgpu_need_backup(adev)) {
            r = amdgpu_ttm_recover_gart(adev);
            if (r)
                DRM_ERROR("gart recovery failed!!!\n");
        }
        r = amdgpu_ib_ring_tests(adev);
        if (r) {
            dev_err(adev->dev, "ib ring test failed (%d).\n", r);
@@ -2600,7 +2661,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
    while (size) {
        uint32_t value;

        value = RREG32_SMC(*pos >> 2);
        value = RREG32_SMC(*pos);
        r = put_user(value, (uint32_t *)buf);
        if (r)
            return r;
@@ -2631,7 +2692,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
        if (r)
            return r;

        WREG32_SMC(*pos >> 2, value);
        WREG32_SMC(*pos, value);

        result += 4;
        buf += 4;
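
The BAR changes above are needed because SI parts expose their register aperture in PCI BAR 2, while CIK and newer use BAR 5 and additionally have a doorbell aperture that SI lacks — hence the CHIP_BONAIRE checks around the rmmio mapping and around amdgpu_doorbell_init()/amdgpu_doorbell_fini().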

@@ -55,13 +55,15 @@
 * - 3.3.0 - Add VM support for UVD on supported hardware.
 * - 3.4.0 - Add AMDGPU_INFO_NUM_EVICTIONS.
 * - 3.5.0 - Add support for new UVD_NO_OP register.
 * - 3.6.0 - kmd involves use of CONTEXT_CONTROL in ring buffer.
 */
#define KMS_DRIVER_MAJOR 3
#define KMS_DRIVER_MINOR 5
#define KMS_DRIVER_MINOR 6
#define KMS_DRIVER_PATCHLEVEL 0

int amdgpu_vram_limit = 0;
int amdgpu_gart_size = -1; /* auto */
int amdgpu_moverate = -1; /* auto */
int amdgpu_benchmarking = 0;
int amdgpu_testing = 0;
int amdgpu_audio = -1;
@@ -93,6 +95,7 @@ unsigned amdgpu_cg_mask = 0xffffffff;
unsigned amdgpu_pg_mask = 0xffffffff;
char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL;
unsigned amdgpu_pp_feature_mask = 0xffffffff;

MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -100,6 +103,9 @@ module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc., -1 = auto)");
module_param_named(gartsize, amdgpu_gart_size, int, 0600);

MODULE_PARM_DESC(moverate, "Maximum buffer migration rate in MB/s. (32, 64, etc., -1=auto, 0=1=disabled)");
module_param_named(moverate, amdgpu_moverate, int, 0600);
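
Usage note: as a standard module parameter this can be set at load time, e.g. modprobe amdgpu moverate=32 to cap CS buffer migrations at roughly 32 MB/s (the value shown is illustrative); amdgpu_device_init() above rounds whatever is given to a power of two via ilog2() so the accounting can be done with shifts.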

MODULE_PARM_DESC(benchmark, "Run benchmark");
module_param_named(benchmark, amdgpu_benchmarking, int, 0444);

@@ -172,6 +178,9 @@ module_param_named(powerplay, amdgpu_powerplay, int, 0444);

MODULE_PARM_DESC(powercontainment, "Power Containment (1 = enable (default), 0 = disable)");
module_param_named(powercontainment, amdgpu_powercontainment, int, 0444);

MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, int, 0444);
#endif

MODULE_PARM_DESC(sclkdeepsleep, "SCLK Deep Sleep (1 = enable (default), 0 = disable)");
@@ -196,6 +205,80 @@ MODULE_PARM_DESC(virtual_display, "Enable virtual display feature (the virtual_d
module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);

static const struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_SI
    {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
    {0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
    {0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
    {0x1002, 0x678A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
    {0x1002, 0x6790, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
    {0x1002, 0x6791, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
    {0x1002, 0x6792, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
    {0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
    {0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
    {0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
    {0x1002, 0x679B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
    {0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
    {0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
    {0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
    {0x1002, 0x6801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
    {0x1002, 0x6802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
    {0x1002, 0x6806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
    {0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
    {0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
    {0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
    {0x1002, 0x6811, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
    {0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
    {0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
    {0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
    {0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
    {0x1002, 0x6600, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
    {0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
    {0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
    {0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
    {0x1002, 0x6604, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
    {0x1002, 0x6605, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
    {0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
    {0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
    {0x1002, 0x6608, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
    {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
    {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
    {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
    {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
    {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
    {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
    {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
    {0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
    {0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
    {0x1002, 0x6821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
    {0x1002, 0x6822, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
    {0x1002, 0x6823, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
    {0x1002, 0x6824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
    {0x1002, 0x6825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
    {0x1002, 0x6826, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
    {0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
    {0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
    {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
    {0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
    {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
    {0x1002, 0x682C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
    {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
    {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
    {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
    {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
    {0x1002, 0x6835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
    {0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
    {0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
    {0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
    {0x1002, 0x683B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
    {0x1002, 0x683D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
    {0x1002, 0x683F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
    {0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
    {0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
    {0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
    {0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
    {0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
    {0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
    /* Kaveri */
    {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
@@ -393,32 +476,72 @@ amdgpu_pci_remove(struct pci_dev *pdev)
    drm_put_dev(dev);
}

static void
amdgpu_pci_shutdown(struct pci_dev *pdev)
{
    struct drm_device *dev = pci_get_drvdata(pdev);
    struct amdgpu_device *adev = dev->dev_private;

    /* if we are running in a VM, make sure the device
     * is torn down properly on reboot/shutdown
     */
    if (adev->virtualization.is_virtual)
        amdgpu_pci_remove(pdev);
}

static int amdgpu_pmops_suspend(struct device *dev)
{
    struct pci_dev *pdev = to_pci_dev(dev);

    struct drm_device *drm_dev = pci_get_drvdata(pdev);
    return amdgpu_suspend_kms(drm_dev, true, true);
    return amdgpu_device_suspend(drm_dev, true, true);
}

static int amdgpu_pmops_resume(struct device *dev)
{
    struct pci_dev *pdev = to_pci_dev(dev);
    struct drm_device *drm_dev = pci_get_drvdata(pdev);
    return amdgpu_resume_kms(drm_dev, true, true);

    /* GPU comes up enabled by the bios on resume */
    if (amdgpu_device_is_px(drm_dev)) {
        pm_runtime_disable(dev);
        pm_runtime_set_active(dev);
        pm_runtime_enable(dev);
    }

    return amdgpu_device_resume(drm_dev, true, true);
}

static int amdgpu_pmops_freeze(struct device *dev)
{
    struct pci_dev *pdev = to_pci_dev(dev);

    struct drm_device *drm_dev = pci_get_drvdata(pdev);
    return amdgpu_suspend_kms(drm_dev, false, true);
    return amdgpu_device_suspend(drm_dev, false, true);
}

static int amdgpu_pmops_thaw(struct device *dev)
{
    struct pci_dev *pdev = to_pci_dev(dev);

    struct drm_device *drm_dev = pci_get_drvdata(pdev);
    return amdgpu_resume_kms(drm_dev, false, true);
    return amdgpu_device_resume(drm_dev, false, true);
}

static int amdgpu_pmops_poweroff(struct device *dev)
{
    struct pci_dev *pdev = to_pci_dev(dev);

    struct drm_device *drm_dev = pci_get_drvdata(pdev);
    return amdgpu_device_suspend(drm_dev, true, true);
}

static int amdgpu_pmops_restore(struct device *dev)
{
    struct pci_dev *pdev = to_pci_dev(dev);

    struct drm_device *drm_dev = pci_get_drvdata(pdev);
    return amdgpu_device_resume(drm_dev, false, true);
}

static int amdgpu_pmops_runtime_suspend(struct device *dev)
@@ -436,7 +559,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
    drm_kms_helper_poll_disable(drm_dev);
    vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);

    ret = amdgpu_suspend_kms(drm_dev, false, false);
    ret = amdgpu_device_suspend(drm_dev, false, false);
    pci_save_state(pdev);
    pci_disable_device(pdev);
    pci_ignore_hotplug(pdev);
@@ -469,7 +592,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
        return ret;
    pci_set_master(pdev);

    ret = amdgpu_resume_kms(drm_dev, false, false);
    ret = amdgpu_device_resume(drm_dev, false, false);
    drm_kms_helper_poll_enable(drm_dev);
    vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
    drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
@@ -523,8 +646,8 @@ static const struct dev_pm_ops amdgpu_pm_ops = {
    .resume = amdgpu_pmops_resume,
    .freeze = amdgpu_pmops_freeze,
    .thaw = amdgpu_pmops_thaw,
    .poweroff = amdgpu_pmops_freeze,
    .restore = amdgpu_pmops_resume,
    .poweroff = amdgpu_pmops_poweroff,
    .restore = amdgpu_pmops_restore,
    .runtime_suspend = amdgpu_pmops_runtime_suspend,
    .runtime_resume = amdgpu_pmops_runtime_resume,
    .runtime_idle = amdgpu_pmops_runtime_idle,
@@ -606,6 +729,7 @@ static struct pci_driver amdgpu_kms_pci_driver = {
    .id_table = pciidlist,
    .probe = amdgpu_pci_probe,
    .remove = amdgpu_pci_remove,
    .shutdown = amdgpu_pci_shutdown,
    .driver.pm = &amdgpu_pm_ops,
};

@@ -25,6 +25,7 @@
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
@@ -47,8 +48,35 @@ struct amdgpu_fbdev {
    struct amdgpu_device *adev;
};

static int
amdgpufb_open(struct fb_info *info, int user)
{
    struct amdgpu_fbdev *rfbdev = info->par;
    struct amdgpu_device *adev = rfbdev->adev;
    int ret = pm_runtime_get_sync(adev->ddev->dev);
    if (ret < 0 && ret != -EACCES) {
        pm_runtime_mark_last_busy(adev->ddev->dev);
        pm_runtime_put_autosuspend(adev->ddev->dev);
        return ret;
    }
    return 0;
}

static int
amdgpufb_release(struct fb_info *info, int user)
{
    struct amdgpu_fbdev *rfbdev = info->par;
    struct amdgpu_device *adev = rfbdev->adev;

    pm_runtime_mark_last_busy(adev->ddev->dev);
    pm_runtime_put_autosuspend(adev->ddev->dev);
    return 0;
}

static struct fb_ops amdgpufb_ops = {
    .owner = THIS_MODULE,
    .fb_open = amdgpufb_open,
    .fb_release = amdgpufb_release,
    .fb_check_var = drm_fb_helper_check_var,
    .fb_set_par = drm_fb_helper_set_par,
    .fb_fillrect = drm_fb_helper_cfb_fillrect,
|
||||
|
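A side note on the runtime-PM bracket used by the new amdgpufb_open/release hooks above. The following is a minimal sketch of the same idiom for a generic struct device; the example_open/example_close names are hypothetical, not part of the driver. pm_runtime_get_sync() can return -EACCES when runtime PM is administratively disabled, which the fbdev path deliberately tolerates rather than treating as a hard failure.

#include <linux/pm_runtime.h>

/* Hypothetical helpers sketching the get/put pairing, assuming a device
 * with runtime PM and autosuspend enabled. */
static int example_open(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0 && ret != -EACCES) {
		/* drop the reference taken by get_sync() on failure */
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
		return ret;
	}
	return 0;
}

static void example_close(struct device *dev)
{
	/* pair the open-time get with an autosuspend-friendly put */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}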
@@ -124,7 +124,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
bool skip_preamble, need_ctx_switch;
unsigned patch_offset = ~0;
struct amdgpu_vm *vm;
uint64_t ctx;
uint64_t fence_ctx;
uint32_t status = 0, alloc_size;

unsigned i;
int r = 0;

@@ -135,10 +136,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
/* ring tests don't use a job */
if (job) {
vm = job->vm;
ctx = job->ctx;
fence_ctx = job->fence_ctx;
} else {
vm = NULL;
ctx = 0;
fence_ctx = 0;
}

if (!ring->ready) {

@@ -151,7 +152,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
return -EINVAL;
}

r = amdgpu_ring_alloc(ring, 256 * num_ibs);
alloc_size = amdgpu_ring_get_dma_frame_size(ring) +
num_ibs * amdgpu_ring_get_emit_ib_size(ring);

r = amdgpu_ring_alloc(ring, alloc_size);
if (r) {
dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
return r;

@@ -174,13 +178,22 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
/* always set cond_exec_polling to CONTINUE */
*ring->cond_exe_cpu_addr = 1;

skip_preamble = ring->current_ctx == ctx;
need_ctx_switch = ring->current_ctx != ctx;
skip_preamble = ring->current_ctx == fence_ctx;
need_ctx_switch = ring->current_ctx != fence_ctx;
if (job && ring->funcs->emit_cntxcntl) {
if (need_ctx_switch)
status |= AMDGPU_HAVE_CTX_SWITCH;
status |= job->preamble_status;
amdgpu_ring_emit_cntxcntl(ring, status);
}

for (i = 0; i < num_ibs; ++i) {
ib = &ibs[i];

/* drop preamble IBs if we don't have a context switch */
if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble)
if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
skip_preamble &&
!(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST))
continue;

amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,

@@ -209,7 +222,9 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
amdgpu_ring_patch_cond_exec(ring, patch_offset);

ring->current_ctx = ctx;
ring->current_ctx = fence_ctx;
if (ring->funcs->emit_switch_buffer)
amdgpu_ring_emit_switch_buffer(ring);
amdgpu_ring_commit(ring);
return 0;
}

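The hunk above is the consumer side of the new per-ring sizing interface: instead of the old flat 256 dwords per IB, the frame reservation is now computed from the per-IP callbacks added later in this diff. A minimal sketch of the arithmetic, using the names this series introduces; the example function itself is hypothetical:

/* With the gfx8 gfx-ring numbers shown later in this diff (337 dw of
 * per-frame overhead, 4 dw per IB), a two-IB job reserves
 * 337 + 2 * 4 = 345 dwords instead of the old 2 * 256 = 512. */
static unsigned example_frame_dwords(struct amdgpu_ring *ring,
				     unsigned num_ibs)
{
	return amdgpu_ring_get_dma_frame_size(ring) +
	       num_ibs * amdgpu_ring_get_emit_ib_size(ring);
}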
@@ -119,8 +119,6 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
*/
void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
{
int r;

if (adev->irq.ih.use_bus_addr) {
if (adev->irq.ih.ring) {
/* add 8 bytes for the rptr/wptr shadows and

@@ -132,17 +130,9 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
adev->irq.ih.ring = NULL;
}
} else {
if (adev->irq.ih.ring_obj) {
r = amdgpu_bo_reserve(adev->irq.ih.ring_obj, false);
if (likely(r == 0)) {
amdgpu_bo_kunmap(adev->irq.ih.ring_obj);
amdgpu_bo_unpin(adev->irq.ih.ring_obj);
amdgpu_bo_unreserve(adev->irq.ih.ring_obj);
}
amdgpu_bo_unref(&adev->irq.ih.ring_obj);
adev->irq.ih.ring = NULL;
adev->irq.ih.ring_obj = NULL;
}
amdgpu_bo_free_kernel(&adev->irq.ih.ring_obj,
&adev->irq.ih.gpu_addr,
(void **)&adev->irq.ih.ring);
amdgpu_wb_free(adev, adev->irq.ih.wptr_offs);
amdgpu_wb_free(adev, adev->irq.ih.rptr_offs);
}

@@ -91,7 +91,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
amdgpu_ib_free(job->adev, &job->ibs[i], f);
}

void amdgpu_job_free_cb(struct amd_sched_job *s_job)
static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
{
struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);

@@ -124,7 +124,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
return r;

job->owner = owner;
job->ctx = entity->fence_context;
job->fence_ctx = entity->fence_context;
*f = fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
amd_sched_entity_push_job(&job->base);

@@ -296,7 +296,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
break;
case AMDGPU_HW_IP_VCE:
type = AMD_IP_BLOCK_TYPE_VCE;
for (i = 0; i < AMDGPU_MAX_VCE_RINGS; i++)
for (i = 0; i < adev->vce.num_rings; i++)
ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
ib_size_alignment = 1;

@@ -542,12 +542,16 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
return r;

fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
if (unlikely(!fpriv))
return -ENOMEM;
if (unlikely(!fpriv)) {
r = -ENOMEM;
goto out_suspend;
}

r = amdgpu_vm_init(adev, &fpriv->vm);
if (r)
goto error_free;
if (r) {
kfree(fpriv);
goto out_suspend;
}

mutex_init(&fpriv->bo_list_lock);
idr_init(&fpriv->bo_list_handles);

@@ -556,12 +560,9 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)

file_priv->driver_priv = fpriv;

out_suspend:
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return 0;

error_free:
kfree(fpriv);

return r;
}

@@ -600,6 +601,9 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,

kfree(fpriv);
file_priv->driver_priv = NULL;

pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
}

/**

@@ -614,6 +618,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
void amdgpu_driver_preclose_kms(struct drm_device *dev,
struct drm_file *file_priv)
{
pm_runtime_get_sync(dev->dev);
}

/*

@@ -38,8 +38,6 @@
#include "amdgpu_trace.h"


int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);

static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
struct ttm_mem_reg *mem)

@@ -287,6 +285,35 @@ error_free:
return r;
}

/**
* amdgpu_bo_free_kernel - free BO for kernel use
*
* @bo: amdgpu BO to free
*
* unmaps and unpin a BO for kernel internal use.
*/
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
void **cpu_addr)
{
if (*bo == NULL)
return;

if (likely(amdgpu_bo_reserve(*bo, false) == 0)) {
if (cpu_addr)
amdgpu_bo_kunmap(*bo);

amdgpu_bo_unpin(*bo);
amdgpu_bo_unreserve(*bo);
}
amdgpu_bo_unref(bo);

if (gpu_addr)
*gpu_addr = 0;

if (cpu_addr)
*cpu_addr = NULL;
}

int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
unsigned long size, int byte_align,
bool kernel, u32 domain, u64 flags,

@@ -646,6 +673,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
dev_err(bo->adev->dev, "%p pin failed\n", bo);
goto error;
}
r = amdgpu_ttm_bind(bo->tbo.ttm, &bo->tbo.mem);
if (unlikely(r)) {
dev_err(bo->adev->dev, "%p bind failed\n", bo);
goto error;
}

bo->pin_count = 1;
if (gpu_addr != NULL)

@@ -692,7 +724,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
} else {
} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
}

@@ -918,8 +950,11 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
!amdgpu_ttm_is_bound(bo->tbo.ttm));
WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
!bo->pin_count);
WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);

return bo->tbo.offset;
}

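amdgpu_bo_free_kernel(), added above, is the teardown counterpart of amdgpu_bo_create_kernel() (declared in amdgpu_object.h below); several finalizers in this series collapse open-coded reserve/kunmap/unpin/unref sequences into this one call. A minimal sketch of the pairing, with a hypothetical example function and error handling on the use side elided:

static int example_alloc(struct amdgpu_device *adev)
{
	struct amdgpu_bo *bo = NULL;
	u64 gpu_addr = 0;
	void *cpu_ptr = NULL;
	int r;

	r = amdgpu_bo_create_kernel(adev, 4096, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &bo, &gpu_addr, &cpu_ptr);
	if (r)
		return r;

	/* ... use cpu_ptr / gpu_addr ... */

	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
	/* bo is NULL, gpu_addr is 0 and cpu_ptr is NULL afterwards,
	 * so a double free is harmless */
	return 0;
}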
@@ -31,6 +31,8 @@
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

#define AMDGPU_BO_INVALID_OFFSET LONG_MAX

/**
* amdgpu_mem_type_to_domain - return domain corresponding to mem_type
* @mem_type: ttm memory type

@@ -128,6 +130,8 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
unsigned long size, int align,
u32 domain, struct amdgpu_bo **bo_ptr,
u64 *gpu_addr, void **cpu_addr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
void **cpu_addr);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);

@@ -25,6 +25,7 @@
#include "amdgpu.h"
#include "atom.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include <asm/div64.h>
#include <linux/gcd.h>

@@ -30,6 +30,7 @@
#include "amdgpu_pm.h"
#include <drm/amdgpu_drm.h>
#include "amdgpu_powerplay.h"
#include "si_dpm.h"
#include "cik_dpm.h"
#include "vi_dpm.h"

@@ -52,10 +53,6 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev)
pp_init->chip_family = adev->family;
pp_init->chip_id = adev->asic_type;
pp_init->device = amdgpu_cgs_create_device(adev);
pp_init->rev_id = adev->pdev->revision;
pp_init->sub_sys_id = adev->pdev->subsystem_device;
pp_init->sub_vendor_id = adev->pdev->subsystem_vendor;

ret = amd_powerplay_init(pp_init, amd_pp);
kfree(pp_init);
#endif

@@ -63,6 +60,15 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev)
amd_pp->pp_handle = (void *)adev;

switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
case CHIP_OLAND:
case CHIP_HAINAN:
amd_pp->ip_funcs = &si_dpm_ip_funcs;
break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
case CHIP_HAWAII:

@@ -252,28 +252,17 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
*/
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
int r;
struct amdgpu_bo *ring_obj;

ring_obj = ring->ring_obj;
ring->ready = false;
ring->ring = NULL;
ring->ring_obj = NULL;

amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
amdgpu_wb_free(ring->adev, ring->fence_offs);
amdgpu_wb_free(ring->adev, ring->rptr_offs);
amdgpu_wb_free(ring->adev, ring->wptr_offs);

if (ring_obj) {
r = amdgpu_bo_reserve(ring_obj, false);
if (likely(r == 0)) {
amdgpu_bo_kunmap(ring_obj);
amdgpu_bo_unpin(ring_obj);
amdgpu_bo_unreserve(ring_obj);
}
amdgpu_bo_unref(&ring_obj);
}
amdgpu_bo_free_kernel(&ring->ring_obj,
&ring->gpu_addr,
(void **)&ring->ring);

amdgpu_debugfs_ring_fini(ring);
}

@@ -89,10 +89,10 @@ int amdgpu_ttm_global_init(struct amdgpu_device *adev)
global_ref->init = &amdgpu_ttm_mem_global_init;
global_ref->release = &amdgpu_ttm_mem_global_release;
r = drm_global_item_ref(global_ref);
if (r != 0) {
if (r) {
DRM_ERROR("Failed setting up TTM memory accounting "
"subsystem.\n");
return r;
goto error_mem;
}

adev->mman.bo_global_ref.mem_glob =

@@ -103,26 +103,30 @@ int amdgpu_ttm_global_init(struct amdgpu_device *adev)
global_ref->init = &ttm_bo_global_init;
global_ref->release = &ttm_bo_global_release;
r = drm_global_item_ref(global_ref);
if (r != 0) {
if (r) {
DRM_ERROR("Failed setting up TTM BO subsystem.\n");
drm_global_item_unref(&adev->mman.mem_global_ref);
return r;
goto error_bo;
}

ring = adev->mman.buffer_funcs_ring;
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
rq, amdgpu_sched_jobs);
if (r != 0) {
if (r) {
DRM_ERROR("Failed setting up TTM BO move run queue.\n");
drm_global_item_unref(&adev->mman.mem_global_ref);
drm_global_item_unref(&adev->mman.bo_global_ref.ref);
return r;
goto error_entity;
}

adev->mman.mem_global_referenced = true;

return 0;

error_entity:
drm_global_item_unref(&adev->mman.bo_global_ref.ref);
error_bo:
drm_global_item_unref(&adev->mman.mem_global_ref);
error_mem:
return r;
}

static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)

@@ -197,6 +201,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
.lpfn = 0,
.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
};
unsigned i;

if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
placement->placement = &placements;

@@ -208,10 +213,25 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
rbo = container_of(bo, struct amdgpu_bo, tbo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
if (rbo->adev->mman.buffer_funcs_ring->ready == false)
if (rbo->adev->mman.buffer_funcs_ring->ready == false) {
amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU);
else
} else {
amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_GTT);
for (i = 0; i < rbo->placement.num_placement; ++i) {
if (!(rbo->placements[i].flags &
TTM_PL_FLAG_TT))
continue;

if (rbo->placements[i].lpfn)
continue;

/* set an upper limit to force directly
* allocating address space for the BO.
*/
rbo->placements[i].lpfn =
rbo->adev->mc.gtt_size >> PAGE_SHIFT;
}
}
break;
case TTM_PL_TT:
default:

@@ -256,8 +276,12 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
new_start = new_mem->start << PAGE_SHIFT;

switch (old_mem->mem_type) {
case TTM_PL_VRAM:
case TTM_PL_TT:
r = amdgpu_ttm_bind(bo->ttm, old_mem);
if (r)
return r;

case TTM_PL_VRAM:
old_start += bo->bdev->man[old_mem->mem_type].gpu_offset;
break;
default:

@@ -265,8 +289,12 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
return -EINVAL;
}
switch (new_mem->mem_type) {
case TTM_PL_VRAM:
case TTM_PL_TT:
r = amdgpu_ttm_bind(bo->ttm, new_mem);
if (r)
return r;

case TTM_PL_VRAM:
new_start += bo->bdev->man[new_mem->mem_type].gpu_offset;
break;
default:

@@ -311,7 +339,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
placements.fpfn = 0;
placements.lpfn = 0;
placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
interruptible, no_wait_gpu);

@@ -358,7 +386,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
placements.fpfn = 0;
placements.lpfn = 0;
placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
interruptible, no_wait_gpu);

@@ -520,6 +548,7 @@ struct amdgpu_ttm_tt {
spinlock_t guptasklock;
struct list_head guptasks;
atomic_t mmu_invalidations;
struct list_head list;
};

int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)

@@ -637,7 +666,6 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
struct ttm_mem_reg *bo_mem)
{
struct amdgpu_ttm_tt *gtt = (void*)ttm;
uint32_t flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
int r;

if (gtt->userptr) {

@@ -647,7 +675,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
return r;
}
}
gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
if (!ttm->num_pages) {
WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
ttm->num_pages, bo_mem, ttm);

@@ -658,14 +686,62 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
bo_mem->mem_type == AMDGPU_PL_OA)
return -EINVAL;

return 0;
}

bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;

return gtt && !list_empty(&gtt->list);
}

int amdgpu_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
uint32_t flags;
int r;

if (!ttm || amdgpu_ttm_is_bound(ttm))
return 0;

flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
ttm->pages, gtt->ttm.dma_address, flags);

if (r) {
DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
ttm->num_pages, (unsigned)gtt->offset);
DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
ttm->num_pages, gtt->offset);
return r;
}
spin_lock(&gtt->adev->gtt_list_lock);
list_add_tail(&gtt->list, &gtt->adev->gtt_list);
spin_unlock(&gtt->adev->gtt_list_lock);
return 0;
}

int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
{
struct amdgpu_ttm_tt *gtt, *tmp;
struct ttm_mem_reg bo_mem;
uint32_t flags;
int r;

bo_mem.mem_type = TTM_PL_TT;
spin_lock(&adev->gtt_list_lock);
list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) {
flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, &bo_mem);
r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
gtt->ttm.ttm.pages, gtt->ttm.dma_address,
flags);
if (r) {
spin_unlock(&adev->gtt_list_lock);
DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
gtt->ttm.ttm.num_pages, gtt->offset);
return r;
}
}
spin_unlock(&adev->gtt_list_lock);
return 0;
}

@@ -673,6 +749,9 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;

if (!amdgpu_ttm_is_bound(ttm))
return 0;

/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
if (gtt->adev->gart.ready)
amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);

@@ -680,6 +759,10 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
if (gtt->userptr)
amdgpu_ttm_tt_unpin_userptr(ttm);

spin_lock(&gtt->adev->gtt_list_lock);
list_del_init(&gtt->list);
spin_unlock(&gtt->adev->gtt_list_lock);

return 0;
}

@@ -716,6 +799,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
kfree(gtt);
return NULL;
}
INIT_LIST_HEAD(&gtt->list);
return &gtt->ttm.ttm;
}

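The net effect of the hunks above is a deferred-bind scheme: amdgpu_ttm_backend_bind() now only records the GTT offset, amdgpu_ttm_bind() performs the actual GART binding the first time a caller needs the pages mapped, and every bound ttm_tt sits on adev->gtt_list so amdgpu_ttm_recover_gart() can rebind everything after a GPU reset. A minimal sketch of the caller-side idiom, using the functions added above; the example wrapper is hypothetical (amdgpu_ttm_bind already performs the is_bound check internally, so the explicit check here is purely illustrative):

static int example_ensure_bound(struct ttm_tt *ttm,
				struct ttm_mem_reg *mem)
{
	/* already on adev->gtt_list, nothing to do */
	if (amdgpu_ttm_is_bound(ttm))
		return 0;

	/* binds into the GART and adds the tt to the tracking list */
	return amdgpu_ttm_bind(ttm, mem);
}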
@@ -26,13 +26,13 @@

#include "gpu_scheduler.h"

#define AMDGPU_PL_GDS TTM_PL_PRIV0
#define AMDGPU_PL_GWS TTM_PL_PRIV1
#define AMDGPU_PL_OA TTM_PL_PRIV2
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
#define AMDGPU_PL_GWS (TTM_PL_PRIV + 1)
#define AMDGPU_PL_OA (TTM_PL_PRIV + 2)

#define AMDGPU_PL_FLAG_GDS TTM_PL_FLAG_PRIV0
#define AMDGPU_PL_FLAG_GWS TTM_PL_FLAG_PRIV1
#define AMDGPU_PL_FLAG_OA TTM_PL_FLAG_PRIV2
#define AMDGPU_PL_FLAG_GDS (TTM_PL_FLAG_PRIV << 0)
#define AMDGPU_PL_FLAG_GWS (TTM_PL_FLAG_PRIV << 1)
#define AMDGPU_PL_FLAG_OA (TTM_PL_FLAG_PRIV << 2)

#define AMDGPU_TTM_LRU_SIZE 20

@@ -77,4 +77,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
struct fence **fence);

int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
int amdgpu_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

#endif

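The macro rework above tracks TTM's move from fixed TTM_PL_PRIV0..2 placement constants to a single TTM_PL_PRIV base plus offsets (and TTM_PL_FLAG_PRIV shifts for the flag bits). A hedged compile-time sanity check of the new arithmetic; the function name is hypothetical and assumes the amdgpu_ttm.h definitions above plus <linux/bug.h>:

static inline void example_check_placements(void)
{
	/* offsets from the single private base */
	BUILD_BUG_ON(AMDGPU_PL_OA != TTM_PL_PRIV + 2);
	/* flag bits shifted up from the single private flag */
	BUILD_BUG_ON(AMDGPU_PL_FLAG_OA != (TTM_PL_FLAG_PRIV << 2));
}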
@@ -247,35 +247,28 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
const struct common_firmware_header *header = NULL;

err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
if (err) {
dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
err = -ENOMEM;
goto failed;
}

err = amdgpu_bo_reserve(*bo, false);
if (err) {
amdgpu_bo_unref(bo);
dev_err(adev->dev, "(%d) Firmware buffer reserve failed\n", err);
goto failed;
goto failed_reserve;
}

err = amdgpu_bo_pin(*bo, AMDGPU_GEM_DOMAIN_GTT, &fw_mc_addr);
if (err) {
amdgpu_bo_unreserve(*bo);
amdgpu_bo_unref(bo);
dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
goto failed;
goto failed_pin;
}

err = amdgpu_bo_kmap(*bo, &fw_buf_ptr);
if (err) {
dev_err(adev->dev, "(%d) Firmware buffer kmap failed\n", err);
amdgpu_bo_unpin(*bo);
amdgpu_bo_unreserve(*bo);
amdgpu_bo_unref(bo);
goto failed;
goto failed_kmap;
}

amdgpu_bo_unreserve(*bo);

@@ -290,10 +283,16 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
fw_offset += ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
}
}
return 0;

failed_kmap:
amdgpu_bo_unpin(*bo);
failed_pin:
amdgpu_bo_unreserve(*bo);
failed_reserve:
amdgpu_bo_unref(bo);
failed:
if (err)
adev->firmware.smu_load = false;
adev->firmware.smu_load = false;

return err;
}

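The amdgpu_ucode_init_bo() rework above replaces per-branch inline cleanup with the standard kernel goto-ladder idiom: each label undoes exactly the steps that succeeded before the failure, in reverse order, so no cleanup code is duplicated. A generic sketch of the shape, with hypothetical step/undo helpers standing in for reserve/pin/kmap:

/* Hypothetical helpers; each step_x() returns 0 on success. */
int step_a(void); void undo_step_a(void);
int step_b(void); void undo_step_b(void);
int step_c(void);

static int example_init(void)
{
	int err;

	err = step_a();
	if (err)
		goto fail;
	err = step_b();
	if (err)
		goto undo_a;
	err = step_c();
	if (err)
		goto undo_b;
	return 0;

undo_b:	/* unwind in reverse order of setup */
	undo_step_b();
undo_a:
	undo_step_a();
fail:
	return err;
}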
@@ -249,22 +249,13 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)

int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
int r;

kfree(adev->uvd.saved_bo);

amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);

if (adev->uvd.vcpu_bo) {
r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
if (!r) {
amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
amdgpu_bo_unpin(adev->uvd.vcpu_bo);
amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
}

amdgpu_bo_unref(&adev->uvd.vcpu_bo);
}
amdgpu_bo_free_kernel(&adev->uvd.vcpu_bo,
&adev->uvd.gpu_addr,
(void **)&adev->uvd.cpu_addr);

amdgpu_ring_fini(&adev->uvd.ring);

@@ -891,6 +882,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
return -EINVAL;
}

r = amdgpu_cs_sysvm_access_required(parser);
if (r)
return r;

ctx.parser = parser;
ctx.buf_sizes = buf_sizes;
ctx.ib_idx = ib_idx;

@@ -634,7 +634,11 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
uint32_t allocated = 0;
uint32_t tmp, handle = 0;
uint32_t *size = &tmp;
int i, r = 0, idx = 0;
int i, r, idx = 0;

r = amdgpu_cs_sysvm_access_required(p);
if (r)
return r;

while (idx < ib->length_dw) {
uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);

@@ -799,6 +803,18 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
amdgpu_ring_write(ring, VCE_CMD_END);
}

unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring)
{
return
4; /* amdgpu_vce_ring_emit_ib */
}

unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring)
{
return
6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
}

/**
* amdgpu_vce_ring_test_ring - test if VCE ring is working
*

@@ -850,8 +866,8 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct fence *fence = NULL;
long r;

/* skip vce ring1 ib test for now, since it's not reliable */
if (ring == &ring->adev->vce.ring[1])
/* skip vce ring1/2 ib test for now, since it's not reliable */
if (ring != &ring->adev->vce.ring[0])
return 0;

r = amdgpu_vce_get_create_msg(ring, 1, NULL);

@@ -42,5 +42,7 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout);
void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring);
unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring);
unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring);

#endif

@@ -1163,7 +1163,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
}

flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
gtt_flags = (adev == bo_va->bo->adev) ? flags : 0;
gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
adev == bo_va->bo->adev) ? flags : 0;

spin_lock(&vm->status_lock);
if (!list_empty(&bo_va->vm_status))

@@ -497,7 +497,13 @@ void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev,
* SetPixelClock provides the dividers
*/
args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
args.v6.ucPpll = ATOM_EXT_PLL1;
if (adev->asic_type == CHIP_TAHITI ||
adev->asic_type == CHIP_PITCAIRN ||
adev->asic_type == CHIP_VERDE ||
adev->asic_type == CHIP_OLAND)
args.v6.ucPpll = ATOM_PPLL0;
else
args.v6.ucPpll = ATOM_EXT_PLL1;
break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);

@@ -27,6 +27,7 @@
#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_i2c.h"

#define TARGET_HW_I2C_CLOCK 50

@@ -5396,7 +5396,7 @@ static void ci_dpm_disable(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);

ci_dpm_powergate_uvd(adev, false);
ci_dpm_powergate_uvd(adev, true);

if (!amdgpu_ci_is_smc_running(adev))
return;

@@ -6036,7 +6036,7 @@ static int ci_dpm_init(struct amdgpu_device *adev)

pi->caps_dynamic_ac_timing = true;

pi->uvd_power_gated = false;
pi->uvd_power_gated = true;

/* make sure dc limits are valid */
if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||

@@ -6179,8 +6179,6 @@ static int ci_dpm_late_init(void *handle)
if (ret)
return ret;

ci_dpm_powergate_uvd(adev, true);

return 0;
}

@@ -847,6 +847,22 @@ static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}

static unsigned cik_sdma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
{
return
7 + 4; /* cik_sdma_ring_emit_ib */
}

static unsigned cik_sdma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
{
return
6 + /* cik_sdma_ring_emit_hdp_flush */
3 + /* cik_sdma_ring_emit_hdp_invalidate */
6 + /* cik_sdma_ring_emit_pipeline_sync */
12 + /* cik_sdma_ring_emit_vm_flush */
9 + 9 + 9; /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
}

static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
bool enable)
{

@@ -1220,6 +1236,8 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
.test_ib = cik_sdma_ring_test_ib,
.insert_nop = cik_sdma_ring_insert_nop,
.pad_ib = cik_sdma_ring_pad_ib,
.get_emit_ib_size = cik_sdma_ring_get_emit_ib_size,
.get_dma_frame_size = cik_sdma_ring_get_dma_frame_size,
};

static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)

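As above for SDMA, each IP block in this series implements the two sizing callbacks and registers them in its amdgpu_ring_funcs table; amdgpu_ib_schedule() then reaches them through ring->funcs. The wrappers themselves are not part of this excerpt, so their exact form is an assumption; a plausible sketch, following the pattern of the other amdgpu_ring_* accessors:

/* Hedged sketch of the accessor wrappers consumed by
 * amdgpu_ib_schedule(); the real definitions live outside this diff. */
#define amdgpu_ring_get_emit_ib_size(r) \
	((r)->funcs->get_emit_ib_size((r)))
#define amdgpu_ring_get_dma_frame_size(r) \
	((r)->funcs->get_dma_frame_size((r)))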
@@ -44,6 +44,7 @@

static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate);
static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
static void cz_dpm_fini(struct amdgpu_device *adev);

static struct cz_ps *cz_get_ps(struct amdgpu_ps *rps)
{

@@ -350,6 +351,8 @@ static int cz_parse_power_table(struct amdgpu_device *adev)

ps = kzalloc(sizeof(struct cz_ps), GFP_KERNEL);
if (ps == NULL) {
for (j = 0; j < i; j++)
kfree(adev->pm.dpm.ps[j].ps_priv);
kfree(adev->pm.dpm.ps);
return -ENOMEM;
}

@@ -409,11 +412,11 @@ static int cz_dpm_init(struct amdgpu_device *adev)

ret = amdgpu_get_platform_caps(adev);
if (ret)
return ret;
goto err;

ret = amdgpu_parse_extended_power_table(adev);
if (ret)
return ret;
goto err;

pi->sram_end = SMC_RAM_END;

@@ -467,23 +470,26 @@ static int cz_dpm_init(struct amdgpu_device *adev)

ret = cz_parse_sys_info_table(adev);
if (ret)
return ret;
goto err;

cz_patch_voltage_values(adev);
cz_construct_boot_state(adev);

ret = cz_parse_power_table(adev);
if (ret)
return ret;
goto err;

ret = cz_process_firmware_header(adev);
if (ret)
return ret;
goto err;

pi->dpm_enabled = true;
pi->uvd_dynamic_pg = false;

return 0;
err:
cz_dpm_fini(adev);
return ret;
}

static void cz_dpm_fini(struct amdgpu_device *adev)

@@ -672,17 +678,12 @@ static void cz_reset_ap_mask(struct amdgpu_device *adev)
struct cz_power_info *pi = cz_get_pi(adev);

pi->active_process_mask = 0;

}

static int cz_dpm_download_pptable_from_smu(struct amdgpu_device *adev,
void **table)
{
int ret = 0;

ret = cz_smu_download_pptable(adev, table);

return ret;
return cz_smu_download_pptable(adev, table);
}

static int cz_dpm_upload_pptable_to_smu(struct amdgpu_device *adev)

@@ -822,9 +823,9 @@ static void cz_init_sclk_limit(struct amdgpu_device *adev)
pi->sclk_dpm.hard_min_clk = 0;
cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel);
level = cz_get_argument(adev);
if (level < table->count)
if (level < table->count) {
clock = table->entries[level].clk;
else {
} else {
DRM_ERROR("Invalid SLCK Voltage Dependency table entry.\n");
clock = table->entries[table->count - 1].clk;
}

@@ -850,9 +851,9 @@ static void cz_init_uvd_limit(struct amdgpu_device *adev)
pi->uvd_dpm.hard_min_clk = 0;
cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxUvdLevel);
level = cz_get_argument(adev);
if (level < table->count)
if (level < table->count) {
clock = table->entries[level].vclk;
else {
} else {
DRM_ERROR("Invalid UVD Voltage Dependency table entry.\n");
clock = table->entries[table->count - 1].vclk;
}

@@ -878,9 +879,9 @@ static void cz_init_vce_limit(struct amdgpu_device *adev)
pi->vce_dpm.hard_min_clk = table->entries[0].ecclk;
cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxEclkLevel);
level = cz_get_argument(adev);
if (level < table->count)
if (level < table->count) {
clock = table->entries[level].ecclk;
else {
} else {
/* future BIOS would fix this error */
DRM_ERROR("Invalid VCE Voltage Dependency table entry.\n");
clock = table->entries[table->count - 1].ecclk;

@@ -907,9 +908,9 @@ static void cz_init_acp_limit(struct amdgpu_device *adev)
pi->acp_dpm.hard_min_clk = 0;
cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxAclkLevel);
level = cz_get_argument(adev);
if (level < table->count)
if (level < table->count) {
clock = table->entries[level].clk;
else {
} else {
DRM_ERROR("Invalid ACP Voltage Dependency table entry.\n");
clock = table->entries[table->count - 1].clk;
}

@@ -934,7 +935,6 @@ static void cz_init_sclk_threshold(struct amdgpu_device *adev)
struct cz_power_info *pi = cz_get_pi(adev);

pi->low_sclk_interrupt_threshold = 0;

}

static void cz_dpm_setup_asic(struct amdgpu_device *adev)

@@ -1207,7 +1207,7 @@ static int cz_enable_didt(struct amdgpu_device *adev, bool enable)
int ret;

if (pi->caps_sq_ramping || pi->caps_db_ramping ||
pi->caps_td_ramping || pi->caps_tcp_ramping) {
pi->caps_td_ramping || pi->caps_tcp_ramping) {
if (adev->gfx.gfx_current_status != AMDGPU_GFX_SAFE_MODE) {
ret = cz_disable_cgpg(adev);
if (ret) {

@@ -1281,7 +1281,7 @@ static void cz_apply_state_adjust_rules(struct amdgpu_device *adev,
ps->force_high = false;
ps->need_dfs_bypass = true;
pi->video_start = new_rps->dclk || new_rps->vclk ||
new_rps->evclk || new_rps->ecclk;
new_rps->evclk || new_rps->ecclk;

if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)

@@ -1339,7 +1339,6 @@ static int cz_dpm_enable(struct amdgpu_device *adev)
}

cz_reset_acp_boot_level(adev);

cz_update_current_ps(adev, adev->pm.dpm.boot_ps);

return 0;

@@ -1669,7 +1668,6 @@ static void cz_dpm_post_set_power_state(struct amdgpu_device *adev)
struct amdgpu_ps *ps = &pi->requested_rps;

cz_update_current_ps(adev, ps);

}

static int cz_dpm_force_highest(struct amdgpu_device *adev)

@@ -2201,7 +2199,6 @@ static int cz_update_vce_dpm(struct amdgpu_device *adev)
/* Stable Pstate is enabled and we need to set the VCE DPM to highest level */
if (pi->caps_stable_power_state) {
pi->vce_dpm.hard_min_clk = table->entries[table->count-1].ecclk;

} else { /* non-stable p-state cases. without vce.Arbiter.EcclkHardMin */
/* leave it as set by user */
/*pi->vce_dpm.hard_min_clk = table->entries[0].ecclk;*/

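The four cz_init_*_limit() hunks above fix the same bug: the else-branch of a brace-less if swallowed the assignment, so an out-of-range SMU answer could index past the end of the dependency table. The repaired pattern clamps to the last valid entry and logs an error. A generic sketch of the idiom with hypothetical types:

/* Hypothetical table entry type for illustration only. */
struct example_entry {
	u32 clk;
};

static u32 example_clamped_clock(const struct example_entry *entries,
				 u32 count, u32 level)
{
	if (level < count)
		return entries[level].clk;

	/* out-of-range firmware answer: warn and use the highest entry */
	DRM_ERROR("Invalid voltage dependency table entry.\n");
	return entries[count - 1].clk;
}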
@@ -29,6 +29,8 @@
#include "cz_smumgr.h"
#include "smu_ucode_xfer_cz.h"
#include "amdgpu_ucode.h"
#include "cz_dpm.h"
#include "vi_dpm.h"

#include "smu/smu_8_0_d.h"
#include "smu/smu_8_0_sh_mask.h"

@@ -48,7 +50,7 @@ static struct cz_smu_private_data *cz_smu_get_priv(struct amdgpu_device *adev)
return priv;
}

int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg)
static int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg)
{
int i;
u32 content = 0, tmp;

@@ -140,7 +142,7 @@ int cz_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
return 0;
}

int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
static int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
u32 value, u32 limit)
{
int ret;

drivers/gpu/drm/amd/amdgpu/dce_v6_0.c: new file, 3160 lines (diff suppressed because it is too large)
drivers/gpu/drm/amd/amdgpu/dce_v6_0.h: new file, 29 lines
@@ -0,0 +1,29 @@
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/

#ifndef __DCE_V6_0_H__
#define __DCE_V6_0_H__

extern const struct amd_ip_funcs dce_v6_0_ip_funcs;

#endif

drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c: new file, 3233 lines (diff suppressed because it is too large)
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h: new file, 29 lines
@@ -0,0 +1,29 @@
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/

#ifndef __GFX_V6_0_H__
#define __GFX_V6_0_H__

extern const struct amd_ip_funcs gfx_v6_0_ip_funcs;

#endif

@@ -2096,6 +2096,25 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, control);
}

static void gfx_v7_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
uint32_t dw2 = 0;

dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
if (flags & AMDGPU_HAVE_CTX_SWITCH) {
/* set load_global_config & load_global_uconfig */
dw2 |= 0x8001;
/* set load_cs_sh_regs */
dw2 |= 0x01000000;
/* set load_per_context_state & load_gfx_sh_regs */
dw2 |= 0x10002;
}

amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
amdgpu_ring_write(ring, dw2);
amdgpu_ring_write(ring, 0);
}

/**
* gfx_v7_0_ring_test_ib - basic ring IB test
*

@@ -2443,7 +2462,7 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
return 0;
}

static u32 gfx_v7_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
static u32 gfx_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
{
return ring->adev->wb.wb[ring->rptr_offs];
}

@@ -2463,11 +2482,6 @@ static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
(void)RREG32(mmCP_RB0_WPTR);
}

static u32 gfx_v7_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
{
return ring->adev->wb.wb[ring->rptr_offs];
}

static u32 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
/* XXX check if swapping is necessary on BE */

@@ -4176,6 +4190,41 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}

static unsigned gfx_v7_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
{
return
4; /* gfx_v7_0_ring_emit_ib_gfx */
}

static unsigned gfx_v7_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
{
return
20 + /* gfx_v7_0_ring_emit_gds_switch */
7 + /* gfx_v7_0_ring_emit_hdp_flush */
5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
3; /* gfx_v7_ring_emit_cntxcntl */
}

static unsigned gfx_v7_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
{
return
4; /* gfx_v7_0_ring_emit_ib_compute */
}

static unsigned gfx_v7_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
{
return
20 + /* gfx_v7_0_ring_emit_gds_switch */
7 + /* gfx_v7_0_ring_emit_hdp_flush */
5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
7 + /* gfx_v7_0_ring_emit_pipeline_sync */
17 + /* gfx_v7_0_ring_emit_vm_flush */
7 + 7 + 7; /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
}

static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v7_0_select_se_sh,

@@ -4495,9 +4544,9 @@ static int gfx_v7_0_sw_fini(void *handle)
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

amdgpu_bo_unref(&adev->gds.oa_gfx_bo);
amdgpu_bo_unref(&adev->gds.gws_gfx_bo);
amdgpu_bo_unref(&adev->gds.gds_gfx_bo);
amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);

for (i = 0; i < adev->gfx.num_gfx_rings; i++)
amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);

@@ -4928,7 +4977,7 @@ const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
};

static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.get_rptr = gfx_v7_0_ring_get_rptr_gfx,
.get_rptr = gfx_v7_0_ring_get_rptr,
.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
.parse_cs = NULL,

@@ -4943,10 +4992,13 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.test_ib = gfx_v7_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
.get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_gfx,
.get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_gfx,
};

static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
.get_rptr = gfx_v7_0_ring_get_rptr_compute,
.get_rptr = gfx_v7_0_ring_get_rptr,
.get_wptr = gfx_v7_0_ring_get_wptr_compute,
.set_wptr = gfx_v7_0_ring_set_wptr_compute,
.parse_cs = NULL,

@@ -4961,6 +5013,8 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
.test_ib = gfx_v7_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_compute,
.get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_compute,
};

static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)

@@ -2113,9 +2113,9 @@ static int gfx_v8_0_sw_fini(void *handle)
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

amdgpu_bo_unref(&adev->gds.oa_gfx_bo);
amdgpu_bo_unref(&adev->gds.gws_gfx_bo);
amdgpu_bo_unref(&adev->gds.gds_gfx_bo);
amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);

for (i = 0; i < adev->gfx.num_gfx_rings; i++)
amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);

@@ -3866,7 +3866,7 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
}
}

void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
static void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
{
WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 0);

@@ -5835,7 +5835,7 @@ static int gfx_v8_0_set_clockgating_state(void *handle,
return 0;
}

static u32 gfx_v8_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
static u32 gfx_v8_0_ring_get_rptr(struct amdgpu_ring *ring)
{
return ring->adev->wb.wb[ring->rptr_offs];
}

@@ -5915,12 +5915,6 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
{
u32 header, control = 0;

/* insert SWITCH_BUFFER packet before first IB in the ring frame */
if (ctx_switch) {
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
}

if (ib->flags & AMDGPU_IB_FLAG_CE)
header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
else

@@ -5990,14 +5984,6 @@ static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, seq);
amdgpu_ring_write(ring, 0xffffffff);
amdgpu_ring_write(ring, 4); /* poll interval */

if (usepfp) {
/* synce CE with ME to prevent CE fetch CEIB before context switch done */
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
}
}

static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,

@@ -6005,6 +5991,10 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
{
int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);

/* GFX8 emits 128 dw nop to prevent DE do vm_flush before CE finish CEIB */
if (usepfp)
amdgpu_ring_insert_nop(ring, 128);

amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
WRITE_DATA_DST_SEL(0)) |

@@ -6044,18 +6034,11 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
/* sync PFP to ME, otherwise we might get invalid PFP reads */
amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
amdgpu_ring_write(ring, 0x0);
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
/* GFX8 emits 128 dw nop to prevent CE access VM before vm_flush finish */
amdgpu_ring_insert_nop(ring, 128);
}
}

static u32 gfx_v8_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
{
return ring->adev->wb.wb[ring->rptr_offs];
}

static u32 gfx_v8_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
return ring->adev->wb.wb[ring->wptr_offs];

@@ -6091,6 +6074,77 @@ static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, upper_32_bits(seq));
}

static void gfx_v8_ring_emit_sb(struct amdgpu_ring *ring)
{
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
}

static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
uint32_t dw2 = 0;

dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
if (flags & AMDGPU_HAVE_CTX_SWITCH) {
/* set load_global_config & load_global_uconfig */
dw2 |= 0x8001;
/* set load_cs_sh_regs */
dw2 |= 0x01000000;
/* set load_per_context_state & load_gfx_sh_regs for GFX */
dw2 |= 0x10002;

/* set load_ce_ram if preamble presented */
if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
dw2 |= 0x10000000;
} else {
/* still load_ce_ram if this is the first time preamble presented
* although there is no context switch happens.
*/
if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
dw2 |= 0x10000000;
}

amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
amdgpu_ring_write(ring, dw2);
amdgpu_ring_write(ring, 0);
}

static unsigned gfx_v8_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
{
return
4; /* gfx_v8_0_ring_emit_ib_gfx */
}

static unsigned gfx_v8_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
{
return
20 + /* gfx_v8_0_ring_emit_gds_switch */
7 + /* gfx_v8_0_ring_emit_hdp_flush */
5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
6 + 6 + 6 +/* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */
7 + /* gfx_v8_0_ring_emit_pipeline_sync */
256 + 19 + /* gfx_v8_0_ring_emit_vm_flush */
2 + /* gfx_v8_ring_emit_sb */
3; /* gfx_v8_ring_emit_cntxcntl */
}

static unsigned gfx_v8_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
{
return
4; /* gfx_v8_0_ring_emit_ib_compute */
}

static unsigned gfx_v8_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
{
return
20 + /* gfx_v8_0_ring_emit_gds_switch */
7 + /* gfx_v8_0_ring_emit_hdp_flush */
5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
7 + /* gfx_v8_0_ring_emit_pipeline_sync */
17 + /* gfx_v8_0_ring_emit_vm_flush */
7 + 7 + 7; /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
}

static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
enum amdgpu_interrupt_state state)
{

@@ -6257,7 +6311,7 @@ const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
};

static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.get_rptr = gfx_v8_0_ring_get_rptr_gfx,
.get_rptr = gfx_v8_0_ring_get_rptr,
.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
.parse_cs = NULL,

@@ -6272,10 +6326,14 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_switch_buffer = gfx_v8_ring_emit_sb,
.emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
.get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_gfx,
.get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_gfx,
};

static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.get_rptr = gfx_v8_0_ring_get_rptr_compute,
.get_rptr = gfx_v8_0_ring_get_rptr,
.get_wptr = gfx_v8_0_ring_get_wptr_compute,
.set_wptr = gfx_v8_0_ring_set_wptr_compute,
.parse_cs = NULL,

@@ -6290,6 +6348,8 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_compute,
.get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_compute,
};

static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)

drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c: new file, 1071 lines (diff suppressed because it is too large)
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h: new file, 29 lines
@@ -0,0 +1,29 @@
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/

#ifndef __GMC_V6_0_H__
#define __GMC_V6_0_H__

extern const struct amd_ip_funcs gmc_v6_0_ip_funcs;

#endif

drivers/gpu/drm/amd/amdgpu/iceland_smc.c
@@ -121,7 +121,7 @@ out:
	return result;
}

-void iceland_start_smc(struct amdgpu_device *adev)
+static void iceland_start_smc(struct amdgpu_device *adev)
{
	uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);

@@ -129,7 +129,7 @@ void iceland_start_smc(struct amdgpu_device *adev)
	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
}

-void iceland_reset_smc(struct amdgpu_device *adev)
+static void iceland_reset_smc(struct amdgpu_device *adev)
{
	uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);

@@ -145,7 +145,7 @@ static int iceland_program_jump_on_start(struct amdgpu_device *adev)
	return 0;
}

-void iceland_stop_smc_clock(struct amdgpu_device *adev)
+static void iceland_stop_smc_clock(struct amdgpu_device *adev)
{
	uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);

@@ -153,7 +153,7 @@ void iceland_stop_smc_clock(struct amdgpu_device *adev)
	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
}

-void iceland_start_smc_clock(struct amdgpu_device *adev)
+static void iceland_start_smc_clock(struct amdgpu_device *adev)
{
	uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
drivers/gpu/drm/amd/amdgpu/r600_dpm.h (new file, 127 lines)
@@ -0,0 +1,127 @@
/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * (standard MIT license text, as in gmc_v6_0.h above)
 */
#ifndef __R600_DPM_H__
#define __R600_DPM_H__

#define R600_ASI_DFLT 10000
#define R600_BSP_DFLT 0x41EB
#define R600_BSU_DFLT 0x2
#define R600_AH_DFLT 5
#define R600_RLP_DFLT 25
#define R600_RMP_DFLT 65
#define R600_LHP_DFLT 40
#define R600_LMP_DFLT 15
#define R600_TD_DFLT 0
#define R600_UTC_DFLT_00 0x24
#define R600_UTC_DFLT_01 0x22
#define R600_UTC_DFLT_02 0x22
#define R600_UTC_DFLT_03 0x22
#define R600_UTC_DFLT_04 0x22
#define R600_UTC_DFLT_05 0x22
#define R600_UTC_DFLT_06 0x22
#define R600_UTC_DFLT_07 0x22
#define R600_UTC_DFLT_08 0x22
#define R600_UTC_DFLT_09 0x22
#define R600_UTC_DFLT_10 0x22
#define R600_UTC_DFLT_11 0x22
#define R600_UTC_DFLT_12 0x22
#define R600_UTC_DFLT_13 0x22
#define R600_UTC_DFLT_14 0x22
#define R600_DTC_DFLT_00 0x24
#define R600_DTC_DFLT_01 0x22
#define R600_DTC_DFLT_02 0x22
#define R600_DTC_DFLT_03 0x22
#define R600_DTC_DFLT_04 0x22
#define R600_DTC_DFLT_05 0x22
#define R600_DTC_DFLT_06 0x22
#define R600_DTC_DFLT_07 0x22
#define R600_DTC_DFLT_08 0x22
#define R600_DTC_DFLT_09 0x22
#define R600_DTC_DFLT_10 0x22
#define R600_DTC_DFLT_11 0x22
#define R600_DTC_DFLT_12 0x22
#define R600_DTC_DFLT_13 0x22
#define R600_DTC_DFLT_14 0x22
#define R600_VRC_DFLT 0x0000C003
#define R600_VOLTAGERESPONSETIME_DFLT 1000
#define R600_BACKBIASRESPONSETIME_DFLT 1000
#define R600_VRU_DFLT 0x3
#define R600_SPLLSTEPTIME_DFLT 0x1000
#define R600_SPLLSTEPUNIT_DFLT 0x3
#define R600_TPU_DFLT 0
#define R600_TPC_DFLT 0x200
#define R600_SSTU_DFLT 0
#define R600_SST_DFLT 0x00C8
#define R600_GICST_DFLT 0x200
#define R600_FCT_DFLT 0x0400
#define R600_FCTU_DFLT 0
#define R600_CTXCGTT3DRPHC_DFLT 0x20
#define R600_CTXCGTT3DRSDC_DFLT 0x40
#define R600_VDDC3DOORPHC_DFLT 0x100
#define R600_VDDC3DOORSDC_DFLT 0x7
#define R600_VDDC3DOORSU_DFLT 0
#define R600_MPLLLOCKTIME_DFLT 100
#define R600_MPLLRESETTIME_DFLT 150
#define R600_VCOSTEPPCT_DFLT 20
#define R600_ENDINGVCOSTEPPCT_DFLT 5
#define R600_REFERENCEDIVIDER_DFLT 4

#define R600_PM_NUMBER_OF_TC 15
#define R600_PM_NUMBER_OF_SCLKS 20
#define R600_PM_NUMBER_OF_MCLKS 4
#define R600_PM_NUMBER_OF_VOLTAGE_LEVELS 4
#define R600_PM_NUMBER_OF_ACTIVITY_LEVELS 3

/* XXX are these ok? */
#define R600_TEMP_RANGE_MIN (90 * 1000)
#define R600_TEMP_RANGE_MAX (120 * 1000)

#define FDO_PWM_MODE_STATIC 1
#define FDO_PWM_MODE_STATIC_RPM 5

enum r600_power_level {
	R600_POWER_LEVEL_LOW = 0,
	R600_POWER_LEVEL_MEDIUM = 1,
	R600_POWER_LEVEL_HIGH = 2,
	R600_POWER_LEVEL_CTXSW = 3,
};

enum r600_td {
	R600_TD_AUTO,
	R600_TD_UP,
	R600_TD_DOWN,
};

enum r600_display_watermark {
	R600_DISPLAY_WATERMARK_LOW = 0,
	R600_DISPLAY_WATERMARK_HIGH = 1,
};

enum r600_display_gap
{
	R600_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
	R600_PM_DISPLAY_GAP_VBLANK = 1,
	R600_PM_DISPLAY_GAP_WATERMARK = 2,
	R600_PM_DISPLAY_GAP_IGNORE = 3,
};
#endif
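
R600_TEMP_RANGE_MIN/MAX are in millidegrees Celsius, the kernel's usual thermal unit, so they describe a 90-120 C window. A hypothetical helper, purely to illustrate the unit convention (not part of the driver):

	/* Hypothetical: clamp a trip point (in millidegrees C) into range. */
	static int example_clamp_trip(int temp_mdeg)
	{
		if (temp_mdeg < R600_TEMP_RANGE_MIN)
			return R600_TEMP_RANGE_MIN;	/* 90 * 1000 = 90 C */
		if (temp_mdeg > R600_TEMP_RANGE_MAX)
			return R600_TEMP_RANGE_MAX;	/* 120 * 1000 = 120 C */
		return temp_mdeg;
	}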
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -902,6 +902,22 @@ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

+static unsigned sdma_v2_4_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+	return
+		7 + 6; /* sdma_v2_4_ring_emit_ib */
+}
+
+static unsigned sdma_v2_4_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+	return
+		6 + /* sdma_v2_4_ring_emit_hdp_flush */
+		3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
+		6 + /* sdma_v2_4_ring_emit_pipeline_sync */
+		12 + /* sdma_v2_4_ring_emit_vm_flush */
+		10 + 10 + 10; /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
+}
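
Summed, a worst-case sdma_v2_4 frame reserves 6 + 3 + 6 + 12 + (10 + 10 + 10) = 57 DWs outside the IBs, plus 7 + 6 = 13 DWs per IB from sdma_v2_4_ring_get_emit_ib_size.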

static int sdma_v2_4_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1220,6 +1236,8 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
	.test_ib = sdma_v2_4_ring_test_ib,
	.insert_nop = sdma_v2_4_ring_insert_nop,
	.pad_ib = sdma_v2_4_ring_pad_ib,
+	.get_emit_ib_size = sdma_v2_4_ring_get_emit_ib_size,
+	.get_dma_frame_size = sdma_v2_4_ring_get_dma_frame_size,
};

static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -495,31 +495,6 @@ static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}

-unsigned init_cond_exec(struct amdgpu_ring *ring)
-{
-	unsigned ret;
-	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COND_EXE));
-	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
-	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
-	amdgpu_ring_write(ring, 1);
-	ret = ring->wptr;/* this is the offset we need patch later */
-	amdgpu_ring_write(ring, 0x55aa55aa);/* insert dummy here and patch it later */
-	return ret;
-}
-
-void patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
-{
-	unsigned cur;
-	BUG_ON(ring->ring[offset] != 0x55aa55aa);
-
-	cur = ring->wptr - 1;
-	if (likely(cur > offset))
-		ring->ring[offset] = cur - offset;
-	else
-		ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
-}
-
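
For reference, the wrap branch of the dropped patch_cond_exec is easiest to see with numbers; a worked example with an assumed 4096-byte (1024-DW) ring:

	/* Assumed values, illustrative only:
	 * ring_size = 4096 bytes -> ring_size >> 2 = 1024 DWs.
	 * Placeholder at offset = 1020; wptr has wrapped to 4, so cur = 3.
	 * cur < offset, so the patched skip count is:
	 *   ring[offset] = 1024 - 1020 + 3 = 7
	 * exactly the seven DWs written between the placeholder and wptr. */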
/**
 * sdma_v3_0_gfx_stop - stop the gfx async dma engines
 *
@@ -1129,6 +1104,22 @@ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

+static unsigned sdma_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+	return
+		7 + 6; /* sdma_v3_0_ring_emit_ib */
+}
+
+static unsigned sdma_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+	return
+		6 + /* sdma_v3_0_ring_emit_hdp_flush */
+		3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
+		6 + /* sdma_v3_0_ring_emit_pipeline_sync */
+		12 + /* sdma_v3_0_ring_emit_vm_flush */
+		10 + 10 + 10; /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
+}
+
static int sdma_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1590,6 +1581,8 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
	.test_ib = sdma_v3_0_ring_test_ib,
	.insert_nop = sdma_v3_0_ring_insert_nop,
	.pad_ib = sdma_v3_0_ring_pad_ib,
+	.get_emit_ib_size = sdma_v3_0_ring_get_emit_ib_size,
+	.get_dma_frame_size = sdma_v3_0_ring_get_dma_frame_size,
};

static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
drivers/gpu/drm/amd/amdgpu/si.c (new file, 1965 lines)
File diff suppressed because it is too large
drivers/gpu/drm/amd/amdgpu/si.h (new file, 33 lines)
@@ -0,0 +1,33 @@
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 * (standard MIT license text, as in gmc_v6_0.h above)
 */

#ifndef __SI_H__
#define __SI_H__

extern const struct amd_ip_funcs si_common_ip_funcs;

void si_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid);
int si_set_ip_blocks(struct amdgpu_device *adev);

#endif
drivers/gpu/drm/amd/amdgpu/si_dma.c (new file, 915 lines)
@@ -0,0 +1,915 @@
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 * (standard MIT license text, as in gmc_v6_0.h above)
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "si/sid.h"

const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
	DMA0_REGISTER_OFFSET,
	DMA1_REGISTER_OFFSET
};

static void si_dma_set_ring_funcs(struct amdgpu_device *adev);
static void si_dma_set_buffer_funcs(struct amdgpu_device *adev);
static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev);
static void si_dma_set_irq_funcs(struct amdgpu_device *adev);

static uint32_t si_dma_ring_get_rptr(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs>>2];
}

static uint32_t si_dma_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

	return (RREG32(DMA_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
}

static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

	WREG32(DMA_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
}

static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
				struct amdgpu_ib *ib,
				unsigned vm_id, bool ctx_switch)
{
	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
	 * Pad as necessary with NOPs.
	 */
	while ((ring->wptr & 7) != 5)
		amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
	amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0));
	amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}
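
The padding target of 5 falls out of the packet length: the INDIRECT_BUFFER packet is 3 DWs, so starting it at wptr % 8 == 5 puts its last DW at position 7 of the 8-DW group, ending the packet exactly on the boundary. A standalone sanity check of that arithmetic (illustrative only, not driver code):

	#include <assert.h>

	int main(void)
	{
		unsigned wptr = 13;		/* any value with wptr % 8 == 5 */
		unsigned last = wptr + 2;	/* 3-DW packet: DWs 13, 14, 15 */

		assert((last + 1) % 8 == 0);	/* ends on an 8-DW boundary */
		return 0;
	}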
static void si_dma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	amdgpu_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL));
	amdgpu_ring_write(ring, 1);
}

static void si_dma_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	amdgpu_ring_write(ring, (0xf << 16) | (HDP_DEBUG0));
	amdgpu_ring_write(ring, 1);
}

/**
 * si_dma_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: GPU address to write the fence sequence number to
 * @seq: fence sequence number to write
 * @flags: fence flags
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (SI).
 */
static void si_dma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				   unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;

	/* write the fence */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
	amdgpu_ring_write(ring, seq);
	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
		amdgpu_ring_write(ring, addr & 0xfffffffc);
		amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}
	/* generate an interrupt */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0, 0));
}

static void si_dma_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl;
	unsigned i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		/* dma0 */
		rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
		rb_cntl &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
		ring->ready = false;
	}
}

static int si_dma_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, dma_cntl, ib_cntl, rb_bufsz;
	int i, r;
	uint64_t rptr_addr;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;

		WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
		WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(DMA_RB_RPTR + sdma_offsets[i], 0);
		WREG32(DMA_RB_WPTR + sdma_offsets[i], 0);

		rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);

		WREG32(DMA_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
		WREG32(DMA_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);

		rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;

		WREG32(DMA_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);

		/* enable DMA IBs */
		ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
#ifdef __BIG_ENDIAN
		ib_cntl |= DMA_IB_SWAP_ENABLE;
#endif
		WREG32(DMA_IB_CNTL + sdma_offsets[i], ib_cntl);

		dma_cntl = RREG32(DMA_CNTL + sdma_offsets[i]);
		dma_cntl &= ~CTXEMPTY_INT_ENABLE;
		WREG32(DMA_CNTL + sdma_offsets[i], dma_cntl);

		ring->wptr = 0;
		WREG32(DMA_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);

		ring->ready = true;

		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->ready = false;
			return r;
		}

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
	}

	return 0;
}

/**
 * si_dma_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (SI).
 * Returns 0 for success, error for failure.
 */
static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 4);
	if (r) {
		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
		amdgpu_wb_free(adev, index);
		return r;
	}

	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	amdgpu_wb_free(adev, index);

	return r;
}

/**
 * si_dma_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (SI).
 * Returns 0 on success, error on failure.
 */
static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct fence *f = NULL;
	unsigned index;
	u32 tmp = 0;
	u64 gpu_addr;
	long r;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err0;
	}

	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
	ib.ptr[3] = 0xDEADBEEF;
	ib.length_dw = 4;
	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
	if (r)
		goto err1;

	r = fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out\n");
		r = -ETIMEDOUT;
		goto err1;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err1;
	}
	tmp = le32_to_cpu(adev->wb.wb[index]);
	if (tmp == 0xDEADBEEF) {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	} else {
		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}

err1:
	amdgpu_ib_free(adev, &ib, NULL);
	fence_put(f);
err0:
	amdgpu_wb_free(adev, index);
	return r;
}

/**
 * si_dma_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using DMA (SI).
 */
static void si_dma_vm_copy_pte(struct amdgpu_ib *ib,
			       uint64_t pe, uint64_t src,
			       unsigned count)
{
	unsigned bytes = count * 8;

	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
					      1, 0, 0, bytes);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
	ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
}

/**
 * si_dma_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using DMA (SI).
 */
static void si_dma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				uint64_t value, unsigned count,
				uint32_t incr)
{
	unsigned ndw = count * 2;

	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}

/**
 * si_dma_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (SI).
 */
static void si_dma_vm_set_pte_pde(struct amdgpu_ib *ib,
				  uint64_t pe,
				  uint64_t addr, unsigned count,
				  uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		if (flags & AMDGPU_PTE_VALID)
			value = addr;
		else
			value = 0;

		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
		ib->ptr[ib->length_dw++] = pe; /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		ib->ptr[ib->length_dw++] = flags; /* mask */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = value; /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;
		pe += ndw * 4;
		addr += (ndw / 2) * incr;
		count -= ndw / 2;
	}
}
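
Each page entry costs two DWs of PTE data, and one PTE_PDE packet covers at most 0xFFFFE DWs, i.e. 0x7FFFF entries per pass. A worked pass of the loop above, with assumed inputs:

	/* Assumed: count = 0x90000 entries, incr = 4096 (4 KiB pages).
	 * ndw  = min(2 * 0x90000, 0xFFFFE) = 0xFFFFE DWs
	 * entries this pass = ndw / 2 = 0x7FFFF
	 * pe    += ndw * 4 bytes   (each 64-bit entry is 8 bytes)
	 * addr  += 0x7FFFF * 4096
	 * count -= 0x7FFFF, leaving 0x8001 entries for a second pass. */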
/**
 * si_dma_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to fill with padding
 */
static void si_dma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	while (ib->length_dw & 0x7)
		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
}

/**
 * si_dma_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (SI).
 */
static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0) |
			  (1 << 27)); /* Poll memory */
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, (0xff << 16) | upper_32_bits(addr)); /* retry, addr_hi */
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, seq); /* value */
	amdgpu_ring_write(ring, (3 << 28) | 0x20); /* func(equal) | poll interval */
}

/**
 * si_dma_ring_emit_vm_flush - si vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vm_id: VM context id to flush
 * @pd_addr: page directory base address
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (SI).
 */
static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
				      unsigned vm_id, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	if (vm_id < 8)
		amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
	else
		amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8)));
	amdgpu_ring_write(ring, pd_addr >> 12);

	/* bits 0-7 are the VM contexts0-7 */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	amdgpu_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST));
	amdgpu_ring_write(ring, 1 << vm_id);

	/* wait for invalidate to complete */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
	amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 0xff << 16); /* retry */
	amdgpu_ring_write(ring, 1 << vm_id); /* mask */
	amdgpu_ring_write(ring, 0); /* value */
	amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}

static unsigned si_dma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
{
	return
		7 + 3; /* si_dma_ring_emit_ib */
}

static unsigned si_dma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
{
	return
		3 + /* si_dma_ring_emit_hdp_flush */
		3 + /* si_dma_ring_emit_hdp_invalidate */
		6 + /* si_dma_ring_emit_pipeline_sync */
		12 + /* si_dma_ring_emit_vm_flush */
		9 + 9 + 9; /* si_dma_ring_emit_fence x3 for user fence, vm fence */
}
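
In total a worst-case SI DMA frame is 3 + 3 + 6 + 12 + (9 + 9 + 9) = 51 DWs before any IBs, and each IB adds at most 7 + 3 = 10 DWs: up to seven alignment NOPs plus the 3-DW IB packet.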
static int si_dma_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->sdma.num_instances = 2;

	si_dma_set_ring_funcs(adev);
	si_dma_set_buffer_funcs(adev);
	si_dma_set_vm_pte_funcs(adev);
	si_dma_set_irq_funcs(adev);

	return 0;
}

static int si_dma_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* DMA0 trap event */
	r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
	if (r)
		return r;

	/* DMA1 trap event */
	r = amdgpu_irq_add_id(adev, 244, &adev->sdma.trap_irq_1);
	if (r)
		return r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		ring->use_doorbell = false;
		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024,
				     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0), 0xf,
				     &adev->sdma.trap_irq,
				     (i == 0) ?
				     AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
				     AMDGPU_RING_TYPE_SDMA);
		if (r)
			return r;
	}

	return r;
}

static int si_dma_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	return 0;
}

static int si_dma_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_dma_start(adev);
}

static int si_dma_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_dma_stop(adev);

	return 0;
}

static int si_dma_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_dma_hw_fini(adev);
}

static int si_dma_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_dma_hw_init(adev);
}

static bool si_dma_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(SRBM_STATUS2);

	if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK))
		return false;

	return true;
}

static int si_dma_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (si_dma_is_idle(handle))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int si_dma_soft_reset(void *handle)
{
	DRM_INFO("si_dma_soft_reset --- not implemented !!!!!!!\n");
	return 0;
}

static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *src,
				     unsigned type,
				     enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	switch (type) {
	case AMDGPU_SDMA_IRQ_TRAP0:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
			sdma_cntl &= ~TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
			sdma_cntl |= TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	case AMDGPU_SDMA_IRQ_TRAP1:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
			sdma_cntl &= ~TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
			sdma_cntl |= TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return 0;
}

static int si_dma_process_trap_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	amdgpu_fence_process(&adev->sdma.instance[0].ring);

	return 0;
}

static int si_dma_process_trap_irq_1(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *source,
				     struct amdgpu_iv_entry *entry)
{
	amdgpu_fence_process(&adev->sdma.instance[1].ring);

	return 0;
}

static int si_dma_process_illegal_inst_irq(struct amdgpu_device *adev,
					   struct amdgpu_irq_src *source,
					   struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in SDMA command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}

static int si_dma_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	u32 orig, data, offset;
	int i;
	bool enable;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	enable = (state == AMD_CG_STATE_GATE) ? true : false;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			if (i == 0)
				offset = DMA0_REGISTER_OFFSET;
			else
				offset = DMA1_REGISTER_OFFSET;
			orig = data = RREG32(DMA_POWER_CNTL + offset);
			data &= ~MEM_POWER_OVERRIDE;
			if (data != orig)
				WREG32(DMA_POWER_CNTL + offset, data);
			WREG32(DMA_CLK_CTRL + offset, 0x00000100);
		}
	} else {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			if (i == 0)
				offset = DMA0_REGISTER_OFFSET;
			else
				offset = DMA1_REGISTER_OFFSET;
			orig = data = RREG32(DMA_POWER_CNTL + offset);
			data |= MEM_POWER_OVERRIDE;
			if (data != orig)
				WREG32(DMA_POWER_CNTL + offset, data);

			orig = data = RREG32(DMA_CLK_CTRL + offset);
			data = 0xff000000;
			if (data != orig)
				WREG32(DMA_CLK_CTRL + offset, data);
		}
	}

	return 0;
}

static int si_dma_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	u32 tmp;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	WREG32(DMA_PGFSM_WRITE, 0x00002000);
	WREG32(DMA_PGFSM_CONFIG, 0x100010ff);

	for (tmp = 0; tmp < 5; tmp++)
		WREG32(DMA_PGFSM_WRITE, 0);

	return 0;
}

const struct amd_ip_funcs si_dma_ip_funcs = {
	.name = "si_dma",
	.early_init = si_dma_early_init,
	.late_init = NULL,
	.sw_init = si_dma_sw_init,
	.sw_fini = si_dma_sw_fini,
	.hw_init = si_dma_hw_init,
	.hw_fini = si_dma_hw_fini,
	.suspend = si_dma_suspend,
	.resume = si_dma_resume,
	.is_idle = si_dma_is_idle,
	.wait_for_idle = si_dma_wait_for_idle,
	.soft_reset = si_dma_soft_reset,
	.set_clockgating_state = si_dma_set_clockgating_state,
	.set_powergating_state = si_dma_set_powergating_state,
};

static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
	.get_rptr = si_dma_ring_get_rptr,
	.get_wptr = si_dma_ring_get_wptr,
	.set_wptr = si_dma_ring_set_wptr,
	.parse_cs = NULL,
	.emit_ib = si_dma_ring_emit_ib,
	.emit_fence = si_dma_ring_emit_fence,
	.emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
	.emit_vm_flush = si_dma_ring_emit_vm_flush,
	.emit_hdp_flush = si_dma_ring_emit_hdp_flush,
	.emit_hdp_invalidate = si_dma_ring_emit_hdp_invalidate,
	.test_ring = si_dma_ring_test_ring,
	.test_ib = si_dma_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = si_dma_ring_pad_ib,
	.get_emit_ib_size = si_dma_ring_get_emit_ib_size,
	.get_dma_frame_size = si_dma_ring_get_dma_frame_size,
};

static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->sdma.instance[i].ring.funcs = &si_dma_ring_funcs;
}

static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = {
	.set = si_dma_set_trap_irq_state,
	.process = si_dma_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs_1 = {
	.set = si_dma_set_trap_irq_state,
	.process = si_dma_process_trap_irq_1,
};

static const struct amdgpu_irq_src_funcs si_dma_illegal_inst_irq_funcs = {
	.process = si_dma_process_illegal_inst_irq,
};

static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs;
	adev->sdma.trap_irq_1.funcs = &si_dma_trap_irq_funcs_1;
	adev->sdma.illegal_inst_irq.funcs = &si_dma_illegal_inst_irq_funcs;
}

/**
 * si_dma_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Copy GPU buffers using the DMA engine (SI).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib,
				    uint64_t src_offset,
				    uint64_t dst_offset,
				    uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
					      1, 0, 0, byte_count);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) & 0xff;
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset) & 0xff;
}

/**
 * si_dma_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (SI).
 */
static void si_dma_emit_fill_buffer(struct amdgpu_ib *ib,
				    uint32_t src_data,
				    uint64_t dst_offset,
				    uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_CONSTANT_FILL,
					      0, 0, 0, byte_count / 4);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) << 16;
}

static const struct amdgpu_buffer_funcs si_dma_buffer_funcs = {
	.copy_max_bytes = 0xffff8,
	.copy_num_dw = 5,
	.emit_copy_buffer = si_dma_emit_copy_buffer,

	.fill_max_bytes = 0xffff8,
	.fill_num_dw = 4,
	.emit_fill_buffer = si_dma_emit_fill_buffer,
};
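
copy_max_bytes = 0xffff8 is the packet's 20-bit byte count rounded down to a multiple of 8, so ttm splits larger moves: a 4 MiB move, for example, needs ceil(4194304 / 1048568) = 5 COPY packets at copy_num_dw = 5 DWs each, 25 DWs in all.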
static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs == NULL) {
		adev->mman.buffer_funcs = &si_dma_buffer_funcs;
		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
	}
}

static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
	.copy_pte = si_dma_vm_copy_pte,
	.write_pte = si_dma_vm_write_pte,
	.set_pte_pde = si_dma_vm_set_pte_pde,
};

static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vm_manager.vm_pte_funcs == NULL) {
		adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
		for (i = 0; i < adev->sdma.num_instances; i++)
			adev->vm_manager.vm_pte_rings[i] =
				&adev->sdma.instance[i].ring;

		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
	}
}
drivers/gpu/drm/amd/amdgpu/si_dma.h (new file, 29 lines)
@@ -0,0 +1,29 @@
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 * (standard MIT license text, as in gmc_v6_0.h above)
 */

#ifndef __SI_DMA_H__
#define __SI_DMA_H__

extern const struct amd_ip_funcs si_dma_ip_funcs;

#endif
drivers/gpu/drm/amd/amdgpu/si_dpm.c (new file, 7993 lines)
File diff suppressed because it is too large
drivers/gpu/drm/amd/amdgpu/si_dpm.h (new file, 1015 lines)
File diff suppressed because it is too large
drivers/gpu/drm/amd/amdgpu/si_ih.c (new file, 299 lines)
@@ -0,0 +1,299 @@
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 * (standard MIT license text, as in gmc_v6_0.h above)
 */
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "si/sid.h"
#include "si_ih.h"

static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev);

static void si_ih_enable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	adev->irq.ih.enabled = true;
}

static void si_ih_disable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	adev->irq.ih.enabled = false;
	adev->irq.ih.rptr = 0;
}

static int si_ih_irq_init(struct amdgpu_device *adev)
{
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
	u64 wptr_off;

	si_ih_disable_interrupts(adev);
	WREG32(INTERRUPT_CNTL2, adev->irq.ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
	rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);

	ih_rb_cntl = IH_WPTR_OVERFLOW_ENABLE |
		     IH_WPTR_OVERFLOW_CLEAR |
		     (rb_bufsz << 1) |
		     IH_WPTR_WRITEBACK_ENABLE;

	wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
	WREG32(IH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
	if (adev->irq.msi_enabled)
		ih_cntl |= RPTR_REARM;
	WREG32(IH_CNTL, ih_cntl);

	pci_set_master(adev->pdev);
	si_ih_enable_interrupts(adev);

	return 0;
}

static void si_ih_irq_disable(struct amdgpu_device *adev)
{
	si_ih_disable_interrupts(adev);
	mdelay(1);
}

static u32 si_ih_get_wptr(struct amdgpu_device *adev)
{
	u32 wptr, tmp;

	wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);

	if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
		wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
			 wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
		adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & adev->irq.ih.ptr_mask);
}

static void si_ih_decode_iv(struct amdgpu_device *adev,
			    struct amdgpu_iv_entry *entry)
{
	u32 ring_index = adev->irq.ih.rptr >> 2;
	uint32_t dw[4];

	dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
	dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
	dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
	dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);

	entry->src_id = dw[0] & 0xff;
	entry->src_data = dw[1] & 0xfffffff;
	entry->ring_id = dw[2] & 0xff;
	entry->vm_id = (dw[2] >> 8) & 0xff;

	adev->irq.ih.rptr += 16;
}
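
Each IH ring entry is 16 bytes, four little-endian DWs. A worked decode of one assumed entry, cross-checked against the trap source IDs registered in si_dma_sw_init above:

	/* Assumed raw entry, illustrative only:
	 * dw[0] = 0x000000E0 -> src_id   = 0xE0 (224, the DMA0 trap event)
	 * dw[1] = 0x00000000 -> src_data = 0
	 * dw[2] = 0x00000100 -> ring_id  = 0x00, vm_id = (0x100 >> 8) & 0xff = 1
	 * dw[3] is read but not decoded on SI; rptr then advances by 16 bytes. */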
static void si_ih_set_rptr(struct amdgpu_device *adev)
{
	WREG32(IH_RB_RPTR, adev->irq.ih.rptr);
}

static int si_ih_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_ih_set_interrupt_funcs(adev);

	return 0;
}

static int si_ih_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_ih_ring_init(adev, 64 * 1024, false);
	if (r)
		return r;

	return amdgpu_irq_init(adev);
}

static int si_ih_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_fini(adev);
	amdgpu_ih_ring_fini(adev);

	return 0;
}

static int si_ih_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_ih_irq_init(adev);
}

static int si_ih_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_ih_irq_disable(adev);

	return 0;
}

static int si_ih_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_ih_hw_fini(adev);
}

static int si_ih_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_ih_hw_init(adev);
}

static bool si_ih_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(SRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		return false;

	return true;
}

static int si_ih_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (si_ih_is_idle(handle))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int si_ih_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(SRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;

	if (srbm_soft_reset) {
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);
	}

	return 0;
}

static int si_ih_set_clockgating_state(void *handle,
				       enum amd_clockgating_state state)
{
	return 0;
}

static int si_ih_set_powergating_state(void *handle,
				       enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs si_ih_ip_funcs = {
	.name = "si_ih",
	.early_init = si_ih_early_init,
	.late_init = NULL,
	.sw_init = si_ih_sw_init,
	.sw_fini = si_ih_sw_fini,
	.hw_init = si_ih_hw_init,
	.hw_fini = si_ih_hw_fini,
	.suspend = si_ih_suspend,
	.resume = si_ih_resume,
	.is_idle = si_ih_is_idle,
	.wait_for_idle = si_ih_wait_for_idle,
	.soft_reset = si_ih_soft_reset,
	.set_clockgating_state = si_ih_set_clockgating_state,
	.set_powergating_state = si_ih_set_powergating_state,
};

static const struct amdgpu_ih_funcs si_ih_funcs = {
	.get_wptr = si_ih_get_wptr,
	.decode_iv = si_ih_decode_iv,
	.set_rptr = si_ih_set_rptr
};

static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
	if (adev->irq.ih_funcs == NULL)
		adev->irq.ih_funcs = &si_ih_funcs;
}
drivers/gpu/drm/amd/amdgpu/si_ih.h (new file, 29 lines)
@@ -0,0 +1,29 @@
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 * (standard MIT license text, as in gmc_v6_0.h above)
 */

#ifndef __SI_IH_H__
#define __SI_IH_H__

extern const struct amd_ip_funcs si_ih_ip_funcs;

#endif
273
drivers/gpu/drm/amd/amdgpu/si_smc.c
Normal file
273
drivers/gpu/drm/amd/amdgpu/si_smc.c
Normal file
@ -0,0 +1,273 @@
/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "si/sid.h"
#include "ppsmc.h"
#include "amdgpu_ucode.h"
#include "sislands_smc.h"

static int si_set_smc_sram_address(struct amdgpu_device *adev,
				   u32 smc_address, u32 limit)
{
	if (smc_address & 3)
		return -EINVAL;
	if ((smc_address + 3) > limit)
		return -EINVAL;

	WREG32(SMC_IND_INDEX_0, smc_address);
	WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);

	return 0;
}

int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
				u32 smc_start_address,
				const u8 *src, u32 byte_count, u32 limit)
{
	unsigned long flags;
	int ret = 0;
	u32 data, original_data, addr, extra_shift;

	if (smc_start_address & 3)
		return -EINVAL;
	if ((smc_start_address + byte_count) > limit)
		return -EINVAL;

	addr = smc_start_address;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	while (byte_count >= 4) {
		/* SMC address space is BE */
		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];

		ret = si_set_smc_sram_address(adev, addr, limit);
		if (ret)
			goto done;

		WREG32(SMC_IND_DATA_0, data);

		src += 4;
		byte_count -= 4;
		addr += 4;
	}

	/* RMW for the final bytes */
	if (byte_count > 0) {
		data = 0;

		ret = si_set_smc_sram_address(adev, addr, limit);
		if (ret)
			goto done;

		original_data = RREG32(SMC_IND_DATA_0);
		extra_shift = 8 * (4 - byte_count);

		while (byte_count > 0) {
			/* SMC address space is BE */
			data = (data << 8) + *src++;
			byte_count--;
		}

		data <<= extra_shift;
		data |= (original_data & ~((~0UL) << extra_shift));

		ret = si_set_smc_sram_address(adev, addr, limit);
		if (ret)
			goto done;

		WREG32(SMC_IND_DATA_0, data);
	}

done:
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return ret;
}
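[Editor's aside: the copy loop above packs four source bytes into one big-endian word, and the tail path read-modify-writes the last partial word so the bytes past the buffer end survive. A minimal userspace sketch of just that arithmetic, not kernel code; pack_be32/merge_tail_be32 are illustrative names:

#include <stdint.h>
#include <stdio.h>

/* Pack 4 source bytes into one big-endian word, as the copy loop does. */
static uint32_t pack_be32(const uint8_t *src)
{
	return ((uint32_t)src[0] << 24) | ((uint32_t)src[1] << 16) |
	       ((uint32_t)src[2] << 8) | (uint32_t)src[3];
}

/* Merge a 1-3 byte tail into an existing word without clobbering the
 * bytes past the end of the buffer (the read-modify-write step). */
static uint32_t merge_tail_be32(uint32_t original, const uint8_t *src,
				uint32_t byte_count)
{
	uint32_t data = 0;
	uint32_t extra_shift = 8 * (4 - byte_count);

	while (byte_count--)
		data = (data << 8) + *src++;

	data <<= extra_shift;
	data |= original & ~(~0UL << extra_shift);
	return data;
}

int main(void)
{
	const uint8_t word[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
	const uint8_t tail[2] = { 0xAA, 0xBB };

	printf("%08x\n", (unsigned)pack_be32(word));               /* deadbeef */
	printf("%08x\n", (unsigned)merge_tail_be32(0x11223344, tail, 2));
	/* prints aabb3344: the tail lands in the high-order (lowest-address)
	 * bytes, the old low-order bytes survive */
	return 0;
}

End of aside.]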
void amdgpu_si_start_smc(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);

	tmp &= ~RST_REG;

	WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
}

void amdgpu_si_reset_smc(struct amdgpu_device *adev)
{
	u32 tmp;

	RREG32(CB_CGTT_SCLK_CTRL);
	RREG32(CB_CGTT_SCLK_CTRL);
	RREG32(CB_CGTT_SCLK_CTRL);
	RREG32(CB_CGTT_SCLK_CTRL);

	tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL) |
	      RST_REG;
	WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
}

int amdgpu_si_program_jump_on_start(struct amdgpu_device *adev)
{
	static const u8 data[] = { 0x0E, 0x00, 0x40, 0x40 };

	return amdgpu_si_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
}

void amdgpu_si_smc_clock(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);

	if (enable)
		tmp &= ~CK_DISABLE;
	else
		tmp |= CK_DISABLE;

	WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
}

bool amdgpu_si_is_smc_running(struct amdgpu_device *adev)
{
	u32 rst = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
	u32 clk = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);

	if (!(rst & RST_REG) && !(clk & CK_DISABLE))
		return true;

	return false;
}

PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev,
				       PPSMC_Msg msg)
{
	u32 tmp;
	int i;

	if (!amdgpu_si_is_smc_running(adev))
		return PPSMC_Result_Failed;

	WREG32(SMC_MESSAGE_0, msg);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(SMC_RESP_0);
		if (tmp != 0)
			break;
		udelay(1);
	}

	return (PPSMC_Result)RREG32(SMC_RESP_0);
}
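[Editor's aside: the message path is a plain mailbox handshake: write the opcode into the message register, then poll the response register until it goes non-zero or the timeout budget runs out. A small model of that handshake against mocked registers; the register variables, the three-poll ack, and the timeout value are all illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

/* Mocked mailbox registers standing in for SMC_MESSAGE_0 / SMC_RESP_0. */
static uint32_t mbox_msg, mbox_resp;

/* Pretend firmware: acknowledge any message on the third poll. */
static uint32_t read_resp(void)
{
	static int polls;
	if (++polls >= 3)
		mbox_resp = 1; /* an OK-style acknowledgement */
	return mbox_resp;
}

static uint32_t send_msg(uint32_t msg, int timeout_us)
{
	int i;

	mbox_resp = 0;
	mbox_msg = msg; /* 1) write the opcode */

	for (i = 0; i < timeout_us; i++) { /* 2) poll for the ack */
		if (read_resp() != 0)
			break;
	}
	return mbox_resp; /* 0 here means the firmware never answered */
}

int main(void)
{
	printf("resp = %u\n", (unsigned)send_msg(0x51, 100)); /* resp = 1 */
	return 0;
}

End of aside.]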
PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev)
{
	u32 tmp;
	int i;

	if (!amdgpu_si_is_smc_running(adev))
		return PPSMC_Result_OK;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
		if ((tmp & CKEN) == 0)
			break;
		udelay(1);
	}

	return PPSMC_Result_OK;
}

int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit)
{
	const struct smc_firmware_header_v1_0 *hdr;
	unsigned long flags;
	u32 ucode_start_address;
	u32 ucode_size;
	const u8 *src;
	u32 data;

	if (!adev->pm.fw)
		return -EINVAL;

	hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;

	amdgpu_ucode_print_smc_hdr(&hdr->header);

	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
	ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
	src = (const u8 *)
		(adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	if (ucode_size & 3)
		return -EINVAL;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(SMC_IND_INDEX_0, ucode_start_address);
	WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
	while (ucode_size >= 4) {
		/* SMC address space is BE */
		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];

		WREG32(SMC_IND_DATA_0, data);

		src += 4;
		ucode_size -= 4;
	}
	WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return 0;
}

int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
				  u32 *value, u32 limit)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	ret = si_set_smc_sram_address(adev, smc_address, limit);
	if (ret == 0)
		*value = RREG32(SMC_IND_DATA_0);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return ret;
}

int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
				   u32 value, u32 limit)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	ret = si_set_smc_sram_address(adev, smc_address, limit);
	if (ret == 0)
		WREG32(SMC_IND_DATA_0, value);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return ret;
}
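[Editor's aside: every SRAM access above goes through one index/data register pair, and the ucode loader turns on auto-increment so it can stream words without rewriting the index each time. A userspace model of that access pattern; the array stands in for the SRAM and the struct for the index/data port, all names illustrative:

#include <stdint.h>
#include <stdio.h>

#define SRAM_WORDS 16

/* Model of an index/data indirect port with optional auto-increment. */
struct ind_port {
	uint32_t sram[SRAM_WORDS];
	uint32_t index;     /* byte address, like SMC_IND_INDEX_0 */
	int auto_increment; /* like the AUTO_INCREMENT_IND_0 bit */
};

static void port_write_data(struct ind_port *p, uint32_t data)
{
	p->sram[(p->index / 4) % SRAM_WORDS] = data;
	if (p->auto_increment)
		p->index += 4; /* the "hardware" advances the address */
}

int main(void)
{
	struct ind_port p = { .index = 0, .auto_increment = 1 };
	uint32_t i;

	/* Stream a few words; only the initial index setup is needed. */
	for (i = 0; i < 4; i++)
		port_write_data(&p, 0xCAFE0000u + i);

	for (i = 0; i < 4; i++)
		printf("sram[%u] = %08x\n", (unsigned)i, (unsigned)p.sram[i]);
	return 0;
}

End of aside.]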
422 drivers/gpu/drm/amd/amdgpu/sislands_smc.h Normal file
@ -0,0 +1,422 @@
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#ifndef PP_SISLANDS_SMC_H
#define PP_SISLANDS_SMC_H

#include "ppsmc.h"

#pragma pack(push, 1)

#define SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16

struct PP_SIslands_Dpm2PerfLevel
{
	uint8_t MaxPS;
	uint8_t TgtAct;
	uint8_t MaxPS_StepInc;
	uint8_t MaxPS_StepDec;
	uint8_t PSSamplingTime;
	uint8_t NearTDPDec;
	uint8_t AboveSafeInc;
	uint8_t BelowSafeInc;
	uint8_t PSDeltaLimit;
	uint8_t PSDeltaWin;
	uint16_t PwrEfficiencyRatio;
	uint8_t Reserved[4];
};

typedef struct PP_SIslands_Dpm2PerfLevel PP_SIslands_Dpm2PerfLevel;

struct PP_SIslands_DPM2Status
{
	uint32_t dpm2Flags;
	uint8_t CurrPSkip;
	uint8_t CurrPSkipPowerShift;
	uint8_t CurrPSkipTDP;
	uint8_t CurrPSkipOCP;
	uint8_t MaxSPLLIndex;
	uint8_t MinSPLLIndex;
	uint8_t CurrSPLLIndex;
	uint8_t InfSweepMode;
	uint8_t InfSweepDir;
	uint8_t TDPexceeded;
	uint8_t reserved;
	uint8_t SwitchDownThreshold;
	uint32_t SwitchDownCounter;
	uint32_t SysScalingFactor;
};

typedef struct PP_SIslands_DPM2Status PP_SIslands_DPM2Status;

struct PP_SIslands_DPM2Parameters
{
	uint32_t TDPLimit;
	uint32_t NearTDPLimit;
	uint32_t SafePowerLimit;
	uint32_t PowerBoostLimit;
	uint32_t MinLimitDelta;
};
typedef struct PP_SIslands_DPM2Parameters PP_SIslands_DPM2Parameters;

struct PP_SIslands_PAPMStatus
{
	uint32_t EstimatedDGPU_T;
	uint32_t EstimatedDGPU_P;
	uint32_t EstimatedAPU_T;
	uint32_t EstimatedAPU_P;
	uint8_t dGPU_T_Limit_Exceeded;
	uint8_t reserved[3];
};
typedef struct PP_SIslands_PAPMStatus PP_SIslands_PAPMStatus;

struct PP_SIslands_PAPMParameters
{
	uint32_t NearTDPLimitTherm;
	uint32_t NearTDPLimitPAPM;
	uint32_t PlatformPowerLimit;
	uint32_t dGPU_T_Limit;
	uint32_t dGPU_T_Warning;
	uint32_t dGPU_T_Hysteresis;
};
typedef struct PP_SIslands_PAPMParameters PP_SIslands_PAPMParameters;

struct SISLANDS_SMC_SCLK_VALUE
{
	uint32_t vCG_SPLL_FUNC_CNTL;
	uint32_t vCG_SPLL_FUNC_CNTL_2;
	uint32_t vCG_SPLL_FUNC_CNTL_3;
	uint32_t vCG_SPLL_FUNC_CNTL_4;
	uint32_t vCG_SPLL_SPREAD_SPECTRUM;
	uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
	uint32_t sclk_value;
};

typedef struct SISLANDS_SMC_SCLK_VALUE SISLANDS_SMC_SCLK_VALUE;

struct SISLANDS_SMC_MCLK_VALUE
{
	uint32_t vMPLL_FUNC_CNTL;
	uint32_t vMPLL_FUNC_CNTL_1;
	uint32_t vMPLL_FUNC_CNTL_2;
	uint32_t vMPLL_AD_FUNC_CNTL;
	uint32_t vMPLL_DQ_FUNC_CNTL;
	uint32_t vMCLK_PWRMGT_CNTL;
	uint32_t vDLL_CNTL;
	uint32_t vMPLL_SS;
	uint32_t vMPLL_SS2;
	uint32_t mclk_value;
};

typedef struct SISLANDS_SMC_MCLK_VALUE SISLANDS_SMC_MCLK_VALUE;

struct SISLANDS_SMC_VOLTAGE_VALUE
{
	uint16_t value;
	uint8_t index;
	uint8_t phase_settings;
};

typedef struct SISLANDS_SMC_VOLTAGE_VALUE SISLANDS_SMC_VOLTAGE_VALUE;

struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL
{
	uint8_t ACIndex;
	uint8_t displayWatermark;
	uint8_t gen2PCIE;
	uint8_t UVDWatermark;
	uint8_t VCEWatermark;
	uint8_t strobeMode;
	uint8_t mcFlags;
	uint8_t padding;
	uint32_t aT;
	uint32_t bSP;
	SISLANDS_SMC_SCLK_VALUE sclk;
	SISLANDS_SMC_MCLK_VALUE mclk;
	SISLANDS_SMC_VOLTAGE_VALUE vddc;
	SISLANDS_SMC_VOLTAGE_VALUE mvdd;
	SISLANDS_SMC_VOLTAGE_VALUE vddci;
	SISLANDS_SMC_VOLTAGE_VALUE std_vddc;
	uint8_t hysteresisUp;
	uint8_t hysteresisDown;
	uint8_t stateFlags;
	uint8_t arbRefreshState;
	uint32_t SQPowerThrottle;
	uint32_t SQPowerThrottle_2;
	uint32_t MaxPoweredUpCU;
	SISLANDS_SMC_VOLTAGE_VALUE high_temp_vddc;
	SISLANDS_SMC_VOLTAGE_VALUE low_temp_vddc;
	uint32_t reserved[2];
	PP_SIslands_Dpm2PerfLevel dpm2;
};

#define SISLANDS_SMC_STROBE_RATIO 0x0F
#define SISLANDS_SMC_STROBE_ENABLE 0x10

#define SISLANDS_SMC_MC_EDC_RD_FLAG 0x01
#define SISLANDS_SMC_MC_EDC_WR_FLAG 0x02
#define SISLANDS_SMC_MC_RTT_ENABLE 0x04
#define SISLANDS_SMC_MC_STUTTER_EN 0x08
#define SISLANDS_SMC_MC_PG_EN 0x10

typedef struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL SISLANDS_SMC_HW_PERFORMANCE_LEVEL;

struct SISLANDS_SMC_SWSTATE
{
	uint8_t flags;
	uint8_t levelCount;
	uint8_t padding2;
	uint8_t padding3;
	SISLANDS_SMC_HW_PERFORMANCE_LEVEL levels[1];
};

typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE;

#define SISLANDS_SMC_VOLTAGEMASK_VDDC 0
#define SISLANDS_SMC_VOLTAGEMASK_MVDD 1
#define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2
#define SISLANDS_SMC_VOLTAGEMASK_MAX 4

struct SISLANDS_SMC_VOLTAGEMASKTABLE
{
	uint32_t lowMask[SISLANDS_SMC_VOLTAGEMASK_MAX];
};

typedef struct SISLANDS_SMC_VOLTAGEMASKTABLE SISLANDS_SMC_VOLTAGEMASKTABLE;

#define SISLANDS_MAX_NO_VREG_STEPS 32

struct SISLANDS_SMC_STATETABLE
{
	uint8_t thermalProtectType;
	uint8_t systemFlags;
	uint8_t maxVDDCIndexInPPTable;
	uint8_t extraFlags;
	uint32_t lowSMIO[SISLANDS_MAX_NO_VREG_STEPS];
	SISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable;
	SISLANDS_SMC_VOLTAGEMASKTABLE phaseMaskTable;
	PP_SIslands_DPM2Parameters dpm2Params;
	SISLANDS_SMC_SWSTATE initialState;
	SISLANDS_SMC_SWSTATE ACPIState;
	SISLANDS_SMC_SWSTATE ULVState;
	SISLANDS_SMC_SWSTATE driverState;
	SISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
};

typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE;

#define SI_SMC_SOFT_REGISTER_mclk_chg_timeout 0x0
#define SI_SMC_SOFT_REGISTER_delay_vreg 0xC
#define SI_SMC_SOFT_REGISTER_delay_acpi 0x28
#define SI_SMC_SOFT_REGISTER_seq_index 0x5C
#define SI_SMC_SOFT_REGISTER_mvdd_chg_time 0x60
#define SI_SMC_SOFT_REGISTER_mclk_switch_lim 0x70
#define SI_SMC_SOFT_REGISTER_watermark_threshold 0x78
#define SI_SMC_SOFT_REGISTER_phase_shedding_delay 0x88
#define SI_SMC_SOFT_REGISTER_ulv_volt_change_delay 0x8C
#define SI_SMC_SOFT_REGISTER_mc_block_delay 0x98
#define SI_SMC_SOFT_REGISTER_ticks_per_us 0xA8
#define SI_SMC_SOFT_REGISTER_crtc_index 0xC4
#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min 0xC8
#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max 0xCC
#define SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width 0xF4
#define SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen 0xFC
#define SI_SMC_SOFT_REGISTER_vr_hot_gpio 0x100
#define SI_SMC_SOFT_REGISTER_svi_rework_plat_type 0x118
#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd 0x11c
#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc 0x120

struct PP_SIslands_FanTable
{
	uint8_t fdo_mode;
	uint8_t padding;
	int16_t temp_min;
	int16_t temp_med;
	int16_t temp_max;
	int16_t slope1;
	int16_t slope2;
	int16_t fdo_min;
	int16_t hys_up;
	int16_t hys_down;
	int16_t hys_slope;
	int16_t temp_resp_lim;
	int16_t temp_curr;
	int16_t slope_curr;
	int16_t pwm_curr;
	uint32_t refresh_period;
	int16_t fdo_max;
	uint8_t temp_src;
	int8_t padding2;
};

typedef struct PP_SIslands_FanTable PP_SIslands_FanTable;

#define SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
#define SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 32

#define SMC_SISLANDS_SCALE_I 7
#define SMC_SISLANDS_SCALE_R 12

struct PP_SIslands_CacConfig
{
	uint16_t cac_lkge_lut[SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES];
	uint32_t lkge_lut_V0;
	uint32_t lkge_lut_Vstep;
	uint32_t WinTime;
	uint32_t R_LL;
	uint32_t calculation_repeats;
	uint32_t l2numWin_TDP;
	uint32_t dc_cac;
	uint8_t lts_truncate_n;
	uint8_t SHIFT_N;
	uint8_t log2_PG_LKG_SCALE;
	uint8_t cac_temp;
	uint32_t lkge_lut_T0;
	uint32_t lkge_lut_Tstep;
};

typedef struct PP_SIslands_CacConfig PP_SIslands_CacConfig;

#define SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE 16
#define SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20

struct SMC_SIslands_MCRegisterAddress
{
	uint16_t s0;
	uint16_t s1;
};

typedef struct SMC_SIslands_MCRegisterAddress SMC_SIslands_MCRegisterAddress;

struct SMC_SIslands_MCRegisterSet
{
	uint32_t value[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
};

typedef struct SMC_SIslands_MCRegisterSet SMC_SIslands_MCRegisterSet;

struct SMC_SIslands_MCRegisters
{
	uint8_t last;
	uint8_t reserved[3];
	SMC_SIslands_MCRegisterAddress address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
	SMC_SIslands_MCRegisterSet data[SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT];
};

typedef struct SMC_SIslands_MCRegisters SMC_SIslands_MCRegisters;

struct SMC_SIslands_MCArbDramTimingRegisterSet
{
	uint32_t mc_arb_dram_timing;
	uint32_t mc_arb_dram_timing2;
	uint8_t mc_arb_rfsh_rate;
	uint8_t mc_arb_burst_time;
	uint8_t padding[2];
};

typedef struct SMC_SIslands_MCArbDramTimingRegisterSet SMC_SIslands_MCArbDramTimingRegisterSet;

struct SMC_SIslands_MCArbDramTimingRegisters
{
	uint8_t arb_current;
	uint8_t reserved[3];
	SMC_SIslands_MCArbDramTimingRegisterSet data[16];
};

typedef struct SMC_SIslands_MCArbDramTimingRegisters SMC_SIslands_MCArbDramTimingRegisters;

struct SMC_SISLANDS_SPLL_DIV_TABLE
{
	uint32_t freq[256];
	uint32_t ss[256];
};

#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK 0x01ffffff
#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT 0
#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK 0xfe000000
#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT 25
#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK 0x000fffff
#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT 0
#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK 0xfff00000
#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT 20

typedef struct SMC_SISLANDS_SPLL_DIV_TABLE SMC_SISLANDS_SPLL_DIV_TABLE;
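[Editor's aside: each entry of the freq[]/ss[] arrays packs two bitfields into one dword via the mask/shift pairs above (fbdiv and pdiv in freq[], clk_v and clk_s in ss[]). A small runnable sketch of the pack/unpack arithmetic, reusing the fbdiv/pdiv values; the helper name is illustrative:

#include <stdint.h>
#include <stdio.h>

#define FBDIV_MASK  0x01ffffff
#define FBDIV_SHIFT 0
#define PDIV_MASK   0xfe000000
#define PDIV_SHIFT  25

/* Pack pdiv and fbdiv into a single freq[] entry. */
static uint32_t spll_freq_pack(uint32_t pdiv, uint32_t fbdiv)
{
	return ((pdiv << PDIV_SHIFT) & PDIV_MASK) |
	       ((fbdiv << FBDIV_SHIFT) & FBDIV_MASK);
}

int main(void)
{
	uint32_t entry = spll_freq_pack(4, 0x12345);

	/* Unpack both fields again with the same masks. */
	printf("pdiv=%u fbdiv=0x%x\n",
	       (unsigned)((entry & PDIV_MASK) >> PDIV_SHIFT),
	       (unsigned)((entry & FBDIV_MASK) >> FBDIV_SHIFT));
	return 0;
}

End of aside.]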
#define SMC_SISLANDS_DTE_MAX_FILTER_STAGES 5

#define SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE 16

struct Smc_SIslands_DTE_Configuration
{
	uint32_t tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
	uint32_t R[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
	uint32_t K;
	uint32_t T0;
	uint32_t MaxT;
	uint8_t WindowSize;
	uint8_t Tdep_count;
	uint8_t temp_select;
	uint8_t DTE_mode;
	uint8_t T_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
	uint32_t Tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
	uint32_t Tdep_R[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
	uint32_t Tthreshold;
};

typedef struct Smc_SIslands_DTE_Configuration Smc_SIslands_DTE_Configuration;

#define SMC_SISLANDS_DTE_STATUS_FLAG_DTE_ON 1

#define SISLANDS_SMC_FIRMWARE_HEADER_LOCATION 0x10000

#define SISLANDS_SMC_FIRMWARE_HEADER_version 0x0
#define SISLANDS_SMC_FIRMWARE_HEADER_flags 0x4
#define SISLANDS_SMC_FIRMWARE_HEADER_softRegisters 0xC
#define SISLANDS_SMC_FIRMWARE_HEADER_stateTable 0x10
#define SISLANDS_SMC_FIRMWARE_HEADER_fanTable 0x14
#define SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable 0x18
#define SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable 0x24
#define SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable 0x30
#define SISLANDS_SMC_FIRMWARE_HEADER_spllTable 0x38
#define SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration 0x40
#define SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters 0x48

#pragma pack(pop)

int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
				u32 smc_start_address,
				const u8 *src, u32 byte_count, u32 limit);
void amdgpu_si_start_smc(struct amdgpu_device *adev);
void amdgpu_si_reset_smc(struct amdgpu_device *adev);
int amdgpu_si_program_jump_on_start(struct amdgpu_device *adev);
void amdgpu_si_smc_clock(struct amdgpu_device *adev, bool enable);
bool amdgpu_si_is_smc_running(struct amdgpu_device *adev);
PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg);
PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev);
int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit);
int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
				  u32 *value, u32 limit);
int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
				   u32 value, u32 limit);

#endif
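[Editor's aside: the firmware header at SISLANDS_SMC_FIRMWARE_HEADER_LOCATION is a table of dword slots, each holding the SMC address of one table. To find, say, the fan table, read the dword at the header base plus the fanTable slot. A tiny runnable sketch of just that address arithmetic; the driver would then pass the slot address to amdgpu_si_read_smc_sram_dword() with an appropriate limit:

#include <stdint.h>
#include <stdio.h>

#define HEADER_LOCATION 0x10000 /* SISLANDS_SMC_FIRMWARE_HEADER_LOCATION */
#define HEADER_fanTable 0x14    /* slot offset within the header */

int main(void)
{
	/* The dword at this SMC address holds where the fan table lives. */
	uint32_t slot_addr = HEADER_LOCATION + HEADER_fanTable;

	printf("fan-table pointer lives at SMC address 0x%x\n",
	       (unsigned)slot_addr); /* 0x10014 */
	return 0;
}

End of aside.]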
@ -526,6 +526,20 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
	amdgpu_ring_write(ring, ib->length_dw);
}

static unsigned uvd_v4_2_ring_get_emit_ib_size(struct amdgpu_ring *ring)
{
	return
		4; /* uvd_v4_2_ring_emit_ib */
}

static unsigned uvd_v4_2_ring_get_dma_frame_size(struct amdgpu_ring *ring)
{
	return
		2 + /* uvd_v4_2_ring_emit_hdp_flush */
		2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
		14; /* uvd_v4_2_ring_emit_fence x1 no user fence */
}

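[Editor's aside: these callback pairs, added across every ring type in this series, let common ring code budget worst-case command-stream space per frame instead of hard-coding per-IP sizes: reserve get_dma_frame_size() plus get_emit_ib_size() per IB before emitting. A minimal illustration of that accounting; the reduced funcs struct and helper are illustrative, the dword counts are the UVD 4.2 ones above:

#include <stdio.h>

/* Reduced stand-in for amdgpu_ring_funcs: just the two size hooks. */
struct ring_funcs {
	unsigned (*get_emit_ib_size)(void);
	unsigned (*get_dma_frame_size)(void);
};

static unsigned uvd42_emit_ib_size(void)   { return 4; }
static unsigned uvd42_dma_frame_size(void) { return 2 + 2 + 14; }

/* Worst-case dwords a submission with num_ibs IBs may emit. */
static unsigned frame_budget(const struct ring_funcs *f, unsigned num_ibs)
{
	return f->get_dma_frame_size() + num_ibs * f->get_emit_ib_size();
}

int main(void)
{
	struct ring_funcs uvd = {
		.get_emit_ib_size = uvd42_emit_ib_size,
		.get_dma_frame_size = uvd42_dma_frame_size,
	};

	printf("budget for 2 IBs: %u dwords\n", frame_budget(&uvd, 2)); /* 26 */
	return 0;
}

End of aside.]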
/**
 * uvd_v4_2_mc_resume - memory controller programming
 *
@ -756,6 +770,8 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.get_emit_ib_size = uvd_v4_2_ring_get_emit_ib_size,
	.get_dma_frame_size = uvd_v4_2_ring_get_dma_frame_size,
};

static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
@ -577,6 +577,20 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
	amdgpu_ring_write(ring, ib->length_dw);
}

static unsigned uvd_v5_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
{
	return
		6; /* uvd_v5_0_ring_emit_ib */
}

static unsigned uvd_v5_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
{
	return
		2 + /* uvd_v5_0_ring_emit_hdp_flush */
		2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
		14; /* uvd_v5_0_ring_emit_fence x1 no user fence */
}

static bool uvd_v5_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@ -807,6 +821,8 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.get_emit_ib_size = uvd_v5_0_ring_get_emit_ib_size,
	.get_dma_frame_size = uvd_v5_0_ring_get_dma_frame_size,
};

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
@ -725,6 +725,31 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
	amdgpu_ring_write(ring, 0xE);
}

static unsigned uvd_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
{
	return
		8; /* uvd_v6_0_ring_emit_ib */
}

static unsigned uvd_v6_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
{
	return
		2 + /* uvd_v6_0_ring_emit_hdp_flush */
		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		14; /* uvd_v6_0_ring_emit_fence x1 no user fence */
}

static unsigned uvd_v6_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
{
	return
		2 + /* uvd_v6_0_ring_emit_hdp_flush */
		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		20 + /* uvd_v6_0_ring_emit_vm_flush */
		14 + 14; /* uvd_v6_0_ring_emit_fence x2 vm fence */
}

static bool uvd_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@ -1037,6 +1062,8 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
	.get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
@ -1056,6 +1083,8 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
	.get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size_vm,
};

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
@ -30,10 +30,10 @@
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "cikd.h"

#include "vce/vce_2_0_d.h"
#include "vce/vce_2_0_sh_mask.h"

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

@ -193,6 +193,8 @@ static int vce_v2_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vce.num_rings = 2;

	vce_v2_0_set_ring_funcs(adev);
	vce_v2_0_set_irq_funcs(adev);

@ -202,7 +204,7 @@ static int vce_v2_0_early_init(void *handle)
static int vce_v2_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* VCE */
@ -219,19 +221,14 @@ static int vce_v2_0_sw_init(void *handle)
	if (r)
		return r;

	ring = &adev->vce.ring[0];
	sprintf(ring->name, "vce0");
	r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
	if (r)
		return r;

	ring = &adev->vce.ring[1];
	sprintf(ring->name, "vce1");
	r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
	if (r)
		return r;
	for (i = 0; i < adev->vce.num_rings; i++) {
		ring = &adev->vce.ring[i];
		sprintf(ring->name, "vce%d", i);
		r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
				     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
		if (r)
			return r;
	}

	return r;
}
@ -254,29 +251,23 @@ static int vce_v2_0_sw_fini(void *handle)

static int vce_v2_0_hw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v2_0_start(adev);
	/* this error means the VCPU is not in running state, so just skip the ring test and don't stop driver init */
	if (r)
		/* this error means the VCPU is not in running state, so just skip the ring test and don't stop driver init */
		return 0;

	ring = &adev->vce.ring[0];
	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		return r;
	}
	for (i = 0; i < adev->vce.num_rings; i++)
		adev->vce.ring[i].ready = false;

	ring = &adev->vce.ring[1];
	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		return r;
	for (i = 0; i < adev->vce.num_rings; i++) {
		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
		if (r)
			return r;
		else
			adev->vce.ring[i].ready = true;
	}

	DRM_INFO("VCE initialized successfully.\n");
@ -548,11 +539,28 @@ static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
	return 0;
}

static void vce_v2_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

	if (enable)
		tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
	else
		tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;

	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}

static int vce_v2_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;

	vce_v2_0_set_bypass_mode(adev, enable);

	if (state == AMD_CG_STATE_GATE)
		gate = true;
@ -614,12 +622,16 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
	.get_emit_ib_size = amdgpu_vce_ring_get_emit_ib_size,
	.get_dma_frame_size = amdgpu_vce_ring_get_dma_frame_size,
};

static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->vce.ring[0].funcs = &vce_v2_0_ring_funcs;
	adev->vce.ring[1].funcs = &vce_v2_0_ring_funcs;
	int i;

	for (i = 0; i < adev->vce.num_rings; i++)
		adev->vce.ring[i].funcs = &vce_v2_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
@ -70,8 +70,10 @@ static uint32_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_RPTR);
	else
	else if (ring == &adev->vce.ring[1])
		return RREG32(mmVCE_RB_RPTR2);
	else
		return RREG32(mmVCE_RB_RPTR3);
}

/**
@ -87,8 +89,10 @@ static uint32_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_WPTR);
	else
	else if (ring == &adev->vce.ring[1])
		return RREG32(mmVCE_RB_WPTR2);
	else
		return RREG32(mmVCE_RB_WPTR3);
}

/**
@ -104,8 +108,10 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)

	if (ring == &adev->vce.ring[0])
		WREG32(mmVCE_RB_WPTR, ring->wptr);
	else
	else if (ring == &adev->vce.ring[1])
		WREG32(mmVCE_RB_WPTR2, ring->wptr);
	else
		WREG32(mmVCE_RB_WPTR3, ring->wptr);
}

static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
@ -229,6 +235,13 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

	ring = &adev->vce.ring[2];
	WREG32(mmVCE_RB_RPTR3, ring->wptr);
	WREG32(mmVCE_RB_WPTR3, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);

	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
@ -345,6 +358,8 @@ static int vce_v3_0_early_init(void *handle)
	    (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
		return -ENOENT;

	adev->vce.num_rings = 3;

	vce_v3_0_set_ring_funcs(adev);
	vce_v3_0_set_irq_funcs(adev);

@ -355,7 +370,7 @@
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int r;
	int r, i;

	/* VCE */
	r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
@ -371,19 +386,14 @@ static int vce_v3_0_sw_init(void *handle)
	if (r)
		return r;

	ring = &adev->vce.ring[0];
	sprintf(ring->name, "vce0");
	r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
	if (r)
		return r;

	ring = &adev->vce.ring[1];
	sprintf(ring->name, "vce1");
	r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
	if (r)
		return r;
	for (i = 0; i < adev->vce.num_rings; i++) {
		ring = &adev->vce.ring[i];
		sprintf(ring->name, "vce%d", i);
		r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
				     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
		if (r)
			return r;
	}

	return r;
}
@ -413,10 +423,10 @@ static int vce_v3_0_hw_init(void *handle)
	if (r)
		return r;

	adev->vce.ring[0].ready = false;
	adev->vce.ring[1].ready = false;
	for (i = 0; i < adev->vce.num_rings; i++)
		adev->vce.ring[i].ready = false;

	for (i = 0; i < 2; i++) {
	for (i = 0; i < adev->vce.num_rings; i++) {
		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
		if (r)
			return r;
@ -674,6 +684,7 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
	switch (entry->src_data) {
	case 0:
	case 1:
	case 2:
		amdgpu_fence_process(&adev->vce.ring[entry->src_data]);
		break;
	default:
@ -685,7 +696,7 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
	return 0;
}

static void vce_v3_set_bypass_mode(struct amdgpu_device *adev, bool enable)
static void vce_v3_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

@ -704,8 +715,9 @@ static int vce_v3_0_set_clockgating_state(void *handle,
	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
	int i;

	if (adev->asic_type == CHIP_POLARIS10)
		vce_v3_set_bypass_mode(adev, enable);
	if ((adev->asic_type == CHIP_POLARIS10) ||
	    (adev->asic_type == CHIP_TONGA))
		vce_v3_0_set_bypass_mode(adev, enable);

	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
		return 0;
@ -763,6 +775,60 @@ static int vce_v3_0_set_powergating_state(void *handle,
	return vce_v3_0_start(adev);
}

static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
		struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, VCE_CMD_IB_VM);
	amdgpu_ring_write(ring, vm_id);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring,
			unsigned int vm_id, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB);
	amdgpu_ring_write(ring, vm_id);
	amdgpu_ring_write(ring, pd_addr >> 12);

	amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB);
	amdgpu_ring_write(ring, vm_id);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, VCE_CMD_WAIT_GE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
}

static unsigned vce_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
{
	return
		5; /* vce_v3_0_ring_emit_ib */
}

static unsigned vce_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
{
	return
		4 + /* vce_v3_0_emit_pipeline_sync */
		6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
}

static unsigned vce_v3_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
{
	return
		6 + /* vce_v3_0_emit_vm_flush */
		4 + /* vce_v3_0_emit_pipeline_sync */
		6 + 6; /* amdgpu_vce_ring_emit_fence x2 vm fence */
}

const struct amd_ip_funcs vce_v3_0_ip_funcs = {
	.name = "vce_v3_0",
	.early_init = vce_v3_0_early_init,
@ -783,7 +849,7 @@ const struct amd_ip_funcs vce_v3_0_ip_funcs = {
	.set_powergating_state = vce_v3_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
@ -796,12 +862,42 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
	.get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
	.get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size,
};

static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
	.parse_cs = NULL,
	.emit_ib = vce_v3_0_ring_emit_ib,
	.emit_vm_flush = vce_v3_0_emit_vm_flush,
	.emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
	.get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
	.get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size_vm,
};

static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->vce.ring[0].funcs = &vce_v3_0_ring_funcs;
	adev->vce.ring[1].funcs = &vce_v3_0_ring_funcs;
	int i;

	if (adev->asic_type >= CHIP_STONEY) {
		for (i = 0; i < adev->vce.num_rings; i++)
			adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs;
		DRM_INFO("VCE enabled in VM mode\n");
	} else {
		for (i = 0; i < adev->vce.num_rings; i++)
			adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs;
		DRM_INFO("VCE enabled in physical mode\n");
	}
}

static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
@ -1650,7 +1650,7 @@ static int vi_common_early_init(void *handle)
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x1;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
@ -369,4 +369,8 @@
#define VCE_CMD_IB_AUTO		0x00000005
#define VCE_CMD_SEMAPHORE	0x00000006

#define VCE_CMD_IB_VM		0x00000102
#define VCE_CMD_WAIT_GE		0x00000106
#define VCE_CMD_UPDATE_PTB	0x00000107
#define VCE_CMD_FLUSH_TLB	0x00000108
#endif
@ -29,7 +29,12 @@
 * Supported ASIC types
 */
enum amd_asic_type {
	CHIP_BONAIRE = 0,
	CHIP_TAHITI = 0,
	CHIP_PITCAIRN,
	CHIP_VERDE,
	CHIP_OLAND,
	CHIP_HAINAN,
	CHIP_BONAIRE,
	CHIP_KAVERI,
	CHIP_KABINI,
	CHIP_HAWAII,
941 drivers/gpu/drm/amd/include/asic_reg/si/clearstate_si.h Normal file
@ -0,0 +1,941 @@
|
||||
/*
|
||||
* Copyright 2013 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
static const u32 si_SECT_CONTEXT_def_1[] =
|
||||
{
|
||||
0x00000000, // DB_RENDER_CONTROL
|
||||
0x00000000, // DB_COUNT_CONTROL
|
||||
0x00000000, // DB_DEPTH_VIEW
|
||||
0x00000000, // DB_RENDER_OVERRIDE
|
||||
0x00000000, // DB_RENDER_OVERRIDE2
|
||||
0x00000000, // DB_HTILE_DATA_BASE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0x00000000, // DB_DEPTH_BOUNDS_MIN
|
||||
0x00000000, // DB_DEPTH_BOUNDS_MAX
|
||||
0x00000000, // DB_STENCIL_CLEAR
|
||||
0x00000000, // DB_DEPTH_CLEAR
|
||||
0x00000000, // PA_SC_SCREEN_SCISSOR_TL
|
||||
0x40004000, // PA_SC_SCREEN_SCISSOR_BR
|
||||
0, // HOLE
|
||||
0x00000000, // DB_DEPTH_INFO
|
||||
0x00000000, // DB_Z_INFO
|
||||
0x00000000, // DB_STENCIL_INFO
|
||||
0x00000000, // DB_Z_READ_BASE
|
||||
0x00000000, // DB_STENCIL_READ_BASE
|
||||
0x00000000, // DB_Z_WRITE_BASE
|
||||
0x00000000, // DB_STENCIL_WRITE_BASE
|
||||
0x00000000, // DB_DEPTH_SIZE
|
||||
0x00000000, // DB_DEPTH_SLICE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0x00000000, // TA_BC_BASE_ADDR
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0x00000000, // COHER_DEST_BASE_2
|
||||
0x00000000, // COHER_DEST_BASE_3
|
||||
0x00000000, // PA_SC_WINDOW_OFFSET
|
||||
0x80000000, // PA_SC_WINDOW_SCISSOR_TL
|
||||
0x40004000, // PA_SC_WINDOW_SCISSOR_BR
|
||||
0x0000ffff, // PA_SC_CLIPRECT_RULE
|
||||
0x00000000, // PA_SC_CLIPRECT_0_TL
|
||||
0x40004000, // PA_SC_CLIPRECT_0_BR
|
||||
0x00000000, // PA_SC_CLIPRECT_1_TL
|
||||
0x40004000, // PA_SC_CLIPRECT_1_BR
|
||||
0x00000000, // PA_SC_CLIPRECT_2_TL
|
||||
0x40004000, // PA_SC_CLIPRECT_2_BR
|
||||
0x00000000, // PA_SC_CLIPRECT_3_TL
|
||||
0x40004000, // PA_SC_CLIPRECT_3_BR
|
||||
0xaa99aaaa, // PA_SC_EDGERULE
|
||||
0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET
|
||||
0xffffffff, // CB_TARGET_MASK
|
||||
0xffffffff, // CB_SHADER_MASK
|
||||
0x80000000, // PA_SC_GENERIC_SCISSOR_TL
|
||||
0x40004000, // PA_SC_GENERIC_SCISSOR_BR
|
||||
0x00000000, // COHER_DEST_BASE_0
|
||||
0x00000000, // COHER_DEST_BASE_1
|
||||
0x80000000, // PA_SC_VPORT_SCISSOR_0_TL
|
||||
0x40004000, // PA_SC_VPORT_SCISSOR_0_BR
|
||||
0x80000000, // PA_SC_VPORT_SCISSOR_1_TL
|
||||
0x40004000, // PA_SC_VPORT_SCISSOR_1_BR
|
||||
0x80000000, // PA_SC_VPORT_SCISSOR_2_TL
|
||||
0x40004000, // PA_SC_VPORT_SCISSOR_2_BR
|
||||
0x80000000, // PA_SC_VPORT_SCISSOR_3_TL
|
||||
0x40004000, // PA_SC_VPORT_SCISSOR_3_BR
|
||||
0x80000000, // PA_SC_VPORT_SCISSOR_4_TL
|
||||
0x40004000, // PA_SC_VPORT_SCISSOR_4_BR
|
||||
0x80000000, // PA_SC_VPORT_SCISSOR_5_TL
|
||||
0x40004000, // PA_SC_VPORT_SCISSOR_5_BR
|
||||
0x80000000, // PA_SC_VPORT_SCISSOR_6_TL
|
||||
0x40004000, // PA_SC_VPORT_SCISSOR_6_BR
|
||||
0x80000000, // PA_SC_VPORT_SCISSOR_7_TL
|
||||
0x40004000, // PA_SC_VPORT_SCISSOR_7_BR
|
||||
0x80000000, // PA_SC_VPORT_SCISSOR_8_TL
|
||||
0x40004000, // PA_SC_VPORT_SCISSOR_8_BR
|
||||
0x80000000, // PA_SC_VPORT_SCISSOR_9_TL
|
||||
0x40004000, // PA_SC_VPORT_SCISSOR_9_BR
|
||||
0x80000000, // PA_SC_VPORT_SCISSOR_10_TL
|
||||
0x40004000, // PA_SC_VPORT_SCISSOR_10_BR
|
||||
0x80000000, // PA_SC_VPORT_SCISSOR_11_TL
|
||||
0x40004000, // PA_SC_VPORT_SCISSOR_11_BR
|
||||
0x80000000, // PA_SC_VPORT_SCISSOR_12_TL
|
||||
0x40004000, // PA_SC_VPORT_SCISSOR_12_BR
|
||||
0x80000000, // PA_SC_VPORT_SCISSOR_13_TL
|
||||
0x40004000, // PA_SC_VPORT_SCISSOR_13_BR
|
||||
0x80000000, // PA_SC_VPORT_SCISSOR_14_TL
|
||||
0x40004000, // PA_SC_VPORT_SCISSOR_14_BR
|
||||
0x80000000, // PA_SC_VPORT_SCISSOR_15_TL
|
||||
0x40004000, // PA_SC_VPORT_SCISSOR_15_BR
|
||||
0x00000000, // PA_SC_VPORT_ZMIN_0
|
||||
0x3f800000, // PA_SC_VPORT_ZMAX_0
|
||||
0x00000000, // PA_SC_VPORT_ZMIN_1
|
||||
0x3f800000, // PA_SC_VPORT_ZMAX_1
|
||||
0x00000000, // PA_SC_VPORT_ZMIN_2
|
||||
0x3f800000, // PA_SC_VPORT_ZMAX_2
|
||||
0x00000000, // PA_SC_VPORT_ZMIN_3
|
||||
0x3f800000, // PA_SC_VPORT_ZMAX_3
|
||||
0x00000000, // PA_SC_VPORT_ZMIN_4
|
||||
0x3f800000, // PA_SC_VPORT_ZMAX_4
|
||||
0x00000000, // PA_SC_VPORT_ZMIN_5
|
||||
0x3f800000, // PA_SC_VPORT_ZMAX_5
|
||||
0x00000000, // PA_SC_VPORT_ZMIN_6
|
||||
0x3f800000, // PA_SC_VPORT_ZMAX_6
|
||||
0x00000000, // PA_SC_VPORT_ZMIN_7
|
||||
0x3f800000, // PA_SC_VPORT_ZMAX_7
|
||||
0x00000000, // PA_SC_VPORT_ZMIN_8
|
||||
0x3f800000, // PA_SC_VPORT_ZMAX_8
|
||||
0x00000000, // PA_SC_VPORT_ZMIN_9
|
||||
0x3f800000, // PA_SC_VPORT_ZMAX_9
|
||||
0x00000000, // PA_SC_VPORT_ZMIN_10
|
||||
0x3f800000, // PA_SC_VPORT_ZMAX_10
|
||||
0x00000000, // PA_SC_VPORT_ZMIN_11
|
||||
0x3f800000, // PA_SC_VPORT_ZMAX_11
|
||||
0x00000000, // PA_SC_VPORT_ZMIN_12
|
||||
0x3f800000, // PA_SC_VPORT_ZMAX_12
|
||||
0x00000000, // PA_SC_VPORT_ZMIN_13
|
||||
0x3f800000, // PA_SC_VPORT_ZMAX_13
|
||||
0x00000000, // PA_SC_VPORT_ZMIN_14
|
||||
0x3f800000, // PA_SC_VPORT_ZMAX_14
|
||||
0x00000000, // PA_SC_VPORT_ZMIN_15
|
||||
0x3f800000, // PA_SC_VPORT_ZMAX_15
|
||||
};
|
||||
static const u32 si_SECT_CONTEXT_def_2[] =
|
||||
{
|
||||
0x00000000, // CP_PERFMON_CNTX_CNTL
|
||||
0x00000000, // CP_RINGID
|
||||
0x00000000, // CP_VMID
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0xffffffff, // VGT_MAX_VTX_INDX
|
||||
0x00000000, // VGT_MIN_VTX_INDX
|
||||
0x00000000, // VGT_INDX_OFFSET
|
||||
0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX
|
||||
0, // HOLE
|
||||
0x00000000, // CB_BLEND_RED
|
||||
0x00000000, // CB_BLEND_GREEN
|
||||
0x00000000, // CB_BLEND_BLUE
|
||||
0x00000000, // CB_BLEND_ALPHA
|
||||
0, // HOLE
|
||||
0, // HOLE
|
||||
0x00000000, // DB_STENCIL_CONTROL
|
||||
0x00000000, // DB_STENCILREFMASK
|
||||
0x00000000, // DB_STENCILREFMASK_BF
|
||||
0, // HOLE
|
||||
0x00000000, // PA_CL_VPORT_XSCALE
|
||||
0x00000000, // PA_CL_VPORT_XOFFSET
|
||||
0x00000000, // PA_CL_VPORT_YSCALE
|
||||
0x00000000, // PA_CL_VPORT_YOFFSET
|
||||
0x00000000, // PA_CL_VPORT_ZSCALE
|
||||
0x00000000, // PA_CL_VPORT_ZOFFSET
|
||||
0x00000000, // PA_CL_VPORT_XSCALE_1
|
||||
0x00000000, // PA_CL_VPORT_XOFFSET_1
|
||||
0x00000000, // PA_CL_VPORT_YSCALE_1
|
||||
0x00000000, // PA_CL_VPORT_YOFFSET_1
|
||||
0x00000000, // PA_CL_VPORT_ZSCALE_1
|
||||
0x00000000, // PA_CL_VPORT_ZOFFSET_1
|
||||
0x00000000, // PA_CL_VPORT_XSCALE_2
|
||||
0x00000000, // PA_CL_VPORT_XOFFSET_2
|
||||
0x00000000, // PA_CL_VPORT_YSCALE_2
|
||||
0x00000000, // PA_CL_VPORT_YOFFSET_2
|
||||
0x00000000, // PA_CL_VPORT_ZSCALE_2
|
||||
0x00000000, // PA_CL_VPORT_ZOFFSET_2
|
||||
0x00000000, // PA_CL_VPORT_XSCALE_3
|
||||
0x00000000, // PA_CL_VPORT_XOFFSET_3
|
||||
0x00000000, // PA_CL_VPORT_YSCALE_3
|
||||
0x00000000, // PA_CL_VPORT_YOFFSET_3
|
||||
0x00000000, // PA_CL_VPORT_ZSCALE_3
|
||||
0x00000000, // PA_CL_VPORT_ZOFFSET_3
|
||||
0x00000000, // PA_CL_VPORT_XSCALE_4
|
||||
0x00000000, // PA_CL_VPORT_XOFFSET_4
|
||||
0x00000000, // PA_CL_VPORT_YSCALE_4
|
||||
0x00000000, // PA_CL_VPORT_YOFFSET_4
|
||||
0x00000000, // PA_CL_VPORT_ZSCALE_4
|
||||
0x00000000, // PA_CL_VPORT_ZOFFSET_4
|
||||
0x00000000, // PA_CL_VPORT_XSCALE_5
|
||||
0x00000000, // PA_CL_VPORT_XOFFSET_5
|
||||
0x00000000, // PA_CL_VPORT_YSCALE_5
|
||||
0x00000000, // PA_CL_VPORT_YOFFSET_5
|
||||
0x00000000, // PA_CL_VPORT_ZSCALE_5
|
||||
0x00000000, // PA_CL_VPORT_ZOFFSET_5
|
||||
0x00000000, // PA_CL_VPORT_XSCALE_6
|
||||
0x00000000, // PA_CL_VPORT_XOFFSET_6
|
||||
0x00000000, // PA_CL_VPORT_YSCALE_6
|
||||
0x00000000, // PA_CL_VPORT_YOFFSET_6
|
||||
0x00000000, // PA_CL_VPORT_ZSCALE_6
|
||||
0x00000000, // PA_CL_VPORT_ZOFFSET_6
|
||||
0x00000000, // PA_CL_VPORT_XSCALE_7
|
||||
0x00000000, // PA_CL_VPORT_XOFFSET_7
|
||||
0x00000000, // PA_CL_VPORT_YSCALE_7
|
||||
0x00000000, // PA_CL_VPORT_YOFFSET_7
|
||||
0x00000000, // PA_CL_VPORT_ZSCALE_7
|
||||
0x00000000, // PA_CL_VPORT_ZOFFSET_7
|
||||
0x00000000, // PA_CL_VPORT_XSCALE_8
|
||||
0x00000000, // PA_CL_VPORT_XOFFSET_8
|
||||
0x00000000, // PA_CL_VPORT_YSCALE_8
|
||||
0x00000000, // PA_CL_VPORT_YOFFSET_8
|
||||
0x00000000, // PA_CL_VPORT_ZSCALE_8
|
||||
0x00000000, // PA_CL_VPORT_ZOFFSET_8
|
||||
0x00000000, // PA_CL_VPORT_XSCALE_9
|
||||
0x00000000, // PA_CL_VPORT_XOFFSET_9
|
||||
0x00000000, // PA_CL_VPORT_YSCALE_9
|
||||
0x00000000, // PA_CL_VPORT_YOFFSET_9
|
||||
0x00000000, // PA_CL_VPORT_ZSCALE_9
|
||||
0x00000000, // PA_CL_VPORT_ZOFFSET_9
|
||||
0x00000000, // PA_CL_VPORT_XSCALE_10
0x00000000, // PA_CL_VPORT_XOFFSET_10
0x00000000, // PA_CL_VPORT_YSCALE_10
0x00000000, // PA_CL_VPORT_YOFFSET_10
0x00000000, // PA_CL_VPORT_ZSCALE_10
0x00000000, // PA_CL_VPORT_ZOFFSET_10
0x00000000, // PA_CL_VPORT_XSCALE_11
0x00000000, // PA_CL_VPORT_XOFFSET_11
0x00000000, // PA_CL_VPORT_YSCALE_11
0x00000000, // PA_CL_VPORT_YOFFSET_11
0x00000000, // PA_CL_VPORT_ZSCALE_11
0x00000000, // PA_CL_VPORT_ZOFFSET_11
0x00000000, // PA_CL_VPORT_XSCALE_12
0x00000000, // PA_CL_VPORT_XOFFSET_12
0x00000000, // PA_CL_VPORT_YSCALE_12
0x00000000, // PA_CL_VPORT_YOFFSET_12
0x00000000, // PA_CL_VPORT_ZSCALE_12
0x00000000, // PA_CL_VPORT_ZOFFSET_12
0x00000000, // PA_CL_VPORT_XSCALE_13
0x00000000, // PA_CL_VPORT_XOFFSET_13
0x00000000, // PA_CL_VPORT_YSCALE_13
0x00000000, // PA_CL_VPORT_YOFFSET_13
0x00000000, // PA_CL_VPORT_ZSCALE_13
0x00000000, // PA_CL_VPORT_ZOFFSET_13
0x00000000, // PA_CL_VPORT_XSCALE_14
0x00000000, // PA_CL_VPORT_XOFFSET_14
0x00000000, // PA_CL_VPORT_YSCALE_14
0x00000000, // PA_CL_VPORT_YOFFSET_14
0x00000000, // PA_CL_VPORT_ZSCALE_14
0x00000000, // PA_CL_VPORT_ZOFFSET_14
0x00000000, // PA_CL_VPORT_XSCALE_15
0x00000000, // PA_CL_VPORT_XOFFSET_15
0x00000000, // PA_CL_VPORT_YSCALE_15
0x00000000, // PA_CL_VPORT_YOFFSET_15
0x00000000, // PA_CL_VPORT_ZSCALE_15
0x00000000, // PA_CL_VPORT_ZOFFSET_15
0x00000000, // PA_CL_UCP_0_X
0x00000000, // PA_CL_UCP_0_Y
0x00000000, // PA_CL_UCP_0_Z
0x00000000, // PA_CL_UCP_0_W
0x00000000, // PA_CL_UCP_1_X
0x00000000, // PA_CL_UCP_1_Y
0x00000000, // PA_CL_UCP_1_Z
0x00000000, // PA_CL_UCP_1_W
0x00000000, // PA_CL_UCP_2_X
0x00000000, // PA_CL_UCP_2_Y
0x00000000, // PA_CL_UCP_2_Z
0x00000000, // PA_CL_UCP_2_W
0x00000000, // PA_CL_UCP_3_X
0x00000000, // PA_CL_UCP_3_Y
0x00000000, // PA_CL_UCP_3_Z
0x00000000, // PA_CL_UCP_3_W
0x00000000, // PA_CL_UCP_4_X
0x00000000, // PA_CL_UCP_4_Y
0x00000000, // PA_CL_UCP_4_Z
0x00000000, // PA_CL_UCP_4_W
0x00000000, // PA_CL_UCP_5_X
0x00000000, // PA_CL_UCP_5_Y
0x00000000, // PA_CL_UCP_5_Z
0x00000000, // PA_CL_UCP_5_W
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0x00000000, // SPI_PS_INPUT_CNTL_0
0x00000000, // SPI_PS_INPUT_CNTL_1
0x00000000, // SPI_PS_INPUT_CNTL_2
0x00000000, // SPI_PS_INPUT_CNTL_3
0x00000000, // SPI_PS_INPUT_CNTL_4
0x00000000, // SPI_PS_INPUT_CNTL_5
0x00000000, // SPI_PS_INPUT_CNTL_6
0x00000000, // SPI_PS_INPUT_CNTL_7
0x00000000, // SPI_PS_INPUT_CNTL_8
0x00000000, // SPI_PS_INPUT_CNTL_9
0x00000000, // SPI_PS_INPUT_CNTL_10
0x00000000, // SPI_PS_INPUT_CNTL_11
0x00000000, // SPI_PS_INPUT_CNTL_12
0x00000000, // SPI_PS_INPUT_CNTL_13
0x00000000, // SPI_PS_INPUT_CNTL_14
0x00000000, // SPI_PS_INPUT_CNTL_15
0x00000000, // SPI_PS_INPUT_CNTL_16
0x00000000, // SPI_PS_INPUT_CNTL_17
0x00000000, // SPI_PS_INPUT_CNTL_18
0x00000000, // SPI_PS_INPUT_CNTL_19
0x00000000, // SPI_PS_INPUT_CNTL_20
0x00000000, // SPI_PS_INPUT_CNTL_21
0x00000000, // SPI_PS_INPUT_CNTL_22
0x00000000, // SPI_PS_INPUT_CNTL_23
0x00000000, // SPI_PS_INPUT_CNTL_24
0x00000000, // SPI_PS_INPUT_CNTL_25
0x00000000, // SPI_PS_INPUT_CNTL_26
0x00000000, // SPI_PS_INPUT_CNTL_27
0x00000000, // SPI_PS_INPUT_CNTL_28
0x00000000, // SPI_PS_INPUT_CNTL_29
0x00000000, // SPI_PS_INPUT_CNTL_30
0x00000000, // SPI_PS_INPUT_CNTL_31
0x00000000, // SPI_VS_OUT_CONFIG
0, // HOLE
0x00000000, // SPI_PS_INPUT_ENA
0x00000000, // SPI_PS_INPUT_ADDR
0x00000000, // SPI_INTERP_CONTROL_0
0x00000002, // SPI_PS_IN_CONTROL
0, // HOLE
0x00000000, // SPI_BARYC_CNTL
0, // HOLE
0x00000000, // SPI_TMPRING_SIZE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0x00000000, // SPI_WAVE_MGMT_1
0x00000000, // SPI_WAVE_MGMT_2
0x00000000, // SPI_SHADER_POS_FORMAT
0x00000000, // SPI_SHADER_Z_FORMAT
0x00000000, // SPI_SHADER_COL_FORMAT
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0x00000000, // CB_BLEND0_CONTROL
0x00000000, // CB_BLEND1_CONTROL
0x00000000, // CB_BLEND2_CONTROL
0x00000000, // CB_BLEND3_CONTROL
0x00000000, // CB_BLEND4_CONTROL
0x00000000, // CB_BLEND5_CONTROL
0x00000000, // CB_BLEND6_CONTROL
0x00000000, // CB_BLEND7_CONTROL
};
static const u32 si_SECT_CONTEXT_def_3[] =
{
0x00000000, // PA_CL_POINT_X_RAD
0x00000000, // PA_CL_POINT_Y_RAD
0x00000000, // PA_CL_POINT_SIZE
0x00000000, // PA_CL_POINT_CULL_RAD
0x00000000, // VGT_DMA_BASE_HI
0x00000000, // VGT_DMA_BASE
};
static const u32 si_SECT_CONTEXT_def_4[] =
{
0x00000000, // DB_DEPTH_CONTROL
0x00000000, // DB_EQAA
0x00000000, // CB_COLOR_CONTROL
0x00000000, // DB_SHADER_CONTROL
0x00090000, // PA_CL_CLIP_CNTL
0x00000004, // PA_SU_SC_MODE_CNTL
0x00000000, // PA_CL_VTE_CNTL
0x00000000, // PA_CL_VS_OUT_CNTL
0x00000000, // PA_CL_NANINF_CNTL
0x00000000, // PA_SU_LINE_STIPPLE_CNTL
0x00000000, // PA_SU_LINE_STIPPLE_SCALE
0x00000000, // PA_SU_PRIM_FILTER_CNTL
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0x00000000, // PA_SU_POINT_SIZE
0x00000000, // PA_SU_POINT_MINMAX
0x00000000, // PA_SU_LINE_CNTL
0x00000000, // PA_SC_LINE_STIPPLE
0x00000000, // VGT_OUTPUT_PATH_CNTL
0x00000000, // VGT_HOS_CNTL
0x00000000, // VGT_HOS_MAX_TESS_LEVEL
0x00000000, // VGT_HOS_MIN_TESS_LEVEL
0x00000000, // VGT_HOS_REUSE_DEPTH
0x00000000, // VGT_GROUP_PRIM_TYPE
0x00000000, // VGT_GROUP_FIRST_DECR
0x00000000, // VGT_GROUP_DECR
0x00000000, // VGT_GROUP_VECT_0_CNTL
0x00000000, // VGT_GROUP_VECT_1_CNTL
0x00000000, // VGT_GROUP_VECT_0_FMT_CNTL
0x00000000, // VGT_GROUP_VECT_1_FMT_CNTL
0x00000000, // VGT_GS_MODE
0, // HOLE
0x00000000, // PA_SC_MODE_CNTL_0
0x00000000, // PA_SC_MODE_CNTL_1
0x00000000, // VGT_ENHANCE
0x00000100, // VGT_GS_PER_ES
0x00000080, // VGT_ES_PER_GS
0x00000002, // VGT_GS_PER_VS
0x00000000, // VGT_GSVS_RING_OFFSET_1
0x00000000, // VGT_GSVS_RING_OFFSET_2
0x00000000, // VGT_GSVS_RING_OFFSET_3
0x00000000, // VGT_GS_OUT_PRIM_TYPE
0x00000000, // IA_ENHANCE
};
static const u32 si_SECT_CONTEXT_def_5[] =
{
0x00000000, // VGT_PRIMITIVEID_EN
};
static const u32 si_SECT_CONTEXT_def_6[] =
{
0x00000000, // VGT_PRIMITIVEID_RESET
};
static const u32 si_SECT_CONTEXT_def_7[] =
{
0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
0, // HOLE
0, // HOLE
0x00000000, // VGT_INSTANCE_STEP_RATE_0
0x00000000, // VGT_INSTANCE_STEP_RATE_1
0x000000ff, // IA_MULTI_VGT_PARAM
0x00000000, // VGT_ESGS_RING_ITEMSIZE
0x00000000, // VGT_GSVS_RING_ITEMSIZE
0x00000000, // VGT_REUSE_OFF
0x00000000, // VGT_VTX_CNT_EN
0x00000000, // DB_HTILE_SURFACE
0x00000000, // DB_SRESULTS_COMPARE_STATE0
0x00000000, // DB_SRESULTS_COMPARE_STATE1
0x00000000, // DB_PRELOAD_CONTROL
0, // HOLE
0x00000000, // VGT_STRMOUT_BUFFER_SIZE_0
0x00000000, // VGT_STRMOUT_VTX_STRIDE_0
0, // HOLE
0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_0
0x00000000, // VGT_STRMOUT_BUFFER_SIZE_1
0x00000000, // VGT_STRMOUT_VTX_STRIDE_1
0, // HOLE
0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_1
0x00000000, // VGT_STRMOUT_BUFFER_SIZE_2
0x00000000, // VGT_STRMOUT_VTX_STRIDE_2
0, // HOLE
0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_2
0x00000000, // VGT_STRMOUT_BUFFER_SIZE_3
0x00000000, // VGT_STRMOUT_VTX_STRIDE_3
0, // HOLE
0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_3
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET
0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
0, // HOLE
0x00000000, // VGT_GS_MAX_VERT_OUT
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0x00000000, // VGT_SHADER_STAGES_EN
0x00000000, // VGT_LS_HS_CONFIG
0x00000000, // VGT_GS_VERT_ITEMSIZE
0x00000000, // VGT_GS_VERT_ITEMSIZE_1
0x00000000, // VGT_GS_VERT_ITEMSIZE_2
0x00000000, // VGT_GS_VERT_ITEMSIZE_3
0x00000000, // VGT_TF_PARAM
0x00000000, // DB_ALPHA_TO_MASK
0, // HOLE
0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL
0x00000000, // PA_SU_POLY_OFFSET_CLAMP
0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE
0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET
0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE
0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET
0x00000000, // VGT_GS_INSTANCE_CNT
0x00000000, // VGT_STRMOUT_CONFIG
0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0x00000000, // PA_SC_CENTROID_PRIORITY_0
0x00000000, // PA_SC_CENTROID_PRIORITY_1
0x00001000, // PA_SC_LINE_CNTL
0x00000000, // PA_SC_AA_CONFIG
0x00000005, // PA_SU_VTX_CNTL
0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ
0x3f800000, // PA_CL_GB_VERT_DISC_ADJ
0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ
0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ
0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
0xffffffff, // PA_SC_AA_MASK_X0Y0_X1Y0
0xffffffff, // PA_SC_AA_MASK_X0Y1_X1Y1
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
0x0000000e, // VGT_VERTEX_REUSE_BLOCK_CNTL
0x00000010, // VGT_OUT_DEALLOC_CNTL
0x00000000, // CB_COLOR0_BASE
0x00000000, // CB_COLOR0_PITCH
0x00000000, // CB_COLOR0_SLICE
0x00000000, // CB_COLOR0_VIEW
0x00000000, // CB_COLOR0_INFO
0x00000000, // CB_COLOR0_ATTRIB
0, // HOLE
0x00000000, // CB_COLOR0_CMASK
0x00000000, // CB_COLOR0_CMASK_SLICE
0x00000000, // CB_COLOR0_FMASK
0x00000000, // CB_COLOR0_FMASK_SLICE
0x00000000, // CB_COLOR0_CLEAR_WORD0
0x00000000, // CB_COLOR0_CLEAR_WORD1
0, // HOLE
0, // HOLE
0x00000000, // CB_COLOR1_BASE
0x00000000, // CB_COLOR1_PITCH
0x00000000, // CB_COLOR1_SLICE
0x00000000, // CB_COLOR1_VIEW
0x00000000, // CB_COLOR1_INFO
0x00000000, // CB_COLOR1_ATTRIB
0, // HOLE
0x00000000, // CB_COLOR1_CMASK
0x00000000, // CB_COLOR1_CMASK_SLICE
0x00000000, // CB_COLOR1_FMASK
0x00000000, // CB_COLOR1_FMASK_SLICE
0x00000000, // CB_COLOR1_CLEAR_WORD0
0x00000000, // CB_COLOR1_CLEAR_WORD1
0, // HOLE
0, // HOLE
0x00000000, // CB_COLOR2_BASE
0x00000000, // CB_COLOR2_PITCH
0x00000000, // CB_COLOR2_SLICE
0x00000000, // CB_COLOR2_VIEW
0x00000000, // CB_COLOR2_INFO
0x00000000, // CB_COLOR2_ATTRIB
0, // HOLE
0x00000000, // CB_COLOR2_CMASK
0x00000000, // CB_COLOR2_CMASK_SLICE
0x00000000, // CB_COLOR2_FMASK
0x00000000, // CB_COLOR2_FMASK_SLICE
0x00000000, // CB_COLOR2_CLEAR_WORD0
0x00000000, // CB_COLOR2_CLEAR_WORD1
0, // HOLE
0, // HOLE
0x00000000, // CB_COLOR3_BASE
0x00000000, // CB_COLOR3_PITCH
0x00000000, // CB_COLOR3_SLICE
0x00000000, // CB_COLOR3_VIEW
0x00000000, // CB_COLOR3_INFO
0x00000000, // CB_COLOR3_ATTRIB
0, // HOLE
0x00000000, // CB_COLOR3_CMASK
0x00000000, // CB_COLOR3_CMASK_SLICE
0x00000000, // CB_COLOR3_FMASK
0x00000000, // CB_COLOR3_FMASK_SLICE
0x00000000, // CB_COLOR3_CLEAR_WORD0
0x00000000, // CB_COLOR3_CLEAR_WORD1
0, // HOLE
0, // HOLE
0x00000000, // CB_COLOR4_BASE
0x00000000, // CB_COLOR4_PITCH
0x00000000, // CB_COLOR4_SLICE
0x00000000, // CB_COLOR4_VIEW
0x00000000, // CB_COLOR4_INFO
0x00000000, // CB_COLOR4_ATTRIB
0, // HOLE
0x00000000, // CB_COLOR4_CMASK
0x00000000, // CB_COLOR4_CMASK_SLICE
0x00000000, // CB_COLOR4_FMASK
0x00000000, // CB_COLOR4_FMASK_SLICE
0x00000000, // CB_COLOR4_CLEAR_WORD0
0x00000000, // CB_COLOR4_CLEAR_WORD1
0, // HOLE
0, // HOLE
0x00000000, // CB_COLOR5_BASE
0x00000000, // CB_COLOR5_PITCH
0x00000000, // CB_COLOR5_SLICE
0x00000000, // CB_COLOR5_VIEW
0x00000000, // CB_COLOR5_INFO
0x00000000, // CB_COLOR5_ATTRIB
0, // HOLE
0x00000000, // CB_COLOR5_CMASK
0x00000000, // CB_COLOR5_CMASK_SLICE
0x00000000, // CB_COLOR5_FMASK
0x00000000, // CB_COLOR5_FMASK_SLICE
0x00000000, // CB_COLOR5_CLEAR_WORD0
0x00000000, // CB_COLOR5_CLEAR_WORD1
0, // HOLE
0, // HOLE
0x00000000, // CB_COLOR6_BASE
0x00000000, // CB_COLOR6_PITCH
0x00000000, // CB_COLOR6_SLICE
0x00000000, // CB_COLOR6_VIEW
0x00000000, // CB_COLOR6_INFO
0x00000000, // CB_COLOR6_ATTRIB
0, // HOLE
0x00000000, // CB_COLOR6_CMASK
0x00000000, // CB_COLOR6_CMASK_SLICE
0x00000000, // CB_COLOR6_FMASK
0x00000000, // CB_COLOR6_FMASK_SLICE
0x00000000, // CB_COLOR6_CLEAR_WORD0
0x00000000, // CB_COLOR6_CLEAR_WORD1
0, // HOLE
0, // HOLE
0x00000000, // CB_COLOR7_BASE
0x00000000, // CB_COLOR7_PITCH
0x00000000, // CB_COLOR7_SLICE
0x00000000, // CB_COLOR7_VIEW
0x00000000, // CB_COLOR7_INFO
0x00000000, // CB_COLOR7_ATTRIB
0, // HOLE
0x00000000, // CB_COLOR7_CMASK
0x00000000, // CB_COLOR7_CMASK_SLICE
0x00000000, // CB_COLOR7_FMASK
0x00000000, // CB_COLOR7_FMASK_SLICE
0x00000000, // CB_COLOR7_CLEAR_WORD0
0x00000000, // CB_COLOR7_CLEAR_WORD1
};
static const struct cs_extent_def si_SECT_CONTEXT_defs[] =
{
{si_SECT_CONTEXT_def_1, 0x0000a000, 212 },
{si_SECT_CONTEXT_def_2, 0x0000a0d8, 272 },
{si_SECT_CONTEXT_def_3, 0x0000a1f5, 6 },
{si_SECT_CONTEXT_def_4, 0x0000a200, 157 },
{si_SECT_CONTEXT_def_5, 0x0000a2a1, 1 },
{si_SECT_CONTEXT_def_6, 0x0000a2a3, 1 },
{si_SECT_CONTEXT_def_7, 0x0000a2a5, 233 },
{ NULL, 0, 0 }
};
static const struct cs_section_def si_cs_data[] = {
{ si_SECT_CONTEXT_defs, SECT_CONTEXT },
{ NULL, SECT_NONE }
};
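Note that both tables are sentinel-terminated ({ NULL, 0, 0 } and { NULL, SECT_NONE }), so consumers can walk them without a separate length. A minimal sketch of how a clear-state buffer might be sized from them, assuming the cs_section_def/cs_extent_def field names used in the radeon/amdgpu clearstate headers; the 2-dword packet overhead per extent is an illustrative assumption, not the driver's actual cost:

/* Hypothetical sketch: count the dwords a clear-state buffer would need.
 * Each extent is assumed to cost a packet header (2 dwords here) plus
 * its register payload. */
static unsigned si_count_csb_dwords(const struct cs_section_def *sections)
{
	const struct cs_section_def *sect;
	const struct cs_extent_def *ext;
	unsigned count = 0;

	for (sect = sections; sect->section != NULL; ++sect)
		for (ext = sect->section; ext->extent != NULL; ++ext)
			count += 2 + ext->reg_count; /* header + payload */

	return count;
}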

drivers/gpu/drm/amd/include/asic_reg/si/si_reg.h (new file, 105 lines)
@@ -0,0 +1,105 @@
/*
* Copyright 2010 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Alex Deucher
*/
#ifndef __SI_REG_H__
#define __SI_REG_H__

/* SI */
#define SI_DC_GPIO_HPD_MASK 0x196c
#define SI_DC_GPIO_HPD_A 0x196d
#define SI_DC_GPIO_HPD_EN 0x196e
#define SI_DC_GPIO_HPD_Y 0x196f

#define SI_GRPH_CONTROL 0x1a01
# define SI_GRPH_DEPTH(x) (((x) & 0x3) << 0)
# define SI_GRPH_DEPTH_8BPP 0
# define SI_GRPH_DEPTH_16BPP 1
# define SI_GRPH_DEPTH_32BPP 2
# define SI_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
# define SI_ADDR_SURF_2_BANK 0
# define SI_ADDR_SURF_4_BANK 1
# define SI_ADDR_SURF_8_BANK 2
# define SI_ADDR_SURF_16_BANK 3
# define SI_GRPH_Z(x) (((x) & 0x3) << 4)
# define SI_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
# define SI_ADDR_SURF_BANK_WIDTH_1 0
# define SI_ADDR_SURF_BANK_WIDTH_2 1
# define SI_ADDR_SURF_BANK_WIDTH_4 2
# define SI_ADDR_SURF_BANK_WIDTH_8 3
# define SI_GRPH_FORMAT(x) (((x) & 0x7) << 8)
/* 8 BPP */
# define SI_GRPH_FORMAT_INDEXED 0
/* 16 BPP */
# define SI_GRPH_FORMAT_ARGB1555 0
# define SI_GRPH_FORMAT_ARGB565 1
# define SI_GRPH_FORMAT_ARGB4444 2
# define SI_GRPH_FORMAT_AI88 3
# define SI_GRPH_FORMAT_MONO16 4
# define SI_GRPH_FORMAT_BGRA5551 5
/* 32 BPP */
# define SI_GRPH_FORMAT_ARGB8888 0
# define SI_GRPH_FORMAT_ARGB2101010 1
# define SI_GRPH_FORMAT_32BPP_DIG 2
# define SI_GRPH_FORMAT_8B_ARGB2101010 3
# define SI_GRPH_FORMAT_BGRA1010102 4
# define SI_GRPH_FORMAT_8B_BGRA1010102 5
# define SI_GRPH_FORMAT_RGB111110 6
# define SI_GRPH_FORMAT_BGR101111 7
# define SI_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
# define SI_ADDR_SURF_BANK_HEIGHT_1 0
# define SI_ADDR_SURF_BANK_HEIGHT_2 1
# define SI_ADDR_SURF_BANK_HEIGHT_4 2
# define SI_ADDR_SURF_BANK_HEIGHT_8 3
# define SI_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
# define SI_ADDR_SURF_TILE_SPLIT_64B 0
# define SI_ADDR_SURF_TILE_SPLIT_128B 1
# define SI_ADDR_SURF_TILE_SPLIT_256B 2
# define SI_ADDR_SURF_TILE_SPLIT_512B 3
# define SI_ADDR_SURF_TILE_SPLIT_1KB 4
# define SI_ADDR_SURF_TILE_SPLIT_2KB 5
# define SI_ADDR_SURF_TILE_SPLIT_4KB 6
# define SI_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
# define SI_ADDR_SURF_MACRO_TILE_ASPECT_1 0
# define SI_ADDR_SURF_MACRO_TILE_ASPECT_2 1
# define SI_ADDR_SURF_MACRO_TILE_ASPECT_4 2
# define SI_ADDR_SURF_MACRO_TILE_ASPECT_8 3
# define SI_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
# define SI_GRPH_ARRAY_LINEAR_GENERAL 0
# define SI_GRPH_ARRAY_LINEAR_ALIGNED 1
# define SI_GRPH_ARRAY_1D_TILED_THIN1 2
# define SI_GRPH_ARRAY_2D_TILED_THIN1 4
# define SI_GRPH_PIPE_CONFIG(x) (((x) & 0x1f) << 24)
# define SI_ADDR_SURF_P2 0
# define SI_ADDR_SURF_P4_8x16 4
# define SI_ADDR_SURF_P4_16x16 5
# define SI_ADDR_SURF_P4_16x32 6
# define SI_ADDR_SURF_P4_32x32 7
# define SI_ADDR_SURF_P8_16x16_8x16 8
# define SI_ADDR_SURF_P8_16x32_8x16 9
# define SI_ADDR_SURF_P8_32x32_8x16 10
# define SI_ADDR_SURF_P8_16x32_16x16 11
# define SI_ADDR_SURF_P8_32x32_16x16 12
# define SI_ADDR_SURF_P8_32x32_16x32 13
# define SI_ADDR_SURF_P8_32x64_32x32 14

#endif
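The shifted-and-masked macros above compose into a single GRPH_CONTROL register value. A hedged illustration (the particular surface parameters are made up; in practice they come from the buffer's tiling info, and u32 is the kernel type):

/* Illustrative: a 32bpp ARGB8888, 1D-tiled scanout surface. */
static u32 si_grph_control_example(void)
{
	u32 ctl = 0;

	ctl |= SI_GRPH_DEPTH(SI_GRPH_DEPTH_32BPP);
	ctl |= SI_GRPH_FORMAT(SI_GRPH_FORMAT_ARGB8888);
	ctl |= SI_GRPH_ARRAY_MODE(SI_GRPH_ARRAY_1D_TILED_THIN1);
	ctl |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);

	return ctl;
}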

drivers/gpu/drm/amd/include/asic_reg/si/sid.h (new file, 2426 lines; diff suppressed because it is too large)
@@ -494,6 +494,7 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3
union
{
ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter
ULONG ulClockParams; //ULONG access for BE
ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter
};
UCHAR ucRefDiv; //Output Parameter
@@ -526,6 +527,7 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
union
{
ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter
ULONG ulClockParams; //ULONG access for BE
ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter
};
UCHAR ucRefDiv; //Output Parameter
@@ -119,6 +119,8 @@ enum cgs_system_info_id {
CGS_SYSTEM_INFO_PG_FLAGS,
CGS_SYSTEM_INFO_GFX_CU_INFO,
CGS_SYSTEM_INFO_GFX_SE_INFO,
CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID,
CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID,
CGS_SYSTEM_INFO_ID_MAXIMUM,
};
@@ -538,7 +538,6 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input,
ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
break;
case AMD_PP_EVENT_READJUST_POWER_STATE:
pp_handle->hwmgr->current_ps = pp_handle->hwmgr->boot_ps;
ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
break;
default:
@@ -765,15 +764,12 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
PP_CHECK_HW(hwmgr);

if (!hwmgr->hardcode_pp_table) {
hwmgr->hardcode_pp_table =
kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
hwmgr->soft_pp_table_size,
GFP_KERNEL);

if (!hwmgr->hardcode_pp_table)
return -ENOMEM;

/* to avoid powerplay crash when hardcode pptable is empty */
memcpy(hwmgr->hardcode_pp_table, hwmgr->soft_pp_table,
hwmgr->soft_pp_table_size);
}

memcpy(hwmgr->hardcode_pp_table, buf, size);
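The pp_dpm_set_pp_table hunk above folds an open-coded allocate-then-copy into kmemdup(), which allocates and copies in one call. A sketch of the equivalence, with dst/src/len standing in for hwmgr->hardcode_pp_table, hwmgr->soft_pp_table and hwmgr->soft_pp_table_size:

/* Before: two steps, plus a comment to justify the eager copy. */
static void *dup_table_old(const void *src, size_t len)
{
	void *dst = kzalloc(len, GFP_KERNEL);

	if (!dst)
		return NULL;
	memcpy(dst, src, len);	/* copy now so the table is never empty */
	return dst;
}

/* After: kmemdup() allocates len bytes and copies src into them. */
static void *dup_table_new(const void *src, size_t len)
{
	return kmemdup(src, len, GFP_KERNEL);
}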
@@ -70,11 +70,12 @@ int psm_set_states(struct pp_eventmgr *eventmgr, unsigned long *state_id)
int i;

table_entries = hwmgr->num_ps;

state = hwmgr->ps;

for (i = 0; i < table_entries; i++) {
if (state->id == *state_id) {
hwmgr->request_ps = state;
memcpy(hwmgr->request_ps, state, hwmgr->ps_size);
return 0;
}
state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
@@ -106,7 +107,7 @@ int psm_adjust_power_state_dynamic(struct pp_eventmgr *eventmgr, bool skip)
if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) {
phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware);
hwmgr->current_ps = requested;
memcpy(hwmgr->current_ps, hwmgr->request_ps, hwmgr->ps_size);
}
return 0;
}
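Both psm hunks swap pointer assignment for memcpy() into buffers that hwmgr now owns (allocated in the hw_init_power_state_table hunk further down), so request_ps and current_ps no longer alias entries of the master hwmgr->ps table. A hedged sketch of the difference, where table_lookup() is a hypothetical stand-in for the search loop above:

struct pp_power_state *entry = table_lookup(hwmgr, state_id); /* hypothetical */

hwmgr->request_ps = entry;                        /* old: alias - later state
                                                   * adjustments scribble on
                                                   * the master table entry */
memcpy(hwmgr->request_ps, entry, hwmgr->ps_size); /* new: deep copy - the
                                                   * table stays pristine */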
@@ -5,7 +5,7 @@
HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \
hardwaremanager.o pp_acpi.o cz_hwmgr.o \
cz_clockpowergating.o tonga_powertune.o\
tonga_processpptables.o ppatomctrl.o \
process_pptables_v1_0.o ppatomctrl.o \
tonga_hwmgr.o pppcielanes.o tonga_thermal.o\
fiji_powertune.o fiji_hwmgr.o tonga_clockpowergating.o \
fiji_clockpowergating.o fiji_thermal.o \
@@ -44,8 +44,8 @@
#include "dce/dce_10_0_sh_mask.h"
#include "pppcielanes.h"
#include "fiji_hwmgr.h"
#include "tonga_processpptables.h"
#include "tonga_pptable.h"
#include "process_pptables_v1_0.h"
#include "pptable_v1_0.h"
#include "pp_debug.h"
#include "pp_acpi.h"
#include "amd_pcie_helpers.h"
@@ -112,7 +112,7 @@ static const uint8_t fiji_clock_stretch_amount_conversion[2][6] =

static const unsigned long PhwFiji_Magic = (unsigned long)(PHM_VIslands_Magic);

struct fiji_power_state *cast_phw_fiji_power_state(
static struct fiji_power_state *cast_phw_fiji_power_state(
struct pp_hw_power_state *hw_ps)
{
PP_ASSERT_WITH_CODE((PhwFiji_Magic == hw_ps->magic),
@@ -122,7 +122,8 @@ struct fiji_power_state *cast_phw_fiji_power_state(
return (struct fiji_power_state *)hw_ps;
}

const struct fiji_power_state *cast_const_phw_fiji_power_state(
static const struct
fiji_power_state *cast_const_phw_fiji_power_state(
const struct pp_hw_power_state *hw_ps)
{
PP_ASSERT_WITH_CODE((PhwFiji_Magic == hw_ps->magic),
@@ -1626,7 +1627,7 @@ static int fiji_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
* @param voltage - voltage to look for
* @return 0 on success
*/
uint8_t fiji_get_voltage_index(
static uint8_t fiji_get_voltage_index(
struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
{
uint8_t count = (uint8_t) (lookup_table->count);
@@ -1690,7 +1691,7 @@ static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
* @return always 0
*/

int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
static int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
struct SMU73_Discrete_DpmTable *table)
{
int result;
@@ -2301,7 +2302,7 @@ static int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
* @param mclk the MCLK value to be used in the decision if MVDD should be high or low.
* @param voltage the SMC VOLTAGE structure to be populated
*/
int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr,
static int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr,
uint32_t mclk, SMIO_Pattern *smio_pat)
{
const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
@@ -4005,7 +4006,7 @@ static int fiji_get_pp_table_entry(struct pp_hwmgr *hwmgr,

ps = (struct fiji_power_state *)(&state->hardware);

result = tonga_get_powerplay_table_entry(hwmgr, entry_index, state,
result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
fiji_get_pp_table_entry_callback_func);

/* This is the earliest time we have all the dependency table and the VBIOS boot state
@@ -4622,7 +4623,7 @@ static int fiji_generate_dpm_level_enable_mask(
return 0;
}

int fiji_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
static int fiji_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
(PPSMC_Msg)PPSMC_MSG_UVDDPM_Enable :
@@ -4636,14 +4637,14 @@ int fiji_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
PPSMC_MSG_VCEDPM_Disable);
}

int fiji_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
static int fiji_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
return smum_send_msg_to_smc(hwmgr->smumgr, enable?
PPSMC_MSG_SAMUDPM_Enable :
PPSMC_MSG_SAMUDPM_Disable);
}

int fiji_enable_disable_acp_dpm(struct pp_hwmgr *hwmgr, bool enable)
static int fiji_enable_disable_acp_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
return smum_send_msg_to_smc(hwmgr->smumgr, enable?
PPSMC_MSG_ACPDPM_Enable :
@@ -4880,7 +4881,7 @@ static void fiji_apply_dal_minimum_voltage_request(struct pp_hwmgr *hwmgr)
return;
}

int fiji_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
static int fiji_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
{
int result;
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
@@ -5156,7 +5157,7 @@ static int fiji_program_display_gap(struct pp_hwmgr *hwmgr)
return 0;
}

int fiji_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
static int fiji_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
return fiji_program_display_gap(hwmgr);
}
@@ -5187,7 +5188,7 @@ static int fiji_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr,
PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
}

int fiji_dpm_set_interrupt_state(void *private_data,
static int fiji_dpm_set_interrupt_state(void *private_data,
unsigned src_id, unsigned type,
int enabled)
{
@@ -5235,7 +5236,7 @@ int fiji_dpm_set_interrupt_state(void *private_data,
return 0;
}

int fiji_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
static int fiji_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
const void *thermal_interrupt_info)
{
int result;
@@ -5405,7 +5406,10 @@ static inline bool fiji_are_power_levels_equal(const struct fiji_performance_lev
(pl1->pcie_lane == pl2->pcie_lane));
}

int fiji_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
static int
fiji_check_states_equal(struct pp_hwmgr *hwmgr,
const struct pp_hw_power_state *pstate1,
const struct pp_hw_power_state *pstate2, bool *equal)
{
const struct fiji_power_state *psa = cast_const_phw_fiji_power_state(pstate1);
const struct fiji_power_state *psb = cast_const_phw_fiji_power_state(pstate2);
@@ -5437,7 +5441,8 @@ int fiji_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_sta
return 0;
}

bool fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
static bool
fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
bool is_update_required = false;
@@ -5547,7 +5552,7 @@ static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
.dynamic_state_management_enable = &fiji_enable_dpm_tasks,
.dynamic_state_management_disable = &fiji_disable_dpm_tasks,
.force_dpm_level = &fiji_dpm_force_dpm_level,
.get_num_of_pp_table_entries = &tonga_get_number_of_powerplay_table_entries,
.get_num_of_pp_table_entries = &get_number_of_powerplay_table_entries_v1_0,
.get_power_state_size = &fiji_get_power_state_size,
.get_pp_table_entry = &fiji_get_pp_table_entry,
.patch_boot_state = &fiji_patch_boot_state,
@@ -5589,7 +5594,7 @@ static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
int fiji_hwmgr_init(struct pp_hwmgr *hwmgr)
{
hwmgr->hwmgr_func = &fiji_hwmgr_funcs;
hwmgr->pptable_func = &tonga_pptable_funcs;
hwmgr->pptable_func = &pptable_v1_0_funcs;
pp_fiji_thermal_initialize(hwmgr);
return 0;
}
@@ -152,7 +152,7 @@ int fiji_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
return 0;
}

int fiji_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
static int fiji_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
{
int result;

@@ -421,7 +421,7 @@ int fiji_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
* @param Result the last failure code
* @return result from set temperature range routine
*/
int tf_fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
static int tf_fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
@@ -533,7 +533,7 @@ int tf_fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
* @param Result the last failure code
* @return result from set temperature range routine
*/
int tf_fiji_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
static int tf_fiji_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
/* If the fantable setup has failed we could have disabled
@@ -24,8 +24,6 @@
#include "hwmgr.h"
#include "hardwaremanager.h"
#include "power_state.h"
#include "pp_acpi.h"
#include "amd_acpi.h"
#include "pp_debug.h"

#define PHM_FUNC_CHECK(hw) \
@@ -34,38 +32,6 @@
return -EINVAL; \
} while (0)

void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr)
{
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableVoltageTransition);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableEngineTransition);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMemoryTransition);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGClockGating);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGCGTSSM);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLSClockGating);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_Force3DClockSupport);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLightSleep);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMCLS);
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisablePowerGating);

phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableDPM);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableSMUUVDHandshake);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ThermalAutoThrottling);

phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);

phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_NoOD5Support);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UserMaxClockForMultiDisplays);

phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress);

phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);

if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) &&
acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION))
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
}

bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr)
{
return hwmgr->block_hw_access;
@@ -32,8 +32,8 @@
#include "pp_debug.h"
#include "ppatomctrl.h"
#include "ppsmc.h"

#define VOLTAGE_SCALE 4
#include "pp_acpi.h"
#include "amd_acpi.h"

extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr);
@@ -41,23 +41,12 @@ extern int fiji_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int iceland_hwmgr_init(struct pp_hwmgr *hwmgr);

static int hwmgr_set_features_platform_caps(struct pp_hwmgr *hwmgr)
static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr);
static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr);

uint8_t convert_to_vid(uint16_t vddc)
{
if (amdgpu_sclk_deep_sleep_en)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep);
else
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep);

if (amdgpu_powercontainment)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment);
else
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment);

return 0;
return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
}

int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
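The convert_to_vid() helper introduced above implements the SMU's VID encoding, VID = (6200 - 4*mV) / 25, so each VID step corresponds to 6.25 mV. A quick standalone check of the arithmetic (illustrative; userspace harness, not driver code):

#include <assert.h>
#include <stdint.h>

#define VOLTAGE_SCALE 4

/* Same formula as the kernel helper above. */
static uint8_t convert_to_vid(uint16_t vddc)
{
	return (uint8_t)((6200 - (vddc * VOLTAGE_SCALE)) / 25);
}

int main(void)
{
	assert(convert_to_vid(1150) == 64);	/* (6200 - 4600) / 25 */
	assert(convert_to_vid(800) == 120);	/* (6200 - 3200) / 25 */
	return 0;
}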
@@ -76,13 +65,12 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
hwmgr->device = pp_init->device;
hwmgr->chip_family = pp_init->chip_family;
hwmgr->chip_id = pp_init->chip_id;
hwmgr->hw_revision = pp_init->rev_id;
hwmgr->sub_sys_id = pp_init->sub_sys_id;
hwmgr->sub_vendor_id = pp_init->sub_vendor_id;
hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
hwmgr->power_source = PP_PowerSource_AC;
hwmgr->pp_table_version = PP_TABLE_V1;

hwmgr_set_features_platform_caps(hwmgr);
hwmgr_init_default_caps(hwmgr);
hwmgr_set_user_specify_caps(hwmgr);

switch (hwmgr->chip_family) {
case AMDGPU_FAMILY_CZ:
@@ -111,8 +99,6 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
return -EINVAL;
}

phm_init_dynamic_caps(hwmgr);

return 0;
}

@@ -131,6 +117,8 @@ int hwmgr_fini(struct pp_hwmgr *hwmgr)
kfree(hwmgr->set_temperature_range.function_list);

kfree(hwmgr->ps);
kfree(hwmgr->current_ps);
kfree(hwmgr->request_ps);
kfree(hwmgr);
return 0;
}
@@ -155,10 +143,17 @@ int hw_init_power_state_table(struct pp_hwmgr *hwmgr)
sizeof(struct pp_power_state);

hwmgr->ps = kzalloc(size * table_entries, GFP_KERNEL);

if (hwmgr->ps == NULL)
return -ENOMEM;

hwmgr->request_ps = kzalloc(size, GFP_KERNEL);
if (hwmgr->request_ps == NULL)
return -ENOMEM;

hwmgr->current_ps = kzalloc(size, GFP_KERNEL);
if (hwmgr->current_ps == NULL)
return -ENOMEM;

state = hwmgr->ps;

for (i = 0; i < table_entries; i++) {
@@ -166,7 +161,8 @@ int hw_init_power_state_table(struct pp_hwmgr *hwmgr)

if (state->classification.flags & PP_StateClassificationFlag_Boot) {
hwmgr->boot_ps = state;
hwmgr->current_ps = hwmgr->request_ps = state;
memcpy(hwmgr->current_ps, state, size);
memcpy(hwmgr->request_ps, state, size);
}

state->id = i + 1; /* assigned unique num for every power state id */
@@ -176,6 +172,7 @@
state = (struct pp_power_state *)((unsigned long)state + size);
}

return 0;
}

@@ -209,8 +206,6 @@ int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
}

/**
* Returns once the part of the register indicated by the mask has
* reached the given value.The indirect space is described by giving
@@ -452,6 +447,27 @@ uint8_t phm_get_voltage_index(
return i - 1;
}

uint8_t phm_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
uint32_t voltage)
{
uint8_t count = (uint8_t) (voltage_table->count);
uint8_t i = 0;

PP_ASSERT_WITH_CODE((NULL != voltage_table),
"Voltage Table empty.", return 0;);
PP_ASSERT_WITH_CODE((0 != count),
"Voltage Table empty.", return 0;);

for (i = 0; i < count; i++) {
/* find first voltage bigger than requested */
if (voltage_table->entries[i].value >= voltage)
return i;
}

/* voltage is bigger than max voltage in the table */
return i - 1;
}

uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci)
{
uint32_t i;
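The new phm_get_voltage_id() returns the index of the first table entry at or above the requested voltage, clamping to the last index when the request exceeds every entry: for a table {800, 900, 1000}, a request of 950 yields index 2, and so does a request of 1100. A standalone sketch of the same search (hypothetical flat-array stand-in for the entries table):

#include <assert.h>
#include <stdint.h>

/* Mirrors the loop above: first entry >= request, else clamp to last. */
static uint8_t voltage_id(const uint32_t *values, uint8_t count,
			  uint32_t voltage)
{
	uint8_t i;

	for (i = 0; i < count; i++)
		if (values[i] >= voltage)
			return i;
	return i - 1;
}

int main(void)
{
	const uint32_t tbl[] = { 800, 900, 1000 };

	assert(voltage_id(tbl, 3, 950) == 2);
	assert(voltage_id(tbl, 3, 1100) == 2);
	return 0;
}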
@@ -539,7 +555,8 @@ int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr
table_clk_vlt->entries[2].v = 810;
table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
table_clk_vlt->entries[3].v = 900;
pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
if (pptable_info != NULL)
pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
}

@@ -605,3 +622,94 @@ void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
printk(KERN_ERR "DAL requested level can not"
" found a available voltage in VDDC DPM Table \n");
}

void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
{
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableVoltageTransition);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableEngineTransition);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMemoryTransition);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGClockGating);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGCGTSSM);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLSClockGating);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_Force3DClockSupport);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLightSleep);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMCLS);
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisablePowerGating);

phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableDPM);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableSMUUVDHandshake);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ThermalAutoThrottling);

phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);

phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_NoOD5Support);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UserMaxClockForMultiDisplays);

phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress);

phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);

if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) &&
acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION))
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);

phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DynamicPatchPowerState);

phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_EnableSMU7ThermalManagement);

phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DynamicPowerManagement);

phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SMC);

phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DynamicUVDState);

phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_FanSpeedInTableIsRPM);

return;
}

int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr)
{
if (amdgpu_sclk_deep_sleep_en)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep);
else
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep);

if (amdgpu_powercontainment)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment);
else
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment);

hwmgr->feature_mask = amdgpu_pp_feature_mask;

return 0;
}

int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
uint32_t sclk, uint16_t id, uint16_t *voltage)
{
uint32_t vol;
int ret = 0;

if (hwmgr->chip_id < CHIP_POLARIS10) {
atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
if (*voltage >= 2000 || *voltage == 0)
*voltage = 1150;
} else {
ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
*voltage = (uint16_t)vol/100;
}
return ret;
}
@@ -781,7 +781,7 @@ static int iceland_upload_firmware(struct pp_hwmgr *hwmgr)
* @param hwmgr the address of the powerplay hardware manager.
* @return always 0
*/
int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
static int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
{
iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);

@@ -1355,14 +1355,6 @@ static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
return 0;
}

/**
* Convert a voltage value in mv unit to VID number required by SMU firmware
*/
static uint8_t convert_to_vid(uint16_t vddc)
{
return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
}

int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
{
int i;
@@ -2606,7 +2598,7 @@ static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
* @param pInput the pointer to input data (PowerState)
* @return always 0
*/
int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
static int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
{
int result;
iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
@@ -4629,7 +4621,7 @@ static int iceland_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input)
return 0;
}

int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
static int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
{
iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);

@@ -31,7 +31,7 @@ int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
return 0;
}

int polaris10_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
static int polaris10_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_uvd_power_gating(hwmgr)) {
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -47,7 +47,7 @@ int polaris10_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
return 0;
}

int polaris10_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
static int polaris10_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -55,7 +55,7 @@ int polaris10_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
return 0;
}

int polaris10_phm_powerup_vce(struct pp_hwmgr *hwmgr)
static int polaris10_phm_powerup_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -63,7 +63,7 @@ int polaris10_phm_powerup_vce(struct pp_hwmgr *hwmgr)
return 0;
}

int polaris10_phm_powerdown_samu(struct pp_hwmgr *hwmgr)
static int polaris10_phm_powerdown_samu(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SamuPowerGating))
@@ -72,7 +72,7 @@ int polaris10_phm_powerdown_samu(struct pp_hwmgr *hwmgr)
return 0;
}

int polaris10_phm_powerup_samu(struct pp_hwmgr *hwmgr)
static int polaris10_phm_powerup_samu(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SamuPowerGating))
@@ -33,11 +33,11 @@
#include "pp_debug.h"
#include "ppatomctrl.h"
#include "atombios.h"
#include "tonga_pptable.h"
#include "pptable_v1_0.h"
#include "pppcielanes.h"
#include "amd_pcie_helpers.h"
#include "hardwaremanager.h"
#include "tonga_processpptables.h"
#include "process_pptables_v1_0.h"
#include "cgs_common.h"
#include "smu74.h"
#include "smu_ucode_xfer_vi.h"
@@ -108,7 +108,7 @@ enum DPM_EVENT_SRC {

static const unsigned long PhwPolaris10_Magic = (unsigned long)(PHM_VIslands_Magic);

struct polaris10_power_state *cast_phw_polaris10_power_state(
static struct polaris10_power_state *cast_phw_polaris10_power_state(
struct pp_hw_power_state *hw_ps)
{
PP_ASSERT_WITH_CODE((PhwPolaris10_Magic == hw_ps->magic),
@@ -118,7 +118,8 @@ struct polaris10_power_state *cast_phw_polaris10_power_state(
return (struct polaris10_power_state *)hw_ps;
}

const struct polaris10_power_state *cast_const_phw_polaris10_power_state(
static const struct polaris10_power_state *
cast_const_phw_polaris10_power_state(
const struct pp_hw_power_state *hw_ps)
{
PP_ASSERT_WITH_CODE((PhwPolaris10_Magic == hw_ps->magic),
@@ -141,7 +142,7 @@ static bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
* @param hwmgr the address of the powerplay hardware manager.
* @return always 0
*/
int phm_get_mc_microcode_version (struct pp_hwmgr *hwmgr)
static int phm_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
{
cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);

@@ -150,7 +151,7 @@ int phm_get_mc_microcode_version (struct pp_hwmgr *hwmgr)
return 0;
}

uint16_t phm_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
static uint16_t phm_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
{
uint32_t speedCntl = 0;

@@ -161,7 +162,7 @@ uint16_t phm_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
}

int phm_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
static int phm_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
{
uint32_t link_width;

@@ -181,7 +182,7 @@ int phm_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
* @param pHwMgr the address of the powerplay hardware manager.
* @return always PP_Result_OK
*/
int polaris10_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
static int polaris10_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
{
PP_ASSERT_WITH_CODE(
(hwmgr->smumgr->smumgr_funcs->send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable) == 0),
@@ -661,7 +662,7 @@ static int polaris10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
* on the power policy or external client requests,
* such as UVD request, etc.
*/
int polaris10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
static int polaris10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *table_info =
@@ -735,11 +736,6 @@ int polaris10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
return 0;
}

uint8_t convert_to_vid(uint16_t vddc)
{
return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
}

/**
* Mvdd table preparation for SMC.
*
@@ -840,7 +836,7 @@ static int polaris10_populate_cac_table(struct pp_hwmgr *hwmgr,
* @return always 0
*/

int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
static int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
struct SMU74_Discrete_DpmTable *table)
{
polaris10_populate_smc_vddci_table(hwmgr, table);
@@ -1417,7 +1413,7 @@ static int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
* @param mclk the MCLK value to be used in the decision if MVDD should be high or low.
* @param voltage the SMC VOLTAGE structure to be populated
*/
int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr,
static int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr,
uint32_t mclk, SMIO_Pattern *smio_pat)
{
const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
@@ -1931,7 +1927,7 @@ static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
}


int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
@@ -2560,7 +2556,7 @@ static int polaris10_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
return polaris10_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}

int polaris10_pcie_performance_request(struct pp_hwmgr *hwmgr)
static int polaris10_pcie_performance_request(struct pp_hwmgr *hwmgr)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
data->pcie_performance_request = true;
@@ -2568,7 +2564,7 @@ int polaris10_pcie_performance_request(struct pp_hwmgr *hwmgr)
return 0;
}

int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
static int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
int tmp_result, result = 0;
tmp_result = (!polaris10_is_dpm_running(hwmgr)) ? 0 : -1;
@@ -2749,12 +2745,12 @@ int polaris10_reset_asic_tasks(struct pp_hwmgr *hwmgr)
return 0;
}

int polaris10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
static int polaris10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
return phm_hwmgr_backend_fini(hwmgr);
}

int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
static int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);

@@ -3109,7 +3105,7 @@ static int polaris10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
return 0;
}

int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
static int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -3118,11 +3114,27 @@ int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
struct phm_ppt_v1_voltage_lookup_table *lookup_table =
table_info->vddc_lookup_table;
uint32_t i;
uint32_t hw_revision, sub_vendor_id, sub_sys_id;
struct cgs_system_info sys_info = {0};

if (hwmgr->chip_id == CHIP_POLARIS10 && hwmgr->hw_revision == 0xC7 &&
((hwmgr->sub_sys_id == 0xb37 && hwmgr->sub_vendor_id == 0x1002) ||
(hwmgr->sub_sys_id == 0x4a8 && hwmgr->sub_vendor_id == 0x1043) ||
(hwmgr->sub_sys_id == 0x9480 && hwmgr->sub_vendor_id == 0x1682))) {
sys_info.size = sizeof(struct cgs_system_info);

sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
cgs_query_system_info(hwmgr->device, &sys_info);
hw_revision = (uint32_t)sys_info.value;

sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID;
cgs_query_system_info(hwmgr->device, &sys_info);
sub_sys_id = (uint32_t)sys_info.value;

sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID;
cgs_query_system_info(hwmgr->device, &sys_info);
sub_vendor_id = (uint32_t)sys_info.value;

if (hwmgr->chip_id == CHIP_POLARIS10 && hw_revision == 0xC7 &&
((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
(sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) ||
(sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) {
if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
return 0;

@ -3137,7 +3149,7 @@ int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
|
||||
}
|
||||
|
||||
|
||||
int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
|
||||
static int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
|
||||
{
|
||||
struct polaris10_hwmgr *data;
|
||||
struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
|
||||
@ -3880,7 +3892,7 @@ static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
|
||||
|
||||
ps = (struct polaris10_power_state *)(&state->hardware);
|
||||
|
||||
result = tonga_get_powerplay_table_entry(hwmgr, entry_index, state,
|
||||
result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
|
||||
polaris10_get_pp_table_entry_callback_func);
|
||||
|
||||
/* This is the earliest time we have all the dependency table and the VBIOS boot state
|
||||
@ -4347,7 +4359,8 @@ static int polaris10_generate_dpm_level_enable_mask(
|
||||
return 0;
|
||||
}
|
||||
|
||||
int polaris10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
|
||||
static int
|
||||
polaris10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
|
||||
{
|
||||
return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
|
||||
PPSMC_MSG_UVDDPM_Enable :
|
||||
@ -4361,7 +4374,8 @@ int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
|
||||
PPSMC_MSG_VCEDPM_Disable);
|
||||
}
|
||||
|
||||
int polaris10_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
|
||||
static int
|
||||
polaris10_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
|
||||
{
|
||||
return smum_send_msg_to_smc(hwmgr->smumgr, enable?
|
||||
PPSMC_MSG_SAMUDPM_Enable :
|
||||
@ -4675,14 +4689,16 @@ static int polaris10_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_
|
||||
}
|
||||
|
||||
|
||||
int polaris10_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
|
||||
static int
|
||||
polaris10_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
|
||||
{
|
||||
PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
|
||||
|
||||
return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1;
|
||||
}
|
||||
|
||||
int polaris10_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
|
||||
static int
|
||||
polaris10_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
|
||||
{
|
||||
uint32_t num_active_displays = 0;
|
||||
struct cgs_display_info info = {0};
|
||||
@ -4705,7 +4721,7 @@ int polaris10_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwm
|
||||
* @param hwmgr the address of the powerplay hardware manager.
|
||||
* @return always OK
|
||||
*/
|
||||
int polaris10_program_display_gap(struct pp_hwmgr *hwmgr)
|
||||
static int polaris10_program_display_gap(struct pp_hwmgr *hwmgr)
|
||||
{
|
||||
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
|
||||
uint32_t num_active_displays = 0;
|
||||
@ -4750,7 +4766,7 @@ int polaris10_program_display_gap(struct pp_hwmgr *hwmgr)
|
||||
}
|
||||
|
||||
|
||||
int polaris10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
|
||||
static int polaris10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
|
||||
{
|
||||
return polaris10_program_display_gap(hwmgr);
|
||||
}
|
||||
@ -4774,13 +4790,15 @@ static int polaris10_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_
|
||||
PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
|
||||
}
|
||||
|
||||
int polaris10_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
|
||||
static int
|
||||
polaris10_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
|
||||
const void *thermal_interrupt_info)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool polaris10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
|
||||
static bool polaris10_check_smc_update_required_for_display_configuration(
|
||||
struct pp_hwmgr *hwmgr)
|
||||
{
|
||||
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
|
||||
bool is_update_required = false;
|
||||
@ -4810,7 +4828,9 @@ static inline bool polaris10_are_power_levels_equal(const struct polaris10_perfo
|
||||
(pl1->pcie_lane == pl2->pcie_lane));
|
||||
}
|
||||
|
||||
int polaris10_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
|
||||
static int polaris10_check_states_equal(struct pp_hwmgr *hwmgr,
|
||||
const struct pp_hw_power_state *pstate1,
|
||||
const struct pp_hw_power_state *pstate2, bool *equal)
|
||||
{
|
||||
const struct polaris10_power_state *psa = cast_const_phw_polaris10_power_state(pstate1);
|
||||
const struct polaris10_power_state *psb = cast_const_phw_polaris10_power_state(pstate2);
|
||||
@ -4841,7 +4861,7 @@ int polaris10_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_powe
|
||||
return 0;
|
||||
}
|
||||
|
||||
int polaris10_upload_mc_firmware(struct pp_hwmgr *hwmgr)
|
||||
static int polaris10_upload_mc_firmware(struct pp_hwmgr *hwmgr)
|
||||
{
|
||||
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
|
||||
|
||||
@ -4954,7 +4974,7 @@ static int polaris10_init_sclk_threshold(struct pp_hwmgr *hwmgr)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int polaris10_setup_asic_task(struct pp_hwmgr *hwmgr)
|
||||
static int polaris10_setup_asic_task(struct pp_hwmgr *hwmgr)
|
||||
{
|
||||
int tmp_result, result = 0;
|
||||
|
||||
@ -5225,7 +5245,7 @@ static const struct pp_hwmgr_func polaris10_hwmgr_funcs = {
|
||||
.get_sclk = polaris10_dpm_get_sclk,
|
||||
.patch_boot_state = polaris10_dpm_patch_boot_state,
|
||||
.get_pp_table_entry = polaris10_get_pp_table_entry,
|
||||
.get_num_of_pp_table_entries = tonga_get_number_of_powerplay_table_entries,
|
||||
.get_num_of_pp_table_entries = get_number_of_powerplay_table_entries_v1_0,
|
||||
.print_current_perforce_level = polaris10_print_current_perforce_level,
|
||||
.powerdown_uvd = polaris10_phm_powerdown_uvd,
|
||||
.powergate_uvd = polaris10_phm_powergate_uvd,
|
||||
@ -5262,7 +5282,7 @@ static const struct pp_hwmgr_func polaris10_hwmgr_funcs = {
|
||||
int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr)
|
||||
{
|
||||
hwmgr->hwmgr_func = &polaris10_hwmgr_funcs;
|
||||
hwmgr->pptable_func = &tonga_pptable_funcs;
|
||||
hwmgr->pptable_func = &pptable_v1_0_funcs;
|
||||
pp_polaris10_thermal_initialize(hwmgr);
|
||||
|
||||
return 0;
|
||||
|
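Note on the voltage-workaround hunk above: chip identity is now read live through CGS rather than from fields cached on the hwmgr (struct amd_pp_init loses rev_id/sub_sys_id/sub_vendor_id later in this series). A minimal sketch of that query pattern, using only the calls visible in the hunk:

    /* Sketch only: query one CGS system-info id and read back the value. */
    struct cgs_system_info sys_info = {0};
    uint32_t sub_sys_id;

    sys_info.size = sizeof(struct cgs_system_info);
    sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID;
    cgs_query_system_info(hwmgr->device, &sys_info);
    sub_sys_id = (uint32_t)sys_info.value;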
@ -30,6 +30,7 @@
#include "ppatomctrl.h"
#include "polaris10_ppsmc.h"
#include "polaris10_powertune.h"
#include "polaris10_smumgr.h"

#define POLARIS10_MAX_HARDWARE_POWERLEVELS 2

@ -165,10 +166,6 @@ struct polaris10_pcie_perf_range {
uint16_t max;
uint16_t min;
};
struct polaris10_range_table {
uint32_t trans_lower_frequency; /* in 10khz */
uint32_t trans_upper_frequency;
};

struct polaris10_hwmgr {
struct polaris10_dpm_table dpm_table;
@ -66,19 +66,6 @@ struct polaris10_pt_config_reg {
enum polaris10_pt_config_reg_type type;
};

struct polaris10_pt_defaults {
uint8_t SviLoadLineEn;
uint8_t SviLoadLineVddC;
uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
uint8_t TDC_MAWt;
uint8_t TdcWaterfallCtl;
uint8_t DTEAmbientTempBase;

uint32_t DisplayCac;
uint32_t BAPM_TEMP_GRADIENT;
uint16_t BAPMTI_R[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
uint16_t BAPMTI_RC[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
};

void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
@ -152,7 +152,7 @@ int polaris10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
return 0;
}

int polaris10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
static int polaris10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
{
int result;

@ -425,7 +425,7 @@ int polaris10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
* @param Result the last failure code
* @return result from set temperature range routine
*/
int tf_polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
static int tf_polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
@ -537,7 +537,7 @@ int tf_polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
* @param Result the last failure code
* @return result from set temperature range routine
*/
int tf_polaris10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
static int tf_polaris10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
/* If the fantable setup has failed we could have disabled
@ -164,7 +164,7 @@ typedef struct _ATOM_Tonga_State {
typedef struct _ATOM_Tonga_State_Array {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */
ATOM_Tonga_State states[1]; /* Dynamically allocate entries. */
ATOM_Tonga_State entries[1]; /* Dynamically allocate entries. */
} ATOM_Tonga_State_Array;

typedef struct _ATOM_Tonga_MCLK_Dependency_Record {
@ -23,13 +23,13 @@
#include <linux/module.h>
#include <linux/slab.h>

#include "tonga_processpptables.h"
#include "process_pptables_v1_0.h"
#include "ppatomctrl.h"
#include "atombios.h"
#include "pp_debug.h"
#include "hwmgr.h"
#include "cgs_common.h"
#include "tonga_pptable.h"
#include "pptable_v1_0.h"

/**
* Private Function used during initialization.
@ -153,12 +153,14 @@ const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
static int get_vddc_lookup_table(
struct pp_hwmgr *hwmgr,
phm_ppt_v1_voltage_lookup_table **lookup_table,
const ATOM_Tonga_Voltage_Lookup_Table *vddc_lookup_pp_tables,
uint32_t max_levels
const ATOM_Tonga_Voltage_Lookup_Table *vddc_lookup_pp_tables,
uint32_t max_levels
)
{
uint32_t table_size, i;
phm_ppt_v1_voltage_lookup_table *table;
phm_ppt_v1_voltage_lookup_record *record;
ATOM_Tonga_Voltage_Lookup_Record *atom_record;

PP_ASSERT_WITH_CODE((0 != vddc_lookup_pp_tables->ucNumEntries),
"Invalid CAC Leakage PowerPlay Table!", return 1);
@ -176,15 +178,17 @@ static int get_vddc_lookup_table(
table->count = vddc_lookup_pp_tables->ucNumEntries;

for (i = 0; i < vddc_lookup_pp_tables->ucNumEntries; i++) {
table->entries[i].us_calculated = 0;
table->entries[i].us_vdd =
vddc_lookup_pp_tables->entries[i].usVdd;
table->entries[i].us_cac_low =
vddc_lookup_pp_tables->entries[i].usCACLow;
table->entries[i].us_cac_mid =
vddc_lookup_pp_tables->entries[i].usCACMid;
table->entries[i].us_cac_high =
vddc_lookup_pp_tables->entries[i].usCACHigh;
record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
phm_ppt_v1_voltage_lookup_record,
entries, table, i);
atom_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
ATOM_Tonga_Voltage_Lookup_Record,
entries, vddc_lookup_pp_tables, i);
record->us_calculated = 0;
record->us_vdd = atom_record->usVdd;
record->us_cac_low = atom_record->usCACLow;
record->us_cac_mid = atom_record->usCACMid;
record->us_cac_high = atom_record->usCACHigh;
}

*lookup_table = table;
@ -313,11 +317,12 @@ static int init_dpm_2_parameters(
static int get_valid_clk(
struct pp_hwmgr *hwmgr,
struct phm_clock_array **clk_table,
const phm_ppt_v1_clock_voltage_dependency_table * clk_volt_pp_table
phm_ppt_v1_clock_voltage_dependency_table const *clk_volt_pp_table
)
{
uint32_t table_size, i;
struct phm_clock_array *table;
phm_ppt_v1_clock_voltage_dependency_record *dep_record;

PP_ASSERT_WITH_CODE((0 != clk_volt_pp_table->count),
"Invalid PowerPlay Table!", return -1);
@ -334,9 +339,12 @@ static int get_valid_clk(

table->count = (uint32_t)clk_volt_pp_table->count;

for (i = 0; i < table->count; i++)
table->values[i] = (uint32_t)clk_volt_pp_table->entries[i].clk;

for (i = 0; i < table->count; i++) {
dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
phm_ppt_v1_clock_voltage_dependency_record,
entries, clk_volt_pp_table, i);
table->values[i] = (uint32_t)dep_record->clk;
}
*clk_table = table;

return 0;
@ -345,7 +353,7 @@ static int get_valid_clk(
static int get_hard_limits(
struct pp_hwmgr *hwmgr,
struct phm_clock_and_voltage_limits *limits,
const ATOM_Tonga_Hard_Limit_Table * limitable
ATOM_Tonga_Hard_Limit_Table const *limitable
)
{
PP_ASSERT_WITH_CODE((0 != limitable->ucNumEntries), "Invalid PowerPlay Table!", return -1);
@ -363,11 +371,13 @@ static int get_hard_limits(
static int get_mclk_voltage_dependency_table(
struct pp_hwmgr *hwmgr,
phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_mclk_dep_table,
const ATOM_Tonga_MCLK_Dependency_Table * mclk_dep_table
ATOM_Tonga_MCLK_Dependency_Table const *mclk_dep_table
)
{
uint32_t table_size, i;
phm_ppt_v1_clock_voltage_dependency_table *mclk_table;
phm_ppt_v1_clock_voltage_dependency_record *mclk_table_record;
ATOM_Tonga_MCLK_Dependency_Record *mclk_dep_record;

PP_ASSERT_WITH_CODE((0 != mclk_dep_table->ucNumEntries),
"Invalid PowerPlay Table!", return -1);
@ -385,16 +395,17 @@ static int get_mclk_voltage_dependency_table(
mclk_table->count = (uint32_t)mclk_dep_table->ucNumEntries;

for (i = 0; i < mclk_dep_table->ucNumEntries; i++) {
mclk_table->entries[i].vddInd =
mclk_dep_table->entries[i].ucVddcInd;
mclk_table->entries[i].vdd_offset =
mclk_dep_table->entries[i].usVddgfxOffset;
mclk_table->entries[i].vddci =
mclk_dep_table->entries[i].usVddci;
mclk_table->entries[i].mvdd =
mclk_dep_table->entries[i].usMvdd;
mclk_table->entries[i].clk =
mclk_dep_table->entries[i].ulMclk;
mclk_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
phm_ppt_v1_clock_voltage_dependency_record,
entries, mclk_table, i);
mclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
ATOM_Tonga_MCLK_Dependency_Record,
entries, mclk_dep_table, i);
mclk_table_record->vddInd = mclk_dep_record->ucVddcInd;
mclk_table_record->vdd_offset = mclk_dep_record->usVddgfxOffset;
mclk_table_record->vddci = mclk_dep_record->usVddci;
mclk_table_record->mvdd = mclk_dep_record->usMvdd;
mclk_table_record->clk = mclk_dep_record->ulMclk;
}

*pp_tonga_mclk_dep_table = mclk_table;
@ -405,15 +416,17 @@ static int get_mclk_voltage_dependency_table(
static int get_sclk_voltage_dependency_table(
struct pp_hwmgr *hwmgr,
phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_sclk_dep_table,
const PPTable_Generic_SubTable_Header *sclk_dep_table
PPTable_Generic_SubTable_Header const *sclk_dep_table
)
{
uint32_t table_size, i;
phm_ppt_v1_clock_voltage_dependency_table *sclk_table;
phm_ppt_v1_clock_voltage_dependency_record *sclk_table_record;

if (sclk_dep_table->ucRevId < 1) {
const ATOM_Tonga_SCLK_Dependency_Table *tonga_table =
(ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table;
ATOM_Tonga_SCLK_Dependency_Record *sclk_dep_record;

PP_ASSERT_WITH_CODE((0 != tonga_table->ucNumEntries),
"Invalid PowerPlay Table!", return -1);
@ -431,20 +444,23 @@ static int get_sclk_voltage_dependency_table(
sclk_table->count = (uint32_t)tonga_table->ucNumEntries;

for (i = 0; i < tonga_table->ucNumEntries; i++) {
sclk_table->entries[i].vddInd =
tonga_table->entries[i].ucVddInd;
sclk_table->entries[i].vdd_offset =
tonga_table->entries[i].usVddcOffset;
sclk_table->entries[i].clk =
tonga_table->entries[i].ulSclk;
sclk_table->entries[i].cks_enable =
(((tonga_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
sclk_table->entries[i].cks_voffset =
(tonga_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
sclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
ATOM_Tonga_SCLK_Dependency_Record,
entries, tonga_table, i);
sclk_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
phm_ppt_v1_clock_voltage_dependency_record,
entries, sclk_table, i);
sclk_table_record->vddInd = sclk_dep_record->ucVddInd;
sclk_table_record->vdd_offset = sclk_dep_record->usVddcOffset;
sclk_table_record->clk = sclk_dep_record->ulSclk;
sclk_table_record->cks_enable =
(((sclk_dep_record->ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
sclk_table_record->cks_voffset = (sclk_dep_record->ucCKSVOffsetandDisable & 0x7F);
}
} else {
const ATOM_Polaris_SCLK_Dependency_Table *polaris_table =
(ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table;
ATOM_Polaris_SCLK_Dependency_Record *sclk_dep_record;

PP_ASSERT_WITH_CODE((0 != polaris_table->ucNumEntries),
"Invalid PowerPlay Table!", return -1);
@ -462,17 +478,19 @@ static int get_sclk_voltage_dependency_table(
sclk_table->count = (uint32_t)polaris_table->ucNumEntries;

for (i = 0; i < polaris_table->ucNumEntries; i++) {
sclk_table->entries[i].vddInd =
polaris_table->entries[i].ucVddInd;
sclk_table->entries[i].vdd_offset =
polaris_table->entries[i].usVddcOffset;
sclk_table->entries[i].clk =
polaris_table->entries[i].ulSclk;
sclk_table->entries[i].cks_enable =
(((polaris_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
sclk_table->entries[i].cks_voffset =
(polaris_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
sclk_table->entries[i].sclk_offset = polaris_table->entries[i].ulSclkOffset;
sclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
ATOM_Polaris_SCLK_Dependency_Record,
entries, polaris_table, i);
sclk_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
phm_ppt_v1_clock_voltage_dependency_record,
entries, sclk_table, i);
sclk_table_record->vddInd = sclk_dep_record->ucVddInd;
sclk_table_record->vdd_offset = sclk_dep_record->usVddcOffset;
sclk_table_record->clk = sclk_dep_record->ulSclk;
sclk_table_record->cks_enable =
(((sclk_dep_record->ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
sclk_table_record->cks_voffset = (sclk_dep_record->ucCKSVOffsetandDisable & 0x7F);
sclk_table_record->sclk_offset = sclk_dep_record->ulSclkOffset;
}
}
*pp_tonga_sclk_dep_table = sclk_table;
@ -483,16 +501,19 @@ static int get_sclk_voltage_dependency_table(
static int get_pcie_table(
struct pp_hwmgr *hwmgr,
phm_ppt_v1_pcie_table **pp_tonga_pcie_table,
const PPTable_Generic_SubTable_Header * pTable
PPTable_Generic_SubTable_Header const *ptable
)
{
uint32_t table_size, i, pcie_count;
phm_ppt_v1_pcie_table *pcie_table;
struct phm_ppt_v1_information *pp_table_information =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
phm_ppt_v1_pcie_record *pcie_record;

if (ptable->ucRevId < 1) {
const ATOM_Tonga_PCIE_Table *atom_pcie_table = (ATOM_Tonga_PCIE_Table *)ptable;
ATOM_Tonga_PCIE_Record *atom_pcie_record;

if (pTable->ucRevId < 1) {
const ATOM_Tonga_PCIE_Table *atom_pcie_table = (ATOM_Tonga_PCIE_Table *)pTable;
PP_ASSERT_WITH_CODE((atom_pcie_table->ucNumEntries != 0),
"Invalid PowerPlay Table!", return -1);

@ -518,18 +539,23 @@ static int get_pcie_table(
Disregarding the excess entries... \n");

pcie_table->count = pcie_count;

for (i = 0; i < pcie_count; i++) {
pcie_table->entries[i].gen_speed =
atom_pcie_table->entries[i].ucPCIEGenSpeed;
pcie_table->entries[i].lane_width =
atom_pcie_table->entries[i].usPCIELaneWidth;
pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
phm_ppt_v1_pcie_record,
entries, pcie_table, i);
atom_pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
ATOM_Tonga_PCIE_Record,
entries, atom_pcie_table, i);
pcie_record->gen_speed = atom_pcie_record->ucPCIEGenSpeed;
pcie_record->lane_width = atom_pcie_record->usPCIELaneWidth;
}

*pp_tonga_pcie_table = pcie_table;
} else {
/* Polaris10/Polaris11 and newer. */
const ATOM_Polaris10_PCIE_Table *atom_pcie_table = (ATOM_Polaris10_PCIE_Table *)pTable;
const ATOM_Polaris10_PCIE_Table *atom_pcie_table = (ATOM_Polaris10_PCIE_Table *)ptable;
ATOM_Polaris10_PCIE_Record *atom_pcie_record;

PP_ASSERT_WITH_CODE((atom_pcie_table->ucNumEntries != 0),
"Invalid PowerPlay Table!", return -1);

@ -557,12 +583,15 @@ static int get_pcie_table(
pcie_table->count = pcie_count;

for (i = 0; i < pcie_count; i++) {
pcie_table->entries[i].gen_speed =
atom_pcie_table->entries[i].ucPCIEGenSpeed;
pcie_table->entries[i].lane_width =
atom_pcie_table->entries[i].usPCIELaneWidth;
pcie_table->entries[i].pcie_sclk =
atom_pcie_table->entries[i].ulPCIE_Sclk;
pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
phm_ppt_v1_pcie_record,
entries, pcie_table, i);
atom_pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
ATOM_Polaris10_PCIE_Record,
entries, atom_pcie_table, i);
pcie_record->gen_speed = atom_pcie_record->ucPCIEGenSpeed;
pcie_record->lane_width = atom_pcie_record->usPCIELaneWidth;
pcie_record->pcie_sclk = atom_pcie_record->ulPCIE_Sclk;
}

*pp_tonga_pcie_table = pcie_table;
@ -684,6 +713,7 @@ static int get_mm_clock_voltage_table(
uint32_t table_size, i;
const ATOM_Tonga_MM_Dependency_Record *mm_dependency_record;
phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table;
phm_ppt_v1_mm_clock_voltage_dependency_record *mm_table_record;

PP_ASSERT_WITH_CODE((0 != mm_dependency_table->ucNumEntries),
"Invalid PowerPlay Table!", return -1);
@ -700,14 +730,19 @@ static int get_mm_clock_voltage_table(
mm_table->count = mm_dependency_table->ucNumEntries;

for (i = 0; i < mm_dependency_table->ucNumEntries; i++) {
mm_dependency_record = &mm_dependency_table->entries[i];
mm_table->entries[i].vddcInd = mm_dependency_record->ucVddcInd;
mm_table->entries[i].vddgfx_offset = mm_dependency_record->usVddgfxOffset;
mm_table->entries[i].aclk = mm_dependency_record->ulAClk;
mm_table->entries[i].samclock = mm_dependency_record->ulSAMUClk;
mm_table->entries[i].eclk = mm_dependency_record->ulEClk;
mm_table->entries[i].vclk = mm_dependency_record->ulVClk;
mm_table->entries[i].dclk = mm_dependency_record->ulDClk;
mm_dependency_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
ATOM_Tonga_MM_Dependency_Record,
entries, mm_dependency_table, i);
mm_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
phm_ppt_v1_mm_clock_voltage_dependency_record,
entries, mm_table, i);
mm_table_record->vddcInd = mm_dependency_record->ucVddcInd;
mm_table_record->vddgfx_offset = mm_dependency_record->usVddgfxOffset;
mm_table_record->aclk = mm_dependency_record->ulAClk;
mm_table_record->samclock = mm_dependency_record->ulSAMUClk;
mm_table_record->eclk = mm_dependency_record->ulEClk;
mm_table_record->vclk = mm_dependency_record->ulVClk;
mm_table_record->dclk = mm_dependency_record->ulDClk;
}

*tonga_mm_table = mm_table;
@ -1014,7 +1049,7 @@ static int check_powerplay_tables(
return 0;
}

int tonga_pp_tables_initialize(struct pp_hwmgr *hwmgr)
int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr)
{
int result = 0;
const ATOM_Tonga_POWERPLAYTABLE *powerplay_table;
@ -1065,7 +1100,7 @@ int tonga_pp_tables_initialize(struct pp_hwmgr *hwmgr)
return result;
}

int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v1_information *pp_table_information =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@ -1109,14 +1144,14 @@ int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
return 0;
}

const struct pp_table_func tonga_pptable_funcs = {
.pptable_init = tonga_pp_tables_initialize,
.pptable_fini = tonga_pp_tables_uninitialize,
const struct pp_table_func pptable_v1_0_funcs = {
.pptable_init = pp_tables_v1_0_initialize,
.pptable_fini = pp_tables_v1_0_uninitialize,
};

int tonga_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
int get_number_of_powerplay_table_entries_v1_0(struct pp_hwmgr *hwmgr)
{
const ATOM_Tonga_State_Array * state_arrays;
ATOM_Tonga_State_Array const *state_arrays;
const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);

PP_ASSERT_WITH_CODE((NULL != pp_table),
@ -1163,6 +1198,71 @@ static uint32_t make_classification_flags(struct pp_hwmgr *hwmgr,
return result;
}

static int ppt_get_num_of_vce_state_table_entries_v1_0(struct pp_hwmgr *hwmgr)
{
const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
const ATOM_Tonga_VCE_State_Table *vce_state_table =
(ATOM_Tonga_VCE_State_Table *)(((unsigned long)pp_table) + le16_to_cpu(pp_table->usVCEStateTableOffset));

if (vce_state_table == NULL)
return 0;

return vce_state_table->ucNumEntries;
}

static int ppt_get_vce_state_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t i,
struct pp_vce_state *vce_state, void **clock_info, uint32_t *flag)
{
const ATOM_Tonga_VCE_State_Record *vce_state_record;
ATOM_Tonga_SCLK_Dependency_Record *sclk_dep_record;
ATOM_Tonga_MCLK_Dependency_Record *mclk_dep_record;
ATOM_Tonga_MM_Dependency_Record *mm_dep_record;
const ATOM_Tonga_POWERPLAYTABLE *pptable = get_powerplay_table(hwmgr);
const ATOM_Tonga_VCE_State_Table *vce_state_table = (ATOM_Tonga_VCE_State_Table *)(((unsigned long)pptable)
+ le16_to_cpu(pptable->usVCEStateTableOffset));
const ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table = (ATOM_Tonga_SCLK_Dependency_Table *)(((unsigned long)pptable)
+ le16_to_cpu(pptable->usSclkDependencyTableOffset));
const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = (ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long)pptable)
+ le16_to_cpu(pptable->usMclkDependencyTableOffset));
const ATOM_Tonga_MM_Dependency_Table *mm_dep_table = (ATOM_Tonga_MM_Dependency_Table *)(((unsigned long)pptable)
+ le16_to_cpu(pptable->usMMDependencyTableOffset));

PP_ASSERT_WITH_CODE((i < vce_state_table->ucNumEntries),
"Requested state entry ID is out of range!",
return -EINVAL);

vce_state_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
ATOM_Tonga_VCE_State_Record,
entries, vce_state_table, i);
sclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
ATOM_Tonga_SCLK_Dependency_Record,
entries, sclk_dep_table,
vce_state_record->ucSCLKIndex);
mm_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
ATOM_Tonga_MM_Dependency_Record,
entries, mm_dep_table,
vce_state_record->ucVCEClockIndex);
*flag = vce_state_record->ucFlag;

vce_state->evclk = mm_dep_record->ulEClk;
vce_state->ecclk = mm_dep_record->ulEClk;
vce_state->sclk = sclk_dep_record->ulSclk;

if (vce_state_record->ucMCLKIndex >= mclk_dep_table->ucNumEntries)
mclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
ATOM_Tonga_MCLK_Dependency_Record,
entries, mclk_dep_table,
mclk_dep_table->ucNumEntries - 1);
else
mclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
ATOM_Tonga_MCLK_Dependency_Record,
entries, mclk_dep_table,
vce_state_record->ucMCLKIndex);

vce_state->mclk = mclk_dep_record->ulMclk;
return 0;
}

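The VCE state lookup above clamps an out-of-range MCLK index to the table's last record rather than failing. A compact equivalent of that if/else (illustrative only; min_t comes from linux/kernel.h):

    /* Illustrative equivalent of the clamp spelled out above. */
    uint32_t idx = min_t(uint32_t, vce_state_record->ucMCLKIndex,
                         mclk_dep_table->ucNumEntries - 1);
    mclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
                    ATOM_Tonga_MCLK_Dependency_Record,
                    entries, mclk_dep_table, idx);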
/**
* Create a Power State out of an entry in the PowerPlay table.
* This function is called by the hardware back-end.
@ -1171,15 +1271,17 @@ static uint32_t make_classification_flags(struct pp_hwmgr *hwmgr,
* @param power_state The address of the PowerState instance being created.
* @return -1 if the entry cannot be retrieved.
*/
int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr,
int get_powerplay_table_entry_v1_0(struct pp_hwmgr *hwmgr,
uint32_t entry_index, struct pp_power_state *power_state,
int (*call_back_func)(struct pp_hwmgr *, void *,
struct pp_power_state *, void *, uint32_t))
{
int result = 0;
const ATOM_Tonga_State_Array * state_arrays;
const ATOM_Tonga_State_Array *state_arrays;
const ATOM_Tonga_State *state_entry;
const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
int i, j;
uint32_t flags = 0;

PP_ASSERT_WITH_CODE((NULL != pp_table), "Missing PowerPlay Table!", return -1;);
power_state->classification.bios_index = entry_index;
@ -1196,7 +1298,9 @@ int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr,
PP_ASSERT_WITH_CODE((entry_index <= state_arrays->ucNumEntries),
"Invalid PowerPlay Table State Array Entry.", return -1);

state_entry = &(state_arrays->states[entry_index]);
state_entry = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
ATOM_Tonga_State, entries,
state_arrays, entry_index);

result = call_back_func(hwmgr, (void *)state_entry, power_state,
(void *)pp_table,
@ -1209,5 +1313,13 @@ int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr,
PP_StateClassificationFlag_Boot))
result = hwmgr->hwmgr_func->patch_boot_state(hwmgr, &(power_state->hardware));

hwmgr->num_vce_state_tables = i = ppt_get_num_of_vce_state_table_entries_v1_0(hwmgr);

if ((i != 0) && (i <= PP_MAX_VCE_LEVELS)) {
for (j = 0; j < i; j++)
ppt_get_vce_state_table_entry_v1_0(hwmgr, j, &(hwmgr->vce_states[j]), NULL, &flags);
}

return result;
}
@ -20,14 +20,14 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef TONGA_PROCESSPPTABLES_H
#define TONGA_PROCESSPPTABLES_H
#ifndef _PROCESSPPTABLES_V1_0_H
#define _PROCESSPPTABLES_V1_0_H

#include "hwmgr.h"

extern const struct pp_table_func tonga_pptable_funcs;
extern int tonga_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr);
extern int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, uint32_t entry_index,
extern const struct pp_table_func pptable_v1_0_funcs;
extern int get_number_of_powerplay_table_entries_v1_0(struct pp_hwmgr *hwmgr);
extern int get_powerplay_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t entry_index,
struct pp_power_state *power_state, int (*call_back_func)(struct pp_hwmgr *, void *,
struct pp_power_state *, void *, uint32_t));
@ -1523,7 +1523,7 @@ int get_number_of_vce_state_table_entries(

int get_vce_state_table_entry(struct pp_hwmgr *hwmgr,
unsigned long i,
struct PP_VCEState *vce_state,
struct pp_vce_state *vce_state,
void **clock_info,
unsigned long *flag)
{
@ -29,8 +29,8 @@
#include "tonga_hwmgr.h"
#include "pptable.h"
#include "processpptables.h"
#include "tonga_processpptables.h"
#include "tonga_pptable.h"
#include "process_pptables_v1_0.h"
#include "pptable_v1_0.h"
#include "pp_debug.h"
#include "tonga_ppsmc.h"
#include "cgs_common.h"
@ -202,6 +202,7 @@ uint8_t tonga_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
return i - 1;
}

/**
* @brief PhwTonga_GetVoltageOrder
* Returns index of requested voltage record in lookup(table)
@ -229,7 +230,7 @@ uint8_t tonga_get_voltage_index(phm_ppt_v1_voltage_lookup_table *look_up_table,
return i-1;
}

bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
static bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
{
/*
* We return the status of Voltage Control instead of checking SCLK/MCLK DPM
@ -334,7 +335,7 @@ void tonga_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)

}

int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
static int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
{
tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);

@ -771,7 +772,7 @@ int tonga_set_boot_state(struct pp_hwmgr *hwmgr)
* @param hwmgr the address of the powerplay hardware manager.
* @return always 0
*/
int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
static int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
{
tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
struct tonga_smumgr *tonga_smu = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
@ -1314,15 +1315,6 @@ static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
return 0;
}

/**
* Convert a voltage value in mv unit to VID number required by SMU firmware
*/
static uint8_t convert_to_vid(uint16_t vddc)
{
return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
}

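The static convert_to_vid() removed above is replaced by a single shared copy declared in hwmgr.h below (VOLTAGE_SCALE is 4 there). A worked value, to make the encoding concrete:

    /* convert_to_vid(1100): (6200 - 1100 * 4) / 25 = 1800 / 25 = 72,
     * i.e. VIDs count 6.25 mV steps down from a 1.55 V ceiling
     * (6200 = 1550 mV * VOLTAGE_SCALE). */
    uint8_t vid = convert_to_vid(1100);  /* == 72 */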
/**
* Preparation of vddc and vddgfx CAC tables for SMC.
*
@ -2894,7 +2886,7 @@ int tonga_populate_smc_initial_state(struct pp_hwmgr *hwmgr,
* @param pInput the pointer to input data (PowerState)
* @return always 0
*/
int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
static int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
{
int result;
tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
@ -3989,7 +3981,7 @@ int tonga_set_valid_flag(phw_tonga_mc_reg_table *table)
return 0;
}

int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
static int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
{
int result;
tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
@ -4326,6 +4318,79 @@ int tonga_program_voting_clients(struct pp_hwmgr *hwmgr)
return 0;
}

static void tonga_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
{
bool protection;
enum DPM_EVENT_SRC src;

switch (sources) {
default:
printk(KERN_ERR "Unknown throttling event sources.");
/* fall through */
case 0:
protection = false;
/* src is unused */
break;
case (1 << PHM_AutoThrottleSource_Thermal):
protection = true;
src = DPM_EVENT_SRC_DIGITAL;
break;
case (1 << PHM_AutoThrottleSource_External):
protection = true;
src = DPM_EVENT_SRC_EXTERNAL;
break;
case (1 << PHM_AutoThrottleSource_External) |
(1 << PHM_AutoThrottleSource_Thermal):
protection = true;
src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
break;
}
/* Order matters - don't enable thermal protection for the wrong source. */
if (protection) {
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
DPM_EVENT_SRC, src);
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
THERMAL_PROTECTION_DIS,
!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ThermalController));
} else
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
THERMAL_PROTECTION_DIS, 1);
}

static int tonga_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
PHM_AutoThrottleSource source)
{
struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);

if (!(data->active_auto_throttle_sources & (1 << source))) {
data->active_auto_throttle_sources |= 1 << source;
tonga_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
}
return 0;
}

static int tonga_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
return tonga_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}

static int tonga_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
PHM_AutoThrottleSource source)
{
struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);

if (data->active_auto_throttle_sources & (1 << source)) {
data->active_auto_throttle_sources &= ~(1 << source);
tonga_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
}
return 0;
}

static int tonga_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
return tonga_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}

int tonga_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
@ -4409,6 +4474,10 @@ int tonga_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to power control set level!", result = tmp_result);

tmp_result = tonga_enable_thermal_auto_throttle(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to enable thermal auto throttle!", result = tmp_result);

return result;
}

@ -4420,6 +4489,10 @@ int tonga_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((0 == tmp_result),
"SMC is still running!", return 0);

tmp_result = tonga_disable_thermal_auto_throttle(hwmgr);
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to disable thermal auto throttle!", result = tmp_result);

tmp_result = tonga_stop_dpm(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to stop DPM!", result = tmp_result);
@ -5090,7 +5163,7 @@ static int tonga_get_pp_table_entry(struct pp_hwmgr *hwmgr,

tonga_ps = cast_phw_tonga_power_state(&(ps->hardware));

result = tonga_get_powerplay_table_entry(hwmgr, entry_index, ps,
result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, ps,
tonga_get_pp_table_entry_callback_func);

/* This is the earliest time we have all the dependency table and the VBIOS boot state
@ -6254,7 +6327,7 @@ static const struct pp_hwmgr_func tonga_hwmgr_funcs = {
.get_sclk = tonga_dpm_get_sclk,
.patch_boot_state = tonga_dpm_patch_boot_state,
.get_pp_table_entry = tonga_get_pp_table_entry,
.get_num_of_pp_table_entries = tonga_get_number_of_powerplay_table_entries,
.get_num_of_pp_table_entries = get_number_of_powerplay_table_entries_v1_0,
.print_current_perforce_level = tonga_print_current_perforce_level,
.powerdown_uvd = tonga_phm_powerdown_uvd,
.powergate_uvd = tonga_phm_powergate_uvd,
@ -6290,7 +6363,7 @@ static const struct pp_hwmgr_func tonga_hwmgr_funcs = {
int tonga_hwmgr_init(struct pp_hwmgr *hwmgr)
{
hwmgr->hwmgr_func = &tonga_hwmgr_funcs;
hwmgr->pptable_func = &tonga_pptable_funcs;
hwmgr->pptable_func = &pptable_v1_0_funcs;
pp_tonga_thermal_initialize(hwmgr);
return 0;
}
@ -56,9 +56,6 @@ void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
else
tonga_hwmgr->power_tune_defaults = &tonga_power_tune_data_set_array[0];

/* Assume disabled */
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_CAC);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
@ -131,9 +131,6 @@ struct amd_pp_init {
struct cgs_device *device;
uint32_t chip_family;
uint32_t chip_id;
uint32_t rev_id;
uint16_t sub_sys_id;
uint16_t sub_vendor_id;
};

enum amd_pp_display_config_type{
@ -341,7 +341,6 @@ extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
extern int phm_setup_asic(struct pp_hwmgr *hwmgr);
extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr);
extern int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr);
extern void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr);
extern bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr);
extern int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block);
extern int phm_set_power_state(struct pp_hwmgr *hwmgr,
@ -31,18 +31,20 @@
#include "hwmgr_ppt.h"
#include "ppatomctrl.h"
#include "hwmgr_ppt.h"
#include "power_state.h"

struct pp_instance;
struct pp_hwmgr;
struct pp_hw_power_state;
struct pp_power_state;
struct PP_VCEState;
struct phm_fan_speed_info;
struct pp_atomctrl_voltage_table;

extern int amdgpu_powercontainment;
extern int amdgpu_sclk_deep_sleep_en;
extern unsigned amdgpu_pp_feature_mask;

#define VOLTAGE_SCALE 4

uint8_t convert_to_vid(uint16_t vddc);

enum DISPLAY_GAP {
DISPLAY_GAP_VBLANK_OR_WM = 0, /* Wait for vblank or MCHG watermark. */
@ -52,7 +54,6 @@ enum DISPLAY_GAP {
};
typedef enum DISPLAY_GAP DISPLAY_GAP;

struct vi_dpm_level {
bool enabled;
uint32_t value;
@ -74,6 +75,19 @@ enum PP_Result {
#define PCIE_PERF_REQ_GEN2 3
#define PCIE_PERF_REQ_GEN3 4

enum PP_FEATURE_MASK {
PP_SCLK_DPM_MASK = 0x1,
PP_MCLK_DPM_MASK = 0x2,
PP_PCIE_DPM_MASK = 0x4,
PP_SCLK_DEEP_SLEEP_MASK = 0x8,
PP_POWER_CONTAINMENT_MASK = 0x10,
PP_UVD_HANDSHAKE_MASK = 0x20,
PP_SMC_VOLTAGE_CONTROL_MASK = 0x40,
PP_VBI_TIME_SUPPORT_MASK = 0x80,
PP_ULV_MASK = 0x100,
PP_ENABLE_GFX_CG_THRU_SMU = 0x200
};

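These PP_FEATURE_MASK bits pair with the amdgpu_pp_feature_mask module parameter declared above and the new pp_hwmgr.feature_mask field added below. A hedged sketch of gating a feature on the mask (the helper being called is hypothetical, not from this patch):

    /* Illustrative: each enum value is a single bit, so a plain AND tests it. */
    if (hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK)
            enable_sclk_deep_sleep(hwmgr);  /* hypothetical helper */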
|
||||
enum PHM_BackEnd_Magic {
|
||||
PHM_Dummy_Magic = 0xAA5555AA,
|
||||
PHM_RV770_Magic = 0xDCBAABCD,
|
||||
@ -354,7 +368,7 @@ struct pp_table_func {
|
||||
int (*pptable_get_vce_state_table_entry)(
|
||||
struct pp_hwmgr *hwmgr,
|
||||
unsigned long i,
|
||||
struct PP_VCEState *vce_state,
|
||||
struct pp_vce_state *vce_state,
|
||||
void **clock_info,
|
||||
unsigned long *flag);
|
||||
};
|
||||
@ -573,22 +587,43 @@ struct phm_microcode_version_info {
|
||||
uint32_t NB;
|
||||
};
|
||||
|
||||
#define PP_MAX_VCE_LEVELS 6
|
||||
|
||||
enum PP_VCE_LEVEL {
|
||||
PP_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */
|
||||
PP_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */
|
||||
PP_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */
|
||||
PP_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
|
||||
PP_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */
|
||||
PP_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
|
||||
};
|
||||
|
||||
|
||||
enum PP_TABLE_VERSION {
|
||||
PP_TABLE_V0 = 0,
|
||||
PP_TABLE_V1,
|
||||
PP_TABLE_V2,
|
||||
PP_TABLE_MAX
|
||||
};
|
||||
|
||||
/**
|
||||
* The main hardware manager structure.
|
||||
*/
|
||||
struct pp_hwmgr {
|
||||
uint32_t chip_family;
|
||||
uint32_t chip_id;
|
||||
uint32_t hw_revision;
|
||||
uint32_t sub_sys_id;
|
||||
uint32_t sub_vendor_id;
|
||||
|
||||
uint32_t pp_table_version;
|
||||
void *device;
|
||||
struct pp_smumgr *smumgr;
|
||||
const void *soft_pp_table;
|
||||
uint32_t soft_pp_table_size;
|
||||
void *hardcode_pp_table;
|
||||
bool need_pp_table_upload;
|
||||
|
||||
struct pp_vce_state vce_states[PP_MAX_VCE_LEVELS];
|
||||
uint32_t num_vce_state_tables;
|
||||
|
||||
enum amd_dpm_forced_level dpm_level;
|
||||
bool block_hw_access;
|
||||
struct phm_gfx_arbiter gfx_arbiter;
|
||||
@ -626,6 +661,7 @@ struct pp_hwmgr {
|
||||
struct pp_power_state *boot_ps;
|
||||
struct pp_power_state *uvd_ps;
|
||||
struct amd_pp_display_configuration display_config;
|
||||
uint32_t feature_mask;
|
||||
};
|
||||
|
||||
|
||||
@ -661,6 +697,8 @@ extern void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps, st
|
||||
extern int phm_reset_single_dpm_table(void *table, uint32_t count, int max);
|
||||
extern void phm_setup_pcie_table_entry(void *table, uint32_t index, uint32_t pcie_gen, uint32_t pcie_lanes);
|
||||
extern int32_t phm_get_dpm_level_enable_mask_value(void *table);
|
||||
extern uint8_t phm_get_voltage_id(struct pp_atomctrl_voltage_table *voltage_table,
|
||||
uint32_t voltage);
|
||||
extern uint8_t phm_get_voltage_index(struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage);
|
||||
extern uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci);
|
||||
extern int phm_find_boot_level(void *table, uint32_t value, uint32_t *boot_level);
|
||||
@ -671,6 +709,9 @@ extern int phm_hwmgr_backend_fini(struct pp_hwmgr *hwmgr);
|
||||
extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask);
|
||||
extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
|
||||
|
||||
extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
|
||||
uint32_t sclk, uint16_t id, uint16_t *voltage);
|
||||
|
||||
#define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU
|
||||
|
||||
#define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
|
||||
@ -685,8 +726,6 @@ extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
|
||||
PHM_FIELD_SHIFT(reg, field))
|
||||
|
||||
|
||||
|
||||
|
||||
/* Operations on named fields. */
|
||||
|
||||
#define PHM_READ_FIELD(device, reg, field) \
|
||||
|
@ -158,7 +158,7 @@ struct pp_power_state {

/*Structure to hold a VCE state entry*/
struct PP_VCEState {
struct pp_vce_state {
uint32_t evclk;
uint32_t ecclk;
uint32_t sclk;
@ -171,30 +171,28 @@ enum PP_MMProfilingState {
PP_MMProfilingState_Stopped
};

struct PP_Clock_Engine_Request {
unsigned long clientType;
unsigned long ctxid;
struct pp_clock_engine_request {
unsigned long client_type;
unsigned long ctx_id;
uint64_t context_handle;
unsigned long sclk;
unsigned long sclkHardMin;
unsigned long sclk_hard_min;
unsigned long mclk;
unsigned long iclk;
unsigned long evclk;
unsigned long ecclk;
unsigned long ecclkHardMin;
unsigned long ecclk_hard_min;
unsigned long vclk;
unsigned long dclk;
unsigned long samclk;
unsigned long acpclk;
unsigned long sclkOverdrive;
unsigned long mclkOverdrive;
unsigned long sclk_over_drive;
unsigned long mclk_over_drive;
unsigned long sclk_threshold;
unsigned long flag;
unsigned long vclk_ceiling;
unsigned long dclk_ceiling;
unsigned long num_cus;
unsigned long pmflag;
enum PP_MMProfilingState MMProfilingState;
unsigned long pm_flag;
enum PP_MMProfilingState mm_profiling_state;
};

#endif
@ -43,5 +43,8 @@
} while (0)

#define GET_FLEXIBLE_ARRAY_MEMBER_ADDR(type, member, ptr, n) \
(type *)((char *)&(ptr)->member + (sizeof(type) * (n)))

#endif /* PP_DEBUG_H */
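GET_FLEXIBLE_ARRAY_MEMBER_ADDR is the workhorse of the pptable changes above: the ATOM tables declare their payload as a one-element array (entries[1]) whose actual record layout is selected at runtime by ucRevId, so plain entries[i] indexing both walks past the declared bound and can stride by the wrong record size. The macro recomputes the address from an explicit record type. A sketch (this reading of the motivation is an interpretation, not stated in the patch):

    /* Plain indexing strides by the declared element type and indexes
     * beyond the nominal entries[1] bound: */
    state_entry = &state_arrays->entries[entry_index];

    /* The macro strides by whatever record type the caller names: */
    state_entry = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(ATOM_Tonga_State,
                    entries, state_arrays, entry_index);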
@ -74,7 +74,6 @@ struct pp_smumgr_func {
struct pp_smumgr {
uint32_t chip_family;
uint32_t chip_id;
uint32_t hw_revision;
void *device;
void *backend;
uint32_t usec_timeout;
@ -122,6 +121,12 @@ extern int smu_allocate_memory(void *device, uint32_t size,

extern int smu_free_memory(void *device, void *handle);

extern int cz_smum_init(struct pp_smumgr *smumgr);
extern int iceland_smum_init(struct pp_smumgr *smumgr);
extern int tonga_smum_init(struct pp_smumgr *smumgr);
extern int fiji_smum_init(struct pp_smumgr *smumgr);
extern int polaris10_smum_init(struct pp_smumgr *smumgr);

#define SMUM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT

#define SMUM_FIELD_MASK(reg, field) reg##__##field##_MASK
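With the per-chip *_smum_init() declarations centralized here, smumgr.c can drop its per-chip includes (next hunk). A hedged sketch of the dispatch these declarations serve (the switch shape follows the existing smum_init(); the family constants are assumed from amd_shared.h):

    switch (smumgr->chip_family) {
    case AMDGPU_FAMILY_CZ:
            cz_smum_init(smumgr);
            break;
    case AMDGPU_FAMILY_VI:
            /* iceland/tonga/fiji/polaris10 picked by smumgr->chip_id */
            tonga_smum_init(smumgr);
            break;
    default:
            return -EINVAL;
    }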
@ -89,13 +89,8 @@ static int cz_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
if (result != 0)
return result;

result = SMUM_WAIT_FIELD_UNEQUAL(smumgr,
return SMUM_WAIT_FIELD_UNEQUAL(smumgr,
SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);

if (result != 0)
return result;

return 0;
}

static int cz_set_smc_sram_address(struct pp_smumgr *smumgr,
@ -106,12 +101,12 @@ static int cz_set_smc_sram_address(struct pp_smumgr *smumgr,

if (0 != (3 & smc_address)) {
printk(KERN_ERR "[ powerplay ] SMC address must be 4 byte aligned\n");
return -1;
return -EINVAL;
}

if (limit <= (smc_address + 3)) {
printk(KERN_ERR "[ powerplay ] SMC address beyond the SMC RAM area\n");
return -1;
return -EINVAL;
}

cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX_0,
@ -129,9 +124,10 @@ static int cz_write_smc_sram_dword(struct pp_smumgr *smumgr,
return -EINVAL;

result = cz_set_smc_sram_address(smumgr, smc_address, limit);
cgs_write_register(smumgr->device, mmMP0PUB_IND_DATA_0, value);
if (!result)
cgs_write_register(smumgr->device, mmMP0PUB_IND_DATA_0, value);

return 0;
return result;
}

static int cz_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
@ -148,7 +144,6 @@ static int cz_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
static int cz_request_smu_load_fw(struct pp_smumgr *smumgr)
{
struct cz_smumgr *cz_smu = (struct cz_smumgr *)(smumgr->backend);
int result = 0;
uint32_t smc_address;

if (!smumgr->reload_fw) {
@ -177,11 +172,9 @@ static int cz_request_smu_load_fw(struct pp_smumgr *smumgr)
cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
cz_smu->toc_entry_power_profiling_index);

result = cz_send_msg_to_smc_with_parameter(smumgr,
return cz_send_msg_to_smc_with_parameter(smumgr,
PPSMC_MSG_ExecuteJob,
cz_smu->toc_entry_initialize_index);

return result;
}

static int cz_check_fw_load_finish(struct pp_smumgr *smumgr,
@ -195,9 +188,6 @@ static int cz_check_fw_load_finish(struct pp_smumgr *smumgr,
if (smumgr == NULL || smumgr->device == NULL)
return -EINVAL;

return cgs_read_register(smumgr->device,
mmSMU_MP1_SRBM2P_ARG_0);

cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX, index);

for (i = 0; i < smumgr->usec_timeout; i++) {
@ -275,7 +265,10 @@ static int cz_start_smu(struct pp_smumgr *smumgr)
if (smumgr->chip_id == CHIP_STONEY)
fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);

cz_request_smu_load_fw(smumgr);
ret = cz_request_smu_load_fw(smumgr);
if (ret)
printk(KERN_ERR "[ powerplay] SMU firmware load failed\n");

cz_check_fw_load_finish(smumgr, fw_to_check);

ret = cz_load_mec_firmware(smumgr);
@ -566,10 +559,7 @@ static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr)

cz_smu_populate_single_ucode_load_task(smumgr,
CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
if (smumgr->chip_id == CHIP_STONEY)
cz_smu_populate_single_ucode_load_task(smumgr,
CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
else
if (smumgr->chip_id != CHIP_STONEY)
cz_smu_populate_single_ucode_load_task(smumgr,
CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
cz_smu_populate_single_ucode_load_task(smumgr,
@ -580,10 +570,7 @@ static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr)
CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
cz_smu_populate_single_ucode_load_task(smumgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
if (smumgr->chip_id == CHIP_STONEY)
cz_smu_populate_single_ucode_load_task(smumgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
else
if (smumgr->chip_id != CHIP_STONEY)
cz_smu_populate_single_ucode_load_task(smumgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
cz_smu_populate_single_ucode_load_task(smumgr,
@ -610,19 +597,12 @@ static int cz_smu_construct_toc(struct pp_smumgr *smumgr)
struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;

cz_smu->toc_entry_used_count = 0;

cz_smu_initialize_toc_empty_job_list(smumgr);

cz_smu_construct_toc_for_rlc_aram_save(smumgr);

cz_smu_construct_toc_for_vddgfx_enter(smumgr);

cz_smu_construct_toc_for_vddgfx_exit(smumgr);

cz_smu_construct_toc_for_power_profiling(smumgr);

cz_smu_construct_toc_for_bootup(smumgr);

cz_smu_construct_toc_for_clock_table(smumgr);

return 0;
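A common thread in the cz_smumgr hunks above is to stop swallowing errors: the SRAM write now happens only after the address setup succeeded, and the real result is propagated. The pattern in miniature:

    result = cz_set_smc_sram_address(smumgr, smc_address, limit);
    if (!result)   /* write only after the address was accepted */
            cgs_write_register(smumgr->device, mmMP0PUB_IND_DATA_0, value);
    return result; /* propagate the failure instead of returning 0 */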
@ -40,7 +40,6 @@
#include "cgs_common.h"

#define POLARIS10_SMC_SIZE 0x20000
#define VOLTAGE_SCALE 4

/* Microcode file is stored in this buffer */
#define BUFFER_SIZE 80000
@ -26,12 +26,27 @@

#include <polaris10_ppsmc.h>
#include <pp_endian.h>
#include "smu74.h"

struct polaris10_avfs {
enum AVFS_BTC_STATUS avfs_btc_status;
uint32_t avfs_btc_param;
};

struct polaris10_pt_defaults {
uint8_t SviLoadLineEn;
uint8_t SviLoadLineVddC;
uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
uint8_t TDC_MAWt;
uint8_t TdcWaterfallCtl;
uint8_t DTEAmbientTempBase;

uint32_t DisplayCac;
uint32_t BAPM_TEMP_GRADIENT;
uint16_t BAPMTI_R[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
uint16_t BAPMTI_RC[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
};

struct polaris10_buffer_entry {
uint32_t data_size;
uint32_t mc_addr_low;
@ -40,6 +55,11 @@ struct polaris10_buffer_entry {
unsigned long handle;
};

struct polaris10_range_table {
uint32_t trans_lower_frequency; /* in 10khz */
uint32_t trans_upper_frequency;
};

struct polaris10_smumgr {
uint8_t *header;
uint8_t *mec_image;
@@ -28,11 +28,7 @@
 #include "smumgr.h"
 #include "cgs_common.h"
 #include "linux/delay.h"
-#include "cz_smumgr.h"
-#include "tonga_smumgr.h"
-#include "iceland_smumgr.h"
-#include "fiji_smumgr.h"
 #include "polaris10_smumgr.h"
 
 int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
 {
@@ -48,7 +44,6 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
 	smumgr->device = pp_init->device;
 	smumgr->chip_family = pp_init->chip_family;
 	smumgr->chip_id = pp_init->chip_id;
-	smumgr->hw_revision = pp_init->rev_id;
 	smumgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
 	smumgr->reload_fw = 1;
 	handle->smu_mgr = smumgr;
@@ -65,30 +65,34 @@ void drm_global_release(void)
 
 int drm_global_item_ref(struct drm_global_reference *ref)
 {
-	int ret;
+	int ret = 0;
 	struct drm_global_item *item = &glob[ref->global_type];
 
 	mutex_lock(&item->mutex);
 	if (item->refcount == 0) {
-		item->object = kzalloc(ref->size, GFP_KERNEL);
-		if (unlikely(item->object == NULL)) {
+		ref->object = kzalloc(ref->size, GFP_KERNEL);
+		if (unlikely(ref->object == NULL)) {
 			ret = -ENOMEM;
-			goto out_err;
+			goto error_unlock;
 		}
 
-		ref->object = item->object;
 		ret = ref->init(ref);
 		if (unlikely(ret != 0))
-			goto out_err;
+			goto error_free;
 
+		item->object = ref->object;
+	} else {
+		ref->object = item->object;
 	}
+
 	++item->refcount;
-	ref->object = item->object;
 	mutex_unlock(&item->mutex);
 	return 0;
-out_err:
+error_free:
+	kfree(ref->object);
+	ref->object = NULL;
+error_unlock:
 	mutex_unlock(&item->mutex);
-	item->object = NULL;
 	return ret;
 }
 EXPORT_SYMBOL(drm_global_item_ref);
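The rewritten drm_global_item_ref() allocates into the caller's reference first, publishes the pointer to the shared item only after ref->init() succeeds, and frees the allocation on the init-failure path, so a failed init no longer leaks the buffer or leaves a stale pointer behind in item->object. A compilable userspace model of the fixed flow, with the locking elided and the types simplified:

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for struct drm_global_item / drm_global_reference. */
struct item { void *object; unsigned int refcount; };
struct ref  { void *object; size_t size; int (*init)(struct ref *); };

static int item_ref(struct item *item, struct ref *ref)
{
	int ret = 0;

	if (item->refcount == 0) {
		ref->object = calloc(1, ref->size);
		if (!ref->object)
			return -ENOMEM;
		ret = ref->init(ref);
		if (ret) {                      /* init failed: undo the allocation */
			free(ref->object);
			ref->object = NULL;
			return ret;
		}
		item->object = ref->object;     /* publish only on success */
	} else {
		ref->object = item->object;     /* share the existing object */
	}
	++item->refcount;
	return 0;
}

static int init_ok(struct ref *r) { (void)r; return 0; }

int main(void)
{
	struct item it = { 0 };
	struct ref r = { .size = 64, .init = init_ok };

	printf("first ref: %d, refcount=%u\n", item_ref(&it, &r), it.refcount);
	printf("second ref: %d, refcount=%u\n", item_ref(&it, &r), it.refcount);
	free(it.object);
	return 0;
}
```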
@@ -61,7 +61,7 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
 	if (domain == QXL_GEM_DOMAIN_VRAM)
 		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
 	if (domain == QXL_GEM_DOMAIN_SURFACE)
-		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag;
+		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV | pflag;
 	if (domain == QXL_GEM_DOMAIN_CPU)
 		qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
 	if (!c)
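This and the following qxl hunks are a mechanical rename: the driver-private TTM placement TTM_PL_PRIV0 / TTM_PL_FLAG_PRIV0 becomes TTM_PL_PRIV / TTM_PL_FLAG_PRIV, which qxl uses for its surface RAM heap. A toy model of the domain-to-placement mapping shown above; the flag values are illustrative, not the kernel's:

```c
#include <stdint.h>
#include <stdio.h>

/* Toy model of qxl_ttm_placement_from_domain(): each GEM domain selects
 * a TTM memory-type flag. Values below are made up for illustration. */
#define TTM_PL_FLAG_SYSTEM (1u << 0)
#define TTM_PL_FLAG_VRAM   (1u << 1)
#define TTM_PL_FLAG_PRIV   (1u << 2)   /* was TTM_PL_FLAG_PRIV0 before the rename */
#define TTM_PL_FLAG_CACHED (1u << 16)

enum domain { DOMAIN_VRAM, DOMAIN_SURFACE, DOMAIN_CPU };

static uint32_t placement_flags(enum domain d)
{
	switch (d) {
	case DOMAIN_VRAM:    return TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM;
	case DOMAIN_SURFACE: return TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV;
	case DOMAIN_CPU:     return TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
	}
	return 0;
}

int main(void)
{
	printf("surface placement flags: 0x%x\n", placement_flags(DOMAIN_SURFACE));
	return 0;
}
```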
@@ -151,7 +151,7 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
 
 	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
 		map = qdev->vram_mapping;
-	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
+	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
 		map = qdev->surface_mapping;
 	else
 		goto fallback;
@@ -191,7 +191,7 @@ void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
 
 	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
 		map = qdev->vram_mapping;
-	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
+	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
 		map = qdev->surface_mapping;
 	else
 		goto fallback;
@@ -311,7 +311,7 @@ int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
 
 int qxl_surf_evict(struct qxl_device *qdev)
 {
-	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
+	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV);
 }
 
 int qxl_vram_evict(struct qxl_device *qdev)
@@ -168,7 +168,7 @@ static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	case TTM_PL_VRAM:
-	case TTM_PL_PRIV0:
+	case TTM_PL_PRIV:
 		/* "On-card" video ram */
 		man->func = &ttm_bo_manager_func;
 		man->gpu_offset = 0;
@@ -235,7 +235,7 @@ static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
 		mem->bus.base = qdev->vram_base;
 		mem->bus.offset = mem->start << PAGE_SHIFT;
 		break;
-	case TTM_PL_PRIV0:
+	case TTM_PL_PRIV:
 		mem->bus.is_iomem = true;
 		mem->bus.base = qdev->surfaceram_base;
 		mem->bus.offset = mem->start << PAGE_SHIFT;
@@ -376,7 +376,7 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
 	qbo = to_qxl_bo(bo);
 	qdev = qbo->gem_base.dev->dev_private;
 
-	if (bo->mem.mem_type == TTM_PL_PRIV0 && qbo->surface_id)
+	if (bo->mem.mem_type == TTM_PL_PRIV && qbo->surface_id)
 		qxl_surface_evict(qdev, qbo, new_mem ? true : false);
 }
 
@@ -422,7 +422,7 @@ int qxl_ttm_init(struct qxl_device *qdev)
 		DRM_ERROR("Failed initializing VRAM heap.\n");
 		return r;
 	}
-	r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV0,
+	r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV,
 			   qdev->surfaceram_size / PAGE_SIZE);
 	if (r) {
 		DRM_ERROR("Failed initializing Surfaces heap.\n");
@@ -445,7 +445,7 @@ int qxl_ttm_init(struct qxl_device *qdev)
 void qxl_ttm_fini(struct qxl_device *qdev)
 {
 	ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
-	ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
+	ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV);
 	ttm_bo_device_release(&qdev->mman.bdev);
 	qxl_ttm_global_fini(qdev);
 	DRM_INFO("qxl: ttm finalized\n");
@@ -489,7 +489,7 @@ static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
 		if (i == 0)
 			qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
 		else
-			qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
+			qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV].priv;
 
 	}
 	return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
@@ -639,7 +639,7 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
  * Used at driver startup.
  * Returns true if virtual or false if not.
  */
-static bool radeon_device_is_virtual(void)
+bool radeon_device_is_virtual(void)
{
#ifdef CONFIG_X86
	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
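radeon_device_is_virtual() loses its static qualifier here so other parts of the driver can ask whether the GPU is running under a hypervisor. A userspace equivalent of the boot_cpu_has(X86_FEATURE_HYPERVISOR) test (CPUID leaf 1, ECX bit 31); x86-only and purely illustrative:

```c
#include <stdbool.h>
#include <stdio.h>

/* CPUID leaf 1 sets ECX bit 31 when running under a hypervisor;
 * this mirrors what boot_cpu_has(X86_FEATURE_HYPERVISOR) reports. */
static bool device_is_virtual(void)
{
#if defined(__x86_64__) || defined(__i386__)
	unsigned int eax = 1, ebx, ecx, edx;

	__asm__ volatile("cpuid"
			 : "+a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx));
	(void)ebx;
	(void)edx;
	return ecx & (1u << 31);
#else
	return false;
#endif
}

int main(void)
{
	printf("running under a hypervisor: %s\n",
	       device_is_virtual() ? "yes" : "no");
	return 0;
}
```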
@@ -1594,7 +1594,8 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
 
 	rdev = dev->dev_private;
 
-	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+	    dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
 		return 0;
 
 	drm_kms_helper_poll_disable(dev);
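The suspend path now also bails out when the vga_switcheroo state is DRM_SWITCH_POWER_DYNAMIC_OFF, i.e. the GPU was powered down by runtime PM rather than switched off explicitly; the resume hunk below gets the same guard. A small sketch of the pattern; the enum mirrors DRM's names but is otherwise a stand-in:

```c
#include <stdio.h>

/* Names mirror DRM's switcheroo power states; values are for illustration. */
enum switch_power_state {
	DRM_SWITCH_POWER_ON,
	DRM_SWITCH_POWER_OFF,
	DRM_SWITCH_POWER_CHANGING,
	DRM_SWITCH_POWER_DYNAMIC_OFF,
};

static int suspend_kms(enum switch_power_state st)
{
	/* Both OFF states mean "GPU already powered down, nothing to do". */
	if (st == DRM_SWITCH_POWER_OFF || st == DRM_SWITCH_POWER_DYNAMIC_OFF)
		return 0;
	printf("saving state, disabling engines...\n");
	return 0;
}

int main(void)
{
	suspend_kms(DRM_SWITCH_POWER_DYNAMIC_OFF);  /* skipped */
	suspend_kms(DRM_SWITCH_POWER_ON);           /* does the work */
	return 0;
}
```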
@@ -1689,7 +1690,8 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 	struct drm_crtc *crtc;
 	int r;
 
-	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+	    dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
 		return 0;
 
 	if (fbcon) {
@@ -1956,14 +1958,3 @@ static void radeon_debugfs_remove_files(struct radeon_device *rdev)
 	}
 #endif
 }
-
-#if defined(CONFIG_DEBUG_FS)
-int radeon_debugfs_init(struct drm_minor *minor)
-{
-	return 0;
-}
-
-void radeon_debugfs_cleanup(struct drm_minor *minor)
-{
-}
-#endif
Some files were not shown because too many files have changed in this diff.