drm fixes for 6.2-rc7
dma-fence:
 - fix signaling bit for private fences

panel:
 - boe-tv101wum-nl6 disable fix

nouveau:
 - gm20b acr regression fix
 - tu102 scrub status fix
 - tu102 wait for firmware fix

i915:
 - Fixes for potential use-after-free and double-free
 - GuC locking and refcount fixes
 - Display's reference clock value fix

amdgpu:
 - GC11 fixes
 - DCN 3.1.4 fixes
 - NBIO 4.3 fix
 - DCN 3.2 fixes
 - Properly handle additional cases where DCN is not supported
 - SMU13 fixes

vc4:
 - fix CEC adapter names

ssd130x:
 - fix display init regression

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEEKbZHaGwW9KfbeusDHTzWXnEhr4FAmPcg/0ACgkQDHTzWXnE
hr7SRQ//WWOtULxrLQLvTKw29yLmldrz4JZrv0ChJ3voBkoaiFnvmhhHrkzEHn7F
LTIWC7Lpx+axmoihXPvehbVJtIR/Fln0PrYsizt84fMVq5feT+3f+Ioj4jsu22HE
DFgYxpmb6+4qOPN16hIYcf1dz00WZrZcxa0j+8XgyFHhLENQvChh5Xlxcj+6EkH0
5tiDWkWys2lTsLEPKqA09CPXIrxn3YfIL39lBtOh8KTegNWHHY0yiKx58vaYXcBv
I6cH0/GCVh+mFNBMT8fb2877bCKAeVWrTHByXwxStu8FSX+EhxWQfC+mgPWlZgZo
zC5b08O69vdR7Ru6W9u28AbpWRWsITbHR79AulF7854/F2peRqHXTfiEcKSEk2kZ
jyxpLrSwE7HRpWjmGnp4UCYuKW0iJHjBmbrqEPywYwcHtMRdMsQi4cYvLyVu0ZxA
7DPDBr8ilEQv8xG6jbJpD+amo/6rZn9L+6CusK8haW0fUs/tQ1WXliSSOFzn5ueH
8mjddMRWec9kWmR/8uVIb4hGWT+MgxHSCTUo4CMw+KctVlp91YD+vk3u7VU6gtM4
CuIGSQcZeDwN5h8ab4iJ7PQbk/93eipMUBvHzFMzymrpsRWQgSv5FQLwxrvCLqEB
CiQRyfBbpGsJ4v7d7K2yMn/D+b94vsajxEwi32rqNXgnBA+EoOg=
=uX5h
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2023-02-03' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "A few more fixes this week, a bit more spread out though. We have a
  bunch of nouveau regression and stabilisation fixes, along with the
  usual amdgpu and i915 fixes. Otherwise just some minor misc ones:

  dma-fence:
   - fix signaling bit for private fences

  panel:
   - boe-tv101wum-nl6 disable fix

  nouveau:
   - gm20b acr regression fix
   - tu102 scrub status fix
   - tu102 wait for firmware fix

  i915:
   - Fixes for potential use-after-free and double-free
   - GuC locking and refcount fixes
   - Display's reference clock value fix

  amdgpu:
   - GC11 fixes
   - DCN 3.1.4 fixes
   - NBIO 4.3 fix
   - DCN 3.2 fixes
   - Properly handle additional cases where DCN is not supported
   - SMU13 fixes

  vc4:
   - fix CEC adapter names

  ssd130x:
   - fix display init regression"

* tag 'drm-fixes-2023-02-03' of git://anongit.freedesktop.org/drm/drm: (23 commits)
  drm/amd/display: Properly handle additional cases where DCN is not supported
  drm/amdgpu: Enable vclk dclk node for gc11.0.3
  drm/amd: Fix initialization for nbio 4.3.0
  drm/amdgpu: enable HDP SD for gfx 11.0.3
  drm/amd/pm: drop unneeded dpm features disablement for SMU 13.0.4/11
  drm/amd/display: Reset DMUB mailbox SW state after HW reset
  drm/amd/display: Unassign does_plane_fit_in_mall function from dcn3.2
  drm/amd/display: Adjust downscaling limits for dcn314
  drm/amd/display: Add missing brackets in calculation
  drm/amdgpu: update wave data type to 3 for gfx11
  drm/panel: boe-tv101wum-nl6: Ensure DSI writes succeed during disable
  drm/nouveau/acr/gm20b: regression fixes
  drm/nouveau/fb/tu102-: fix register used to determine scrub status
  drm/nouveau/devinit/tu102-: wait for GFW_BOOT_PROGRESS == COMPLETED
  drm/i915/adlp: Fix typo for reference clock
  drm/i915: Fix potential bit_17 double-free
  drm/i915: Fix up locking around dumping requests lists
  drm/i915: Fix request ref counting during error capture & debugfs dump
  drm/i915/guc: Fix locking when searching for a hung request
  drm/i915: Avoid potential vm use-after-free
  ...
@@ -167,7 +167,7 @@ struct dma_fence *dma_fence_allocate_private_stub(void)
               0, 0);

    set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
-           &dma_fence_stub.flags);
+           &fence->flags);

    dma_fence_signal(fence);

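Note: below is a minimal, standalone C sketch of the bug class the dma-fence hunk above fixes, i.e. setting a flag on the shared static template instead of on the freshly initialised instance. All names here (fence_stub, FLAG_ENABLE_SIGNAL, fence_init_*) are invented for illustration and are not the kernel's dma-fence API.

#include <stdio.h>

#define FLAG_ENABLE_SIGNAL (1u << 0)

struct fence {
    unsigned int flags;
};

/* Shared template that new fences are copied from. */
static struct fence fence_stub;

static void fence_init_buggy(struct fence *f)
{
    *f = fence_stub;
    fence_stub.flags |= FLAG_ENABLE_SIGNAL;  /* wrong object: marks the template */
}

static void fence_init_fixed(struct fence *f)
{
    *f = fence_stub;
    f->flags |= FLAG_ENABLE_SIGNAL;          /* marks the fence we just set up */
}

int main(void)
{
    struct fence a = { 0 }, b = { 0 };

    fence_init_fixed(&b);
    fence_init_buggy(&a);
    /* The "buggy" copy never sees the bit it was supposed to carry. */
    printf("buggy: %u, fixed: %u\n",
           a.flags & FLAG_ENABLE_SIGNAL, b.flags & FLAG_ENABLE_SIGNAL);
    return 0;
}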
@@ -790,8 +790,8 @@ static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd,
     * zero here */
    WARN_ON(simd != 0);

-   /* type 2 wave data */
-   dst[(*no_fields)++] = 2;
+   /* type 3 wave data */
+   dst[(*no_fields)++] = 3;
    dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
    dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
    dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);
@@ -337,7 +337,13 @@ const struct nbio_hdp_flush_reg nbio_v4_3_hdp_flush_reg = {

 static void nbio_v4_3_init_registers(struct amdgpu_device *adev)
 {
-   return;
+   if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(4, 3, 0)) {
+       uint32_t data;
+
+       data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2);
+       data &= ~RCC_DEV0_EPF2_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F2_MASK;
+       WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2, data);
+   }
 }

 static u32 nbio_v4_3_get_rom_offset(struct amdgpu_device *adev)
@@ -640,7 +640,8 @@ static int soc21_common_early_init(void *handle)
        AMD_CG_SUPPORT_GFX_CGCG |
        AMD_CG_SUPPORT_GFX_CGLS |
        AMD_CG_SUPPORT_REPEATER_FGCG |
-       AMD_CG_SUPPORT_GFX_MGCG;
+       AMD_CG_SUPPORT_GFX_MGCG |
+       AMD_CG_SUPPORT_HDP_SD;
    adev->pg_flags = AMD_PG_SUPPORT_VCN |
        AMD_PG_SUPPORT_VCN_DPG |
        AMD_PG_SUPPORT_JPEG;
@@ -4501,6 +4501,17 @@ DEVICE_ATTR_WO(s3_debug);
 static int dm_early_init(void *handle)
 {
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+   struct amdgpu_mode_info *mode_info = &adev->mode_info;
+   struct atom_context *ctx = mode_info->atom_context;
+   int index = GetIndexIntoMasterTable(DATA, Object_Header);
+   u16 data_offset;
+
+   /* if there is no object header, skip DM */
+   if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
+       adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
+       dev_info(adev->dev, "No object header, skipping DM\n");
+       return -ENOENT;
+   }

    switch (adev->asic_type) {
 #if defined(CONFIG_DRM_AMD_DC_SI)
@@ -874,8 +874,9 @@ static const struct dc_plane_cap plane_cap = {
    },

    // 6:1 downscaling ratio: 1000/6 = 166.666
+   // 4:1 downscaling ratio for ARGB888 to prevent underflow during P010 playback: 1000/4 = 250
    .max_downscale_factor = {
-       .argb8888 = 167,
+       .argb8888 = 250,
        .nv12 = 167,
        .fp16 = 167
    },
@@ -1763,7 +1764,7 @@ static bool dcn314_resource_construct(
    pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
    pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
    pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
-   dc->caps.max_downscale_ratio = 600;
+   dc->caps.max_downscale_ratio = 400;
    dc->caps.i2c_speed_in_khz = 100;
    dc->caps.i2c_speed_in_khz_hdcp = 100;
    dc->caps.max_cursor_size = 256;
@@ -94,7 +94,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
    .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
    .calc_vupdate_position = dcn10_calc_vupdate_position,
    .apply_idle_power_optimizations = dcn32_apply_idle_power_optimizations,
-   .does_plane_fit_in_mall = dcn30_does_plane_fit_in_mall,
+   .does_plane_fit_in_mall = NULL,
    .set_backlight_level = dcn21_set_backlight_level,
    .set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
    .hardware_release = dcn30_hardware_release,
@@ -3183,7 +3183,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
    } else {
        v->MIN_DST_Y_NEXT_START[k] = v->VTotal[k] - v->VFrontPorch[k] + v->VTotal[k] - v->VActive[k] - v->VStartup[k];
    }
-   v->MIN_DST_Y_NEXT_START[k] += dml_floor(4.0 * v->TSetup[k] / (double)v->HTotal[k] / v->PixelClock[k], 1.0) / 4.0;
+   v->MIN_DST_Y_NEXT_START[k] += dml_floor(4.0 * v->TSetup[k] / ((double)v->HTotal[k] / v->PixelClock[k]), 1.0) / 4.0;
    if (((v->VUpdateOffsetPix[k] + v->VUpdateWidthPix[k] + v->VReadyOffsetPix[k]) / v->HTotal[k])
            <= (isInterlaceTiming ?
                dml_floor((v->VTotal[k] - v->VActive[k] - v->VFrontPorch[k] - v->VStartup[k]) / 2.0, 1.0) :
@@ -532,6 +532,9 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
    if (dmub->hw_funcs.reset)
        dmub->hw_funcs.reset(dmub);

+   /* reset the cache of the last wptr as well now that hw is reset */
+   dmub->inbox1_last_wptr = 0;
+
    cw0.offset.quad_part = inst_fb->gpu_addr;
    cw0.region.base = DMUB_CW0_BASE;
    cw0.region.top = cw0.region.base + inst_fb->size - 1;
@@ -649,6 +652,15 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
    if (dmub->hw_funcs.reset)
        dmub->hw_funcs.reset(dmub);

+   /* mailboxes have been reset in hw, so reset the sw state as well */
+   dmub->inbox1_last_wptr = 0;
+   dmub->inbox1_rb.wrpt = 0;
+   dmub->inbox1_rb.rptr = 0;
+   dmub->outbox0_rb.wrpt = 0;
+   dmub->outbox0_rb.rptr = 0;
+   dmub->outbox1_rb.wrpt = 0;
+   dmub->outbox1_rb.rptr = 0;
+
    dmub->hw_init = false;

    return DMUB_STATUS_OK;
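Note: a small self-contained C sketch of the idea behind the dmub_srv_hw_reset() change above — when the "hardware" side of a mailbox is reset, the driver's cached ring-buffer pointers have to be cleared as well, or the next submission would resume from a stale offset. All structure and function names here are hypothetical, not the real dmub_srv types.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical mailbox ring: software cursors that must track the device. */
struct ring {
    uint32_t wrpt;   /* software write pointer */
    uint32_t rptr;   /* software read pointer */
    uint8_t data[256];
};

struct mbox {
    struct ring inbox;
    uint32_t last_wptr;  /* cached copy of the last pointer handed to hw */
};

/* Model of a hardware reset: the device forgets its pointers, so the
 * cached software state is cleared too to keep both sides in sync. */
static void mbox_hw_reset(struct mbox *m)
{
    memset(&m->inbox, 0, sizeof(m->inbox));
    m->last_wptr = 0;
}

int main(void)
{
    struct mbox m = { .inbox = { .wrpt = 64, .rptr = 32 }, .last_wptr = 64 };

    mbox_hw_reset(&m);
    printf("wrpt=%u rptr=%u last_wptr=%u\n",
           (unsigned)m.inbox.wrpt, (unsigned)m.inbox.rptr, (unsigned)m.last_wptr);
    return 0;
}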
@@ -2007,14 +2007,16 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
          gc_ver == IP_VERSION(10, 3, 0) ||
          gc_ver == IP_VERSION(10, 1, 2) ||
          gc_ver == IP_VERSION(11, 0, 0) ||
-         gc_ver == IP_VERSION(11, 0, 2)))
+         gc_ver == IP_VERSION(11, 0, 2) ||
+         gc_ver == IP_VERSION(11, 0, 3)))
            *states = ATTR_STATE_UNSUPPORTED;
    } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
        if (!(gc_ver == IP_VERSION(10, 3, 1) ||
          gc_ver == IP_VERSION(10, 3, 0) ||
          gc_ver == IP_VERSION(10, 1, 2) ||
          gc_ver == IP_VERSION(11, 0, 0) ||
-         gc_ver == IP_VERSION(11, 0, 2)))
+         gc_ver == IP_VERSION(11, 0, 2) ||
+         gc_ver == IP_VERSION(11, 0, 3)))
            *states = ATTR_STATE_UNSUPPORTED;
    } else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
        if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
@@ -1499,6 +1499,20 @@ static int smu_disable_dpms(struct smu_context *smu)
        }
    }

+   /*
+    * For SMU 13.0.4/11, PMFW will handle the features disablement properly
+    * for gpu reset case. Driver involvement is unnecessary.
+    */
+   if (amdgpu_in_reset(adev)) {
+       switch (adev->ip_versions[MP1_HWIP][0]) {
+       case IP_VERSION(13, 0, 4):
+       case IP_VERSION(13, 0, 11):
+           return 0;
+       default:
+           break;
+       }
+   }
+
    /*
     * For gpu reset, runpm and hibernation through BACO,
     * BACO feature has to be kept enabled.
@@ -1319,7 +1319,7 @@ static const struct intel_cdclk_vals adlp_cdclk_table[] = {
    { .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
    { .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
    { .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
-   { .refclk = 24400, .cdclk = 648000, .divider = 2, .ratio = 54 },
+   { .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },

    { .refclk = 38400, .cdclk = 179200, .divider = 3, .ratio = 14 },
    { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
@@ -1861,12 +1861,20 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
    vm = ctx->vm;
    GEM_BUG_ON(!vm);

-   err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
-   if (err)
-       return err;
-
+   /*
+    * Get a reference for the allocated handle. Once the handle is
+    * visible in the vm_xa table, userspace could try to close it
+    * from under our feet, so we need to hold the extra reference
+    * first.
+    */
    i915_vm_get(vm);

+   err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
+   if (err) {
+       i915_vm_put(vm);
+       return err;
+   }
+
    GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
    args->value = id;
    args->size = 0;
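Note: a minimal userspace sketch of the ordering the get_ppgtt() fix above establishes — take the reference that the published handle will own before the object becomes visible, and drop it again if publication fails. The publish()/obj_get()/obj_put() helpers and the simulated failure flag are invented for illustration; this is not the i915 or xarray API.

#include <stdio.h>

/* Hypothetical refcounted object published through a lookup slot. */
struct obj {
    int refcount;
};

static struct obj *slot;  /* once set, other users may find and put it */

static void obj_get(struct obj *o) { o->refcount++; }
static void obj_put(struct obj *o) { o->refcount--; }

static int publish(struct obj *o, int simulate_failure)
{
    /*
     * Reference first: if the object were made visible before the get,
     * a concurrent close could drop the last reference and free it
     * while we still hold a raw pointer.
     */
    obj_get(o);
    if (simulate_failure) {   /* stand-in for the insertion call failing */
        obj_put(o);
        return -1;
    }
    slot = o;                 /* object is now visible to other users */
    return 0;
}

int main(void)
{
    struct obj o = { .refcount = 1 };

    publish(&o, 0);
    printf("refcount after publish: %d\n", o.refcount);
    return 0;
}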
@@ -305,10 +305,6 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
    spin_unlock(&obj->vma.lock);

    obj->tiling_and_stride = tiling | stride;
-   i915_gem_object_unlock(obj);
-
-   /* Force the fence to be reacquired for GTT access */
-   i915_gem_object_release_mmap_gtt(obj);

    /* Try to preallocate memory required to save swizzling on put-pages */
    if (i915_gem_object_needs_bit17_swizzle(obj)) {
@@ -321,6 +317,11 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
        obj->bit_17 = NULL;
    }

+   i915_gem_object_unlock(obj);
+
+   /* Force the fence to be reacquired for GTT access */
+   i915_gem_object_release_mmap_gtt(obj);
+
    return 0;
 }

@@ -528,7 +528,7 @@ retry:
    return rq;
 }

-struct i915_request *intel_context_find_active_request(struct intel_context *ce)
+struct i915_request *intel_context_get_active_request(struct intel_context *ce)
 {
    struct intel_context *parent = intel_context_to_parent(ce);
    struct i915_request *rq, *active = NULL;
@@ -552,6 +552,8 @@ struct i915_request *intel_context_find_active_request(struct intel_context *ce)

        active = rq;
    }
+   if (active)
+       active = i915_request_get_rcu(active);
    spin_unlock_irqrestore(&parent->guc_state.lock, flags);

    return active;
@@ -268,8 +268,7 @@ int intel_context_prepare_remote_request(struct intel_context *ce,

 struct i915_request *intel_context_create_request(struct intel_context *ce);

-struct i915_request *
-intel_context_find_active_request(struct intel_context *ce);
+struct i915_request *intel_context_get_active_request(struct intel_context *ce);

 static inline bool intel_context_is_barrier(const struct intel_context *ce)
 {
@@ -248,8 +248,8 @@ void intel_engine_dump_active_requests(struct list_head *requests,
 ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine,
                   ktime_t *now);

-struct i915_request *
-intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine);
+void intel_engine_get_hung_entity(struct intel_engine_cs *engine,
+                 struct intel_context **ce, struct i915_request **rq);

 u32 intel_engine_context_size(struct intel_gt *gt, u8 class);
 struct intel_context *
@@ -2094,17 +2094,6 @@ static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
    }
 }

-static unsigned long list_count(struct list_head *list)
-{
-   struct list_head *pos;
-   unsigned long count = 0;
-
-   list_for_each(pos, list)
-       count++;
-
-   return count;
-}
-
 static unsigned long read_ul(void *p, size_t x)
 {
    return *(unsigned long *)(p + x);
@@ -2196,11 +2185,11 @@ void intel_engine_dump_active_requests(struct list_head *requests,
    }
 }

-static void engine_dump_active_requests(struct intel_engine_cs *engine, struct drm_printer *m)
+static void engine_dump_active_requests(struct intel_engine_cs *engine,
+                   struct drm_printer *m)
 {
+   struct intel_context *hung_ce = NULL;
    struct i915_request *hung_rq = NULL;
-   struct intel_context *ce;
-   bool guc;

    /*
     * No need for an engine->irq_seqno_barrier() before the seqno reads.
|
||||
* But the intention here is just to report an instantaneous snapshot
|
||||
* so that's fine.
|
||||
*/
|
||||
lockdep_assert_held(&engine->sched_engine->lock);
|
||||
intel_engine_get_hung_entity(engine, &hung_ce, &hung_rq);
|
||||
|
||||
drm_printf(m, "\tRequests:\n");
|
||||
|
||||
guc = intel_uc_uses_guc_submission(&engine->gt->uc);
|
||||
if (guc) {
|
||||
ce = intel_engine_get_hung_context(engine);
|
||||
if (ce)
|
||||
hung_rq = intel_context_find_active_request(ce);
|
||||
} else {
|
||||
hung_rq = intel_engine_execlist_find_hung_request(engine);
|
||||
}
|
||||
|
||||
if (hung_rq)
|
||||
engine_dump_request(hung_rq, m, "\t\thung");
|
||||
else if (hung_ce)
|
||||
drm_printf(m, "\t\tGot hung ce but no hung rq!\n");
|
||||
|
||||
if (guc)
|
||||
if (intel_uc_uses_guc_submission(&engine->gt->uc))
|
||||
intel_guc_dump_active_requests(engine, hung_rq, m);
|
||||
else
|
||||
intel_engine_dump_active_requests(&engine->sched_engine->requests,
|
||||
hung_rq, m);
|
||||
intel_execlists_dump_active_requests(engine, hung_rq, m);
|
||||
|
||||
if (hung_rq)
|
||||
i915_request_put(hung_rq);
|
||||
}
|
||||
|
||||
void intel_engine_dump(struct intel_engine_cs *engine,
|
||||
@@ -2239,7 +2223,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
    struct i915_gpu_error * const error = &engine->i915->gpu_error;
    struct i915_request *rq;
    intel_wakeref_t wakeref;
-   unsigned long flags;
    ktime_t dummy;

    if (header) {
@@ -2276,13 +2259,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
           i915_reset_count(error));
    print_properties(engine, m);

-   spin_lock_irqsave(&engine->sched_engine->lock, flags);
    engine_dump_active_requests(engine, m);

-   drm_printf(m, "\tOn hold?: %lu\n",
-          list_count(&engine->sched_engine->hold));
-   spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
-
    drm_printf(m, "\tMMIO base:  0x%08x\n", engine->mmio_base);
    wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
    if (wakeref) {
@@ -2328,8 +2306,7 @@ intel_engine_create_virtual(struct intel_engine_cs **siblings,
    return siblings[0]->cops->create_virtual(siblings, count, flags);
 }

-struct i915_request *
-intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine)
+static struct i915_request *engine_execlist_find_hung_request(struct intel_engine_cs *engine)
 {
    struct i915_request *request, *active = NULL;

@@ -2381,6 +2358,33 @@ intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine)
    return active;
 }

+void intel_engine_get_hung_entity(struct intel_engine_cs *engine,
+                 struct intel_context **ce, struct i915_request **rq)
+{
+   unsigned long flags;
+
+   *ce = intel_engine_get_hung_context(engine);
+   if (*ce) {
+       intel_engine_clear_hung_context(engine);
+
+       *rq = intel_context_get_active_request(*ce);
+       return;
+   }
+
+   /*
+    * Getting here with GuC enabled means it is a forced error capture
+    * with no actual hang. So, no need to attempt the execlist search.
+    */
+   if (intel_uc_uses_guc_submission(&engine->gt->uc))
+       return;
+
+   spin_lock_irqsave(&engine->sched_engine->lock, flags);
+   *rq = engine_execlist_find_hung_request(engine);
+   if (*rq)
+       *rq = i915_request_get_rcu(*rq);
+   spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
+}
+
 void xehp_enable_ccs_engines(struct intel_engine_cs *engine)
 {
    /*
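Note: a rough pthread-based C sketch of the shape used by intel_engine_get_hung_entity() above — find the object of interest while holding the lock and take an extra reference before the lock is dropped, so the caller's pointer stays valid afterwards. The record/table types and names here are hypothetical, not i915 code.

#include <pthread.h>
#include <stdio.h>

/* Hypothetical refcounted record protected by a lock while it sits in the table. */
struct record {
    int refcount;
    int active;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct record table[3] = { { 1, 0 }, { 1, 1 }, { 1, 0 } };

/*
 * Find the active record and return it with an extra reference taken
 * while the lock is still held; the reference is what keeps the record
 * alive once the lock has been released.
 */
static struct record *get_active_record(void)
{
    struct record *found = NULL;
    int i;

    pthread_mutex_lock(&lock);
    for (i = 0; i < 3; i++) {
        if (table[i].active) {
            found = &table[i];
            found->refcount++;  /* reference taken under the lock */
            break;
        }
    }
    pthread_mutex_unlock(&lock);

    return found;
}

int main(void)
{
    struct record *r = get_active_record();

    if (r)
        printf("active record refcount: %d\n", r->refcount);
    return 0;
}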
@@ -4148,6 +4148,33 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
    spin_unlock_irqrestore(&sched_engine->lock, flags);
 }

+static unsigned long list_count(struct list_head *list)
+{
+   struct list_head *pos;
+   unsigned long count = 0;
+
+   list_for_each(pos, list)
+       count++;
+
+   return count;
+}
+
+void intel_execlists_dump_active_requests(struct intel_engine_cs *engine,
+                     struct i915_request *hung_rq,
+                     struct drm_printer *m)
+{
+   unsigned long flags;
+
+   spin_lock_irqsave(&engine->sched_engine->lock, flags);
+
+   intel_engine_dump_active_requests(&engine->sched_engine->requests, hung_rq, m);
+
+   drm_printf(m, "\tOn hold?: %lu\n",
+          list_count(&engine->sched_engine->hold));
+
+   spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftest_execlists.c"
 #endif
@@ -32,6 +32,10 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
                            int indent),
                   unsigned int max);

+void intel_execlists_dump_active_requests(struct intel_engine_cs *engine,
+                     struct i915_request *hung_rq,
+                     struct drm_printer *m);
+
 bool
 intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine);

@@ -1702,7 +1702,7 @@ static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t st
            goto next_context;

        guilty = false;
-       rq = intel_context_find_active_request(ce);
+       rq = intel_context_get_active_request(ce);
        if (!rq) {
            head = ce->ring->tail;
            goto out_replay;
@@ -1715,6 +1715,7 @@ static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t st
        head = intel_ring_wrap(ce->ring, rq->head);

        __i915_request_reset(rq, guilty);
+       i915_request_put(rq);
 out_replay:
        guc_reset_state(ce, head, guilty);
 next_context:
@@ -4817,6 +4818,8 @@ void intel_guc_find_hung_context(struct intel_engine_cs *engine)

    xa_lock_irqsave(&guc->context_lookup, flags);
    xa_for_each(&guc->context_lookup, index, ce) {
+       bool found;
+
        if (!kref_get_unless_zero(&ce->ref))
            continue;

@@ -4833,10 +4836,18 @@ void intel_guc_find_hung_context(struct intel_engine_cs *engine)
            goto next;
        }

+       found = false;
+       spin_lock(&ce->guc_state.lock);
        list_for_each_entry(rq, &ce->guc_state.requests, sched.link) {
            if (i915_test_request_state(rq) != I915_REQUEST_ACTIVE)
                continue;

+           found = true;
+           break;
+       }
+       spin_unlock(&ce->guc_state.lock);
+
+       if (found) {
            intel_engine_set_hung_context(engine, ce);

            /* Can only cope with one hang at a time... */
@@ -4844,6 +4855,7 @@ void intel_guc_find_hung_context(struct intel_engine_cs *engine)
            xa_lock(&guc->context_lookup);
            goto done;
        }
+
 next:
        intel_context_put(ce);
        xa_lock(&guc->context_lookup);
@@ -1596,43 +1596,20 @@ capture_engine(struct intel_engine_cs *engine,
 {
    struct intel_engine_capture_vma *capture = NULL;
    struct intel_engine_coredump *ee;
-   struct intel_context *ce;
+   struct intel_context *ce = NULL;
    struct i915_request *rq = NULL;
-   unsigned long flags;

    ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL, dump_flags);
    if (!ee)
        return NULL;

-   ce = intel_engine_get_hung_context(engine);
-   if (ce) {
-       intel_engine_clear_hung_context(engine);
-       rq = intel_context_find_active_request(ce);
-       if (!rq || !i915_request_started(rq))
-           goto no_request_capture;
-   } else {
-       /*
-        * Getting here with GuC enabled means it is a forced error capture
-        * with no actual hang. So, no need to attempt the execlist search.
-        */
-       if (!intel_uc_uses_guc_submission(&engine->gt->uc)) {
-           spin_lock_irqsave(&engine->sched_engine->lock, flags);
-           rq = intel_engine_execlist_find_hung_request(engine);
-           spin_unlock_irqrestore(&engine->sched_engine->lock,
-                          flags);
-       }
-   }
-   if (rq)
-       rq = i915_request_get_rcu(rq);
-
-   if (!rq)
+   intel_engine_get_hung_entity(engine, &ce, &rq);
+   if (!rq || !i915_request_started(rq))
        goto no_request_capture;

    capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL);
-   if (!capture) {
-       i915_request_put(rq);
+   if (!capture)
        goto no_request_capture;
-   }
    if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
        intel_guc_capture_get_matching_node(engine->gt, ee, ce);

@@ -1642,6 +1619,8 @@ capture_engine(struct intel_engine_cs *engine,
    return ee;

 no_request_capture:
+   if (rq)
+       i915_request_put(rq);
    kfree(ee);
    return NULL;
 }

@@ -97,6 +97,7 @@ int gp100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct n
 int gp102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
 int gp10b_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
 int gv100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int tu102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
 int ga100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
 int ga102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);

@@ -151,6 +151,9 @@ nvkm_firmware_mem_page(struct nvkm_memory *memory)
 static enum nvkm_memory_target
 nvkm_firmware_mem_target(struct nvkm_memory *memory)
 {
+   if (nvkm_firmware_mem(memory)->device->func->tegra)
+       return NVKM_MEM_TARGET_NCOH;
+
    return NVKM_MEM_TARGET_HOST;
 }

@@ -2405,7 +2405,7 @@ nv162_chipset = {
    .bus = { 0x00000001, gf100_bus_new },
    .devinit = { 0x00000001, tu102_devinit_new },
    .fault = { 0x00000001, tu102_fault_new },
-   .fb = { 0x00000001, gv100_fb_new },
+   .fb = { 0x00000001, tu102_fb_new },
    .fuse = { 0x00000001, gm107_fuse_new },
    .gpio = { 0x00000001, gk104_gpio_new },
    .gsp = { 0x00000001, gv100_gsp_new },
@@ -2440,7 +2440,7 @@ nv164_chipset = {
    .bus = { 0x00000001, gf100_bus_new },
    .devinit = { 0x00000001, tu102_devinit_new },
    .fault = { 0x00000001, tu102_fault_new },
-   .fb = { 0x00000001, gv100_fb_new },
+   .fb = { 0x00000001, tu102_fb_new },
    .fuse = { 0x00000001, gm107_fuse_new },
    .gpio = { 0x00000001, gk104_gpio_new },
    .gsp = { 0x00000001, gv100_gsp_new },
@@ -2475,7 +2475,7 @@ nv166_chipset = {
    .bus = { 0x00000001, gf100_bus_new },
    .devinit = { 0x00000001, tu102_devinit_new },
    .fault = { 0x00000001, tu102_fault_new },
-   .fb = { 0x00000001, gv100_fb_new },
+   .fb = { 0x00000001, tu102_fb_new },
    .fuse = { 0x00000001, gm107_fuse_new },
    .gpio = { 0x00000001, gk104_gpio_new },
    .gsp = { 0x00000001, gv100_gsp_new },
@@ -2510,7 +2510,7 @@ nv167_chipset = {
    .bus = { 0x00000001, gf100_bus_new },
    .devinit = { 0x00000001, tu102_devinit_new },
    .fault = { 0x00000001, tu102_fault_new },
-   .fb = { 0x00000001, gv100_fb_new },
+   .fb = { 0x00000001, tu102_fb_new },
    .fuse = { 0x00000001, gm107_fuse_new },
    .gpio = { 0x00000001, gk104_gpio_new },
    .gsp = { 0x00000001, gv100_gsp_new },
@@ -2545,7 +2545,7 @@ nv168_chipset = {
    .bus = { 0x00000001, gf100_bus_new },
    .devinit = { 0x00000001, tu102_devinit_new },
    .fault = { 0x00000001, tu102_fault_new },
-   .fb = { 0x00000001, gv100_fb_new },
+   .fb = { 0x00000001, tu102_fb_new },
    .fuse = { 0x00000001, gm107_fuse_new },
    .gpio = { 0x00000001, gk104_gpio_new },
    .gsp = { 0x00000001, gv100_gsp_new },
@@ -48,6 +48,16 @@ gm200_flcn_pio_dmem_rd(struct nvkm_falcon *falcon, u8 port, const u8 *img, int l
        img += 4;
        len -= 4;
    }
+
+   /* Sigh. Tegra PMU FW's init message... */
+   if (len) {
+       u32 data = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));
+
+       while (len--) {
+           *(u8 *)img++ = data & 0xff;
+           data >>= 8;
+       }
+   }
 }

 static void
@@ -64,6 +74,8 @@ gm200_flcn_pio_dmem_wr(struct nvkm_falcon *falcon, u8 port, const u8 *img, int l
        img += 4;
        len -= 4;
    }
+
+   WARN_ON(len);
 }

 static void
@@ -74,7 +86,7 @@ gm200_flcn_pio_dmem_wr_init(struct nvkm_falcon *falcon, u8 port, bool sec, u32 d

 const struct nvkm_falcon_func_pio
 gm200_flcn_dmem_pio = {
-   .min = 4,
+   .min = 1,
    .max = 0x100,
    .wr_init = gm200_flcn_pio_dmem_wr_init,
    .wr = gm200_flcn_pio_dmem_wr,
@@ -65,10 +65,33 @@ tu102_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
    return ret;
 }

+static int
+tu102_devinit_wait(struct nvkm_device *device)
+{
+   unsigned timeout = 50 + 2000;
+
+   do {
+       if (nvkm_rd32(device, 0x118128) & 0x00000001) {
+           if ((nvkm_rd32(device, 0x118234) & 0x000000ff) == 0xff)
+               return 0;
+       }
+
+       usleep_range(1000, 2000);
+   } while (timeout--);
+
+   return -ETIMEDOUT;
+}
+
 int
 tu102_devinit_post(struct nvkm_devinit *base, bool post)
 {
    struct nv50_devinit *init = nv50_devinit(base);
+   int ret;
+
+   ret = tu102_devinit_wait(init->base.subdev.device);
+   if (ret)
+       return ret;
+
    gm200_devinit_preos(init, post);
    return 0;
 }
@@ -32,6 +32,7 @@ nvkm-y += nvkm/subdev/fb/gp100.o
 nvkm-y += nvkm/subdev/fb/gp102.o
 nvkm-y += nvkm/subdev/fb/gp10b.o
 nvkm-y += nvkm/subdev/fb/gv100.o
+nvkm-y += nvkm/subdev/fb/tu102.o
 nvkm-y += nvkm/subdev/fb/ga100.o
 nvkm-y += nvkm/subdev/fb/ga102.o

@@ -40,12 +40,6 @@ ga102_fb_vpr_scrub(struct nvkm_fb *fb)
    return ret;
 }

-static bool
-ga102_fb_vpr_scrub_required(struct nvkm_fb *fb)
-{
-   return (nvkm_rd32(fb->subdev.device, 0x1fa80c) & 0x00000010) != 0;
-}
-
 static const struct nvkm_fb_func
 ga102_fb = {
    .dtor = gf100_fb_dtor,
@@ -56,7 +50,7 @@ ga102_fb = {
    .sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
    .ram_new = ga102_ram_new,
    .default_bigpage = 16,
-   .vpr.scrub_required = ga102_fb_vpr_scrub_required,
+   .vpr.scrub_required = tu102_fb_vpr_scrub_required,
    .vpr.scrub = ga102_fb_vpr_scrub,
 };

@@ -49,8 +49,3 @@ gv100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, s
 }

 MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/tu102/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/tu104/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/tu106/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/tu116/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/tu117/nvdec/scrubber.bin");
@@ -89,4 +89,6 @@ bool gp102_fb_vpr_scrub_required(struct nvkm_fb *);
 int gp102_fb_vpr_scrub(struct nvkm_fb *);

 int gv100_fb_init_page(struct nvkm_fb *);
+
+bool tu102_fb_vpr_scrub_required(struct nvkm_fb *);
 #endif

drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c (new file, 55 lines)
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+#include "ram.h"
+
+bool
+tu102_fb_vpr_scrub_required(struct nvkm_fb *fb)
+{
+   return (nvkm_rd32(fb->subdev.device, 0x1fa80c) & 0x00000010) != 0;
+}
+
+static const struct nvkm_fb_func
+tu102_fb = {
+   .dtor = gf100_fb_dtor,
+   .oneinit = gf100_fb_oneinit,
+   .init = gm200_fb_init,
+   .init_page = gv100_fb_init_page,
+   .init_unkn = gp100_fb_init_unkn,
+   .sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
+   .vpr.scrub_required = tu102_fb_vpr_scrub_required,
+   .vpr.scrub = gp102_fb_vpr_scrub,
+   .ram_new = gp100_ram_new,
+   .default_bigpage = 16,
+};
+
+int
+tu102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
+{
+   return gp102_fb_new_(&tu102_fb, device, type, inst, pfb);
+}
+
+MODULE_FIRMWARE("nvidia/tu102/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu104/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu106/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu116/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu117/nvdec/scrubber.bin");
@@ -225,7 +225,7 @@ gm20b_pmu_init(struct nvkm_pmu *pmu)

    pmu->initmsg_received = false;

-   nvkm_falcon_load_dmem(falcon, &args, addr_args, sizeof(args), 0);
+   nvkm_falcon_pio_wr(falcon, (u8 *)&args, 0, 0, DMEM, addr_args, sizeof(args), 0, false);
    nvkm_falcon_start(falcon);
    return 0;
 }
@@ -1193,14 +1193,11 @@ static int boe_panel_enter_sleep_mode(struct boe_panel *boe)
    return 0;
 }

-static int boe_panel_unprepare(struct drm_panel *panel)
+static int boe_panel_disable(struct drm_panel *panel)
 {
    struct boe_panel *boe = to_boe_panel(panel);
    int ret;

-   if (!boe->prepared)
-       return 0;
-
    ret = boe_panel_enter_sleep_mode(boe);
    if (ret < 0) {
        dev_err(panel->dev, "failed to set panel off: %d\n", ret);
@@ -1209,6 +1206,16 @@ static int boe_panel_unprepare(struct drm_panel *panel)

    msleep(150);

+   return 0;
+}
+
+static int boe_panel_unprepare(struct drm_panel *panel)
+{
+   struct boe_panel *boe = to_boe_panel(panel);
+
+   if (!boe->prepared)
+       return 0;
+
    if (boe->desc->discharge_on_disable) {
        regulator_disable(boe->avee);
        regulator_disable(boe->avdd);
@@ -1528,6 +1535,7 @@ static enum drm_panel_orientation boe_panel_get_orientation(struct drm_panel *pa
 }

 static const struct drm_panel_funcs boe_panel_funcs = {
+   .disable = boe_panel_disable,
    .unprepare = boe_panel_unprepare,
    .prepare = boe_panel_prepare,
    .enable = boe_panel_enable,
@@ -656,18 +656,8 @@ static const struct drm_crtc_helper_funcs ssd130x_crtc_helper_funcs = {
    .atomic_check = drm_crtc_helper_atomic_check,
 };

-static void ssd130x_crtc_reset(struct drm_crtc *crtc)
-{
-   struct drm_device *drm = crtc->dev;
-   struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
-
-   ssd130x_init(ssd130x);
-
-   drm_atomic_helper_crtc_reset(crtc);
-}
-
 static const struct drm_crtc_funcs ssd130x_crtc_funcs = {
-   .reset = ssd130x_crtc_reset,
+   .reset = drm_atomic_helper_crtc_reset,
    .destroy = drm_crtc_cleanup,
    .set_config = drm_atomic_helper_set_config,
    .page_flip = drm_atomic_helper_page_flip,
@@ -686,6 +676,12 @@ static void ssd130x_encoder_helper_atomic_enable(struct drm_encoder *encoder,
    if (ret)
        return;

+   ret = ssd130x_init(ssd130x);
+   if (ret) {
+       ssd130x_power_off(ssd130x);
+       return;
+   }
+
    ssd130x_write_cmd(ssd130x, 1, SSD130X_DISPLAY_ON);

    backlight_enable(ssd130x->bl_dev);
@@ -3018,7 +3018,8 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
    }

    vc4_hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops,
-                         vc4_hdmi, "vc4",
+                         vc4_hdmi,
+                         vc4_hdmi->variant->card_name,
                          CEC_CAP_DEFAULTS |
                          CEC_CAP_CONNECTOR_INFO, 1);
    ret = PTR_ERR_OR_ZERO(vc4_hdmi->cec_adap);