To make sure that we don't unintentionally perform any unclocked and/or unpowered R/W operation on GPU registers, we must ensure that no GPU, JOB or MMU ISR execution is pending before turning off clocks and regulators: doing that requires adding a mechanism to synchronize the interrupts on suspend.

Add functions panfrost_{gpu,job,mmu}_suspend_irq(), which perform interrupt masking and ISR execution synchronization, and call them in the panfrost_device_runtime_suspend() handler in the exact sequence job (may require mmu!) -> mmu -> gpu.

As a side note, the JOB and MMU suspend_irq functions needed some special treatment: since their interrupt handlers unmask interrupts, it was necessary to add an `is_suspended` bitmap, which is used to address the possible corner case of unintentional IRQ unmasking caused by ISR execution after a call to synchronize_irq().

At resume, clear each is_suspended bit in the reset path of JOB/MMU to allow unmasking the interrupts again.

Signed-off-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231204114215.54575-4-angelogioacchino.delregno@collabora.com
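For reference, a minimal sketch of the JOB-side mechanism described above, assuming helper names such as job_write()/job_read(), the JOB_INT_MASK/JOB_INT_STAT register defines, a PANFROST_COMP_BIT_JOB bit in the pfdev->is_suspended bitmap and the JOB IRQ number in pfdev->js->irq; the exact identifiers in the driver may differ:

/*
 * Sketch: mask JOB interrupts, mark the block as suspended and wait for any
 * in-flight ISR to finish, so that no unclocked/unpowered register access can
 * happen once clocks and regulators are turned off.
 */
void panfrost_job_suspend_irq(struct panfrost_device *pfdev)
{
	set_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended);

	job_write(pfdev, JOB_INT_MASK, 0);
	synchronize_irq(pfdev->js->irq);
}

/*
 * Sketch: the hard IRQ handler bails out early while suspended, so an ISR that
 * runs after synchronize_irq() (the corner case mentioned above) cannot lead
 * to JOB interrupts being unmasked again behind our back.
 */
static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status;

	if (test_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended))
		return IRQ_NONE;

	status = job_read(pfdev, JOB_INT_STAT);
	if (!status)
		return IRQ_NONE;

	job_write(pfdev, JOB_INT_MASK, 0);
	return IRQ_WAKE_THREAD;
}

The MMU path would follow the same pattern with its own is_suspended bit; the GPU path only needs masking plus synchronization, since its ISR does not unmask interrupts.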
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright 2019 Collabora ltd. */

#ifndef __PANFROST_JOB_H__
#define __PANFROST_JOB_H__

#include <uapi/drm/panfrost_drm.h>
#include <drm/gpu_scheduler.h>

struct panfrost_device;
struct panfrost_gem_object;
struct panfrost_file_priv;

struct panfrost_job {
	struct drm_sched_job base;

	struct kref refcount;

	struct panfrost_device *pfdev;
	struct panfrost_mmu *mmu;

	/* Fence to be signaled by IRQ handler when the job is complete. */
	struct dma_fence *done_fence;

	__u64 jc;
	__u32 requirements;
	__u32 flush_id;

	struct panfrost_gem_mapping **mappings;
	struct drm_gem_object **bos;
	u32 bo_count;

	/* Fence to be signaled by drm-sched once its done with the job */
	struct dma_fence *render_done_fence;

	struct panfrost_engine_usage *engine_usage;
	bool is_profiled;
	ktime_t start_time;
	u64 start_cycles;
};

int panfrost_job_init(struct panfrost_device *pfdev);
void panfrost_job_fini(struct panfrost_device *pfdev);
int panfrost_job_open(struct panfrost_file_priv *panfrost_priv);
void panfrost_job_close(struct panfrost_file_priv *panfrost_priv);
int panfrost_job_get_slot(struct panfrost_job *job);
int panfrost_job_push(struct panfrost_job *job);
void panfrost_job_put(struct panfrost_job *job);
void panfrost_job_enable_interrupts(struct panfrost_device *pfdev);
void panfrost_job_suspend_irq(struct panfrost_device *pfdev);
int panfrost_job_is_idle(struct panfrost_device *pfdev);

#endif
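The panfrost_job_suspend_irq() declaration above pairs with matching GPU and MMU helpers. A hedged sketch of how the runtime-suspend handler might chain them in the job -> mmu -> gpu order described in the commit message, assuming existing helpers panfrost_job_is_idle() and panfrost_gpu_power_off() and hypothetical panfrost_{mmu,gpu}_suspend_irq() counterparts:

static int panfrost_device_runtime_suspend(struct device *dev)
{
	struct panfrost_device *pfdev = dev_get_drvdata(dev);

	if (!panfrost_job_is_idle(pfdev))
		return -EBUSY;

	/*
	 * Quiesce interrupts before cutting clocks/regulators: JOB first
	 * (its ISR may still require the MMU), then MMU, then GPU.
	 */
	panfrost_job_suspend_irq(pfdev);
	panfrost_mmu_suspend_irq(pfdev);
	panfrost_gpu_suspend_irq(pfdev);
	panfrost_gpu_power_off(pfdev);

	return 0;
}

On resume, the JOB/MMU reset paths clear their is_suspended bits again before re-enabling interrupts, which is what allows the interrupt unmasking to proceed after a suspend/resume cycle.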