drm/msm/adreno: split a6xx fault handler into generic and a6xx parts
Split the a6xx_fault_handler() into the generic adreno_fault_handler() and
platform-specific parts. The adreno_fault_handler() can further be used by
a5xx and hopefully by a4xx (at some point).

Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Patchwork: https://patchwork.freedesktop.org/patch/522722/
Link: https://lore.kernel.org/r/20230214123504.3729522-3-dmitry.baryshkov@linaro.org
Signed-off-by: Rob Clark <robdclark@chromium.org>
parent 8cceb773f5
commit f62ad0f6f4
drivers/gpu/drm/msm/adreno/a6xx_gpu.c

@@ -1362,73 +1362,23 @@ static const char *a6xx_fault_block(struct msm_gpu *gpu, u32 id)
         return a6xx_uche_fault_block(gpu, id);
 }
 
-#define ARM_SMMU_FSR_TF                 BIT(1)
-#define ARM_SMMU_FSR_PF                 BIT(3)
-#define ARM_SMMU_FSR_EF                 BIT(4)
-
 static int a6xx_fault_handler(void *arg, unsigned long iova, int flags, void *data)
 {
         struct msm_gpu *gpu = arg;
         struct adreno_smmu_fault_info *info = data;
-        const char *type = "UNKNOWN";
-        const char *block;
-        bool do_devcoredump = info && !READ_ONCE(gpu->crashstate);
-
-        /*
-         * If we aren't going to be resuming later from fault_worker, then do
-         * it now.
-         */
-        if (!do_devcoredump) {
-                gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
-        }
-
-        /*
-         * Print a default message if we couldn't get the data from the
-         * adreno-smmu-priv
-         */
-        if (!info) {
-                pr_warn_ratelimited("*** gpu fault: iova=%.16lx flags=%d (%u,%u,%u,%u)\n",
-                        iova, flags,
-                        gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)),
-                        gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)),
-                        gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)),
-                        gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7)));
-
-                return 0;
-        }
-
-        if (info->fsr & ARM_SMMU_FSR_TF)
-                type = "TRANSLATION";
-        else if (info->fsr & ARM_SMMU_FSR_PF)
-                type = "PERMISSION";
-        else if (info->fsr & ARM_SMMU_FSR_EF)
-                type = "EXTERNAL";
-
-        block = a6xx_fault_block(gpu, info->fsynr1 & 0xff);
-
-        pr_warn_ratelimited("*** gpu fault: ttbr0=%.16llx iova=%.16lx dir=%s type=%s source=%s (%u,%u,%u,%u)\n",
-                info->ttbr0, iova,
-                flags & IOMMU_FAULT_WRITE ? "WRITE" : "READ",
-                type, block,
-                gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)),
-                gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)),
-                gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)),
-                gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7)));
-
-        if (do_devcoredump) {
-                /* Turn off the hangcheck timer to keep it from bothering us */
-                del_timer(&gpu->hangcheck_timer);
-
-                gpu->fault_info.ttbr0 = info->ttbr0;
-                gpu->fault_info.iova = iova;
-                gpu->fault_info.flags = flags;
-                gpu->fault_info.type = type;
-                gpu->fault_info.block = block;
-
-                kthread_queue_work(gpu->worker, &gpu->fault_work);
-        }
-
-        return 0;
+        const char *block = "unknown";
+        u32 scratch[] = {
+                gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)),
+                gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)),
+                gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)),
+                gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7)),
+        };
+
+        if (info)
+                block = a6xx_fault_block(gpu, info->fsynr1 & 0xff);
+
+        return adreno_fault_handler(gpu, iova, flags, info, block, scratch);
 }
 
 static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu)
drivers/gpu/drm/msm/adreno/adreno_gpu.c

@@ -246,6 +246,66 @@ u64 adreno_private_address_space_size(struct msm_gpu *gpu)
         return SZ_4G;
 }
 
+#define ARM_SMMU_FSR_TF                 BIT(1)
+#define ARM_SMMU_FSR_PF                 BIT(3)
+#define ARM_SMMU_FSR_EF                 BIT(4)
+
+int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
+                         struct adreno_smmu_fault_info *info, const char *block,
+                         u32 scratch[4])
+{
+        const char *type = "UNKNOWN";
+        bool do_devcoredump = info && !READ_ONCE(gpu->crashstate);
+
+        /*
+         * If we aren't going to be resuming later from fault_worker, then do
+         * it now.
+         */
+        if (!do_devcoredump) {
+                gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
+        }
+
+        /*
+         * Print a default message if we couldn't get the data from the
+         * adreno-smmu-priv
+         */
+        if (!info) {
+                pr_warn_ratelimited("*** gpu fault: iova=%.16lx flags=%d (%u,%u,%u,%u)\n",
+                        iova, flags,
+                        scratch[0], scratch[1], scratch[2], scratch[3]);
+
+                return 0;
+        }
+
+        if (info->fsr & ARM_SMMU_FSR_TF)
+                type = "TRANSLATION";
+        else if (info->fsr & ARM_SMMU_FSR_PF)
+                type = "PERMISSION";
+        else if (info->fsr & ARM_SMMU_FSR_EF)
+                type = "EXTERNAL";
+
+        pr_warn_ratelimited("*** gpu fault: ttbr0=%.16llx iova=%.16lx dir=%s type=%s source=%s (%u,%u,%u,%u)\n",
+                info->ttbr0, iova,
+                flags & IOMMU_FAULT_WRITE ? "WRITE" : "READ",
+                type, block,
+                scratch[0], scratch[1], scratch[2], scratch[3]);
+
+        if (do_devcoredump) {
+                /* Turn off the hangcheck timer to keep it from bothering us */
+                del_timer(&gpu->hangcheck_timer);
+
+                gpu->fault_info.ttbr0 = info->ttbr0;
+                gpu->fault_info.iova = iova;
+                gpu->fault_info.flags = flags;
+                gpu->fault_info.type = type;
+                gpu->fault_info.block = block;
+
+                kthread_queue_work(gpu->worker, &gpu->fault_work);
+        }
+
+        return 0;
+}
+
 int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
                      uint32_t param, uint64_t *value, uint32_t *len)
 {
drivers/gpu/drm/msm/adreno/adreno_gpu.h

@@ -341,6 +341,10 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
                 struct platform_device *pdev,
                 unsigned long quirks);
 
+int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
+                         struct adreno_smmu_fault_info *info, const char *block,
+                         u32 scratch[4]);
+
 int adreno_read_speedbin(struct device *dev, u32 *speedbin);
 
 /*
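The commit message mentions that a5xx (and possibly a4xx) could reuse the new generic
handler. As an illustration only, not part of this patch, here is a minimal sketch of
what an a5xx caller might look like, assuming the a5xx scratch registers are read via
REG_A5XX_CP_SCRATCH_REG(4..7) and that no per-block fault decoding is wired up for a5xx:

/* Hypothetical a5xx wrapper around adreno_fault_handler() (sketch, not part of
 * this commit). Assumes REG_A5XX_CP_SCRATCH_REG() provides the CP scratch
 * registers and passes "unknown" because a5xx has no fault-block decoder here.
 */
static int a5xx_fault_handler(void *arg, unsigned long iova, int flags, void *data)
{
        struct msm_gpu *gpu = arg;
        struct adreno_smmu_fault_info *info = data;
        u32 scratch[] = {
                gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(4)),
                gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(5)),
                gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(6)),
                gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(7)),
        };

        /* info may be NULL when the SMMU does not provide adreno-smmu-priv data;
         * adreno_fault_handler() already copes with that case. */
        return adreno_fault_handler(gpu, iova, flags, info, "unknown", scratch);
}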