Merge tag 'drm-msm-next-2023-10-17' of https://gitlab.freedesktop.org/drm/msm into drm-next

Updates for v6.7

DP:
- use existing helpers for DPCD handling instead of open-coded functions
- set the subconnector type according to the plugged cable / dongle
- skip validity check for DP CTS EDID checksum
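
As an illustration of the first DP bullet, a minimal sketch of leaning on
the shared DRM DPCD helpers rather than open-coded AUX transfers; the
helper names (drm_dp_read_dpcd_caps, drm_dp_dpcd_readb) are the real DRM
ones, but the surrounding function is hypothetical:

#include <drm/display/drm_dp_helper.h>

/* Hypothetical example, not the driver's actual code. */
static int example_read_sink_info(struct drm_dp_aux *aux)
{
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	u8 port0;
	int ret;

	/* Fetch and sanity-check the receiver capability block in one call */
	ret = drm_dp_read_dpcd_caps(aux, dpcd);
	if (ret < 0)
		return ret;

	/* Single-byte DPCD reads also have a helper */
	ret = drm_dp_dpcd_readb(aux, DP_DOWNSTREAM_PORT_0, &port0);
	if (ret < 0)
		return ret;

	return 0;
}

The subconnector bullet builds on the same data: the existing helper
drm_dp_set_subconnector_property() derives the subconnector type from the
DPCD and downstream-port capabilities.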

DPU:
- continued migration of feature flags to use core revision checks
- reworked interrupts code to use '0' as NO_IRQ, removed raw IRQ indices
  from log / trace output
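
The interrupt rework amounts to reserving index 0 as "no IRQ", so validity
checks need no signed sentinel value; a hypothetical illustration of the
convention (not the driver's actual definitions):

/* Hypothetical sketch: valid DPU IRQ indices start at 1, 0 means none. */
#define EXAMPLE_NO_IRQ	0U

static bool example_irq_valid(unsigned int irq_idx)
{
	return irq_idx != EXAMPLE_NO_IRQ;
}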

gpu:
- a7xx support (a730, a740)
- fixes and additional speedbins for a635, a643

core:
- decouple msm_drv from kms to more cleanly support headless devices (like
  imx5+a2xx)
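
Decoupling means display support becomes optional at runtime; a rough,
hypothetical sketch of the resulting pattern (msm_drm_private and its kms
pointer are the real structures, the guard placement is illustrative):

struct msm_drm_private *priv = dev->dev_private;

/* Headless devices (e.g. imx5 + a2xx) never get a KMS object, so
 * display paths are guarded instead of assumed.
 */
if (priv->kms)
	drm_mode_config_reset(dev);	/* display-only path */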

From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGvzkBL2_OgyOeP_b6rVEjrNdfm8jcKzaB04HqHyT5jYwA@mail.gmail.com
Signed-off-by: Dave Airlie <airlied@redhat.com>
Dave Airlie 2023-10-23 18:29:18 +10:00
commit 3f5ba636d6
99 changed files with 2371 additions and 1409 deletions

View File

@ -114,6 +114,7 @@ properties:
port@1:
$ref: /schemas/graph.yaml#/$defs/port-base
unevaluatedProperties: false
description: Output endpoint of the controller
properties:
endpoint:

View File

@ -21,7 +21,7 @@ properties:
compatible:
oneOf:
- items:
- pattern: '^qcom,adreno-gmu-6[0-9][0-9]\.[0-9]$'
- pattern: '^qcom,adreno-gmu-[67][0-9][0-9]\.[0-9]$'
- const: qcom,adreno-gmu
- const: qcom,adreno-gmu-wrapper
@ -64,6 +64,10 @@ properties:
iommus:
maxItems: 1
qcom,qmp:
$ref: /schemas/types.yaml#/definitions/phandle
description: Reference to the AOSS side-channel message RAM
operating-points-v2: true
opp-table:
@ -213,6 +217,47 @@ allOf:
- const: axi
- const: memnoc
- if:
properties:
compatible:
contains:
enum:
- qcom,adreno-gmu-730.1
- qcom,adreno-gmu-740.1
then:
properties:
reg:
items:
- description: Core GMU registers
- description: Resource controller registers
- description: GMU PDC registers
reg-names:
items:
- const: gmu
- const: rscc
- const: gmu_pdc
clocks:
items:
- description: GPU AHB clock
- description: GMU clock
- description: GPU CX clock
- description: GPU AXI clock
- description: GPU MEMNOC clock
- description: GMU HUB clock
- description: GPUSS DEMET clock
clock-names:
items:
- const: ahb
- const: gmu
- const: cxo
- const: axi
- const: memnoc
- const: hub
- const: demet
required:
- qcom,qmp
- if:
properties:
compatible:

View File

@ -23,7 +23,7 @@ properties:
The driver is parsing the compat string for Adreno to
figure out the gpu-id and patch level.
items:
- pattern: '^qcom,adreno-[3-6][0-9][0-9]\.[0-9]$'
- pattern: '^qcom,adreno-[3-7][0-9][0-9]\.[0-9]$'
- const: qcom,adreno
- description: |
The driver is parsing the compat string for Imageon to
@ -203,7 +203,7 @@ allOf:
properties:
compatible:
contains:
pattern: '^qcom,adreno-6[0-9][0-9]\.[0-9]$'
pattern: '^qcom,adreno-[67][0-9][0-9]\.[0-9]$'
then: # Starting with A6xx, the clocks are usually defined in the GMU node
properties:

View File

@ -38,12 +38,16 @@ properties:
patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,msm8998-dpu
"^dsi@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
items:
@ -52,6 +56,8 @@ patternProperties:
"^phy@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,dsi-phy-10nm-8998

View File

@ -44,18 +44,24 @@ properties:
patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,qcm2290-dpu
"^dsi@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,dsi-ctrl-6g-qcm2290
"^phy@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,dsi-phy-14nm-2290

View File

@ -44,18 +44,24 @@ properties:
patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,sc7180-dpu
"^displayport-controller@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,sc7180-dp
"^dsi@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
items:
@ -64,6 +70,8 @@ patternProperties:
"^phy@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,dsi-phy-10nm

View File

@ -44,18 +44,24 @@ properties:
patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,sc7280-dpu
"^displayport-controller@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,sc7280-dp
"^dsi@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
items:
@ -64,12 +70,16 @@ patternProperties:
"^edp@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,sc7280-edp
"^phy@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
enum:

View File

@ -34,12 +34,16 @@ properties:
patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,sc8280xp-dpu
"^displayport-controller@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
enum:

View File

@ -42,18 +42,24 @@ properties:
patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,sdm845-dpu
"^displayport-controller@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,sdm845-dp
"^dsi@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
items:
@ -62,6 +68,8 @@ patternProperties:
"^phy@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,dsi-phy-10nm

View File

@ -32,12 +32,16 @@ properties:
patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,sm6115-dpu
"^dsi@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
oneOf:
@ -50,6 +54,8 @@ patternProperties:
"^phy@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,dsi-phy-14nm-2290

View File

@ -43,12 +43,16 @@ properties:
patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,sm6125-dpu
"^dsi@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
items:
@ -57,6 +61,8 @@ patternProperties:
"^phy@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,sm6125-dsi-phy-14nm

View File

@ -43,12 +43,16 @@ properties:
patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,sm6350-dpu
"^dsi@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
items:
@ -57,6 +61,8 @@ patternProperties:
"^phy@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,dsi-phy-10nm

View File

@ -43,12 +43,16 @@ properties:
patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,sm6375-dpu
"^dsi@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
items:
@ -57,6 +61,8 @@ patternProperties:
"^phy@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,sm6375-dsi-phy-7nm

View File

@ -47,12 +47,16 @@ properties:
patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,sm8150-dpu
"^dsi@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
items:
@ -61,6 +65,8 @@ patternProperties:
"^phy@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,dsi-phy-7nm

View File

@ -46,12 +46,16 @@ properties:
patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,sm8250-dpu
"^dsi@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
items:
@ -60,6 +64,8 @@ patternProperties:
"^phy@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,dsi-phy-7nm

View File

@ -48,18 +48,24 @@ properties:
patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,sm8350-dpu
"^displayport-controller@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,sm8350-dp
"^dsi@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
items:
@ -68,6 +74,8 @@ patternProperties:
"^phy@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,sm8350-dsi-phy-5nm

View File

@ -38,12 +38,16 @@ properties:
patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,sm8450-dpu
"^displayport-controller@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
items:
@ -52,6 +56,8 @@ patternProperties:
"^dsi@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
items:
@ -60,6 +66,8 @@ patternProperties:
"^phy@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,sm8450-dsi-phy-5nm

View File

@ -38,12 +38,16 @@ properties:
patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,sm8550-dpu
"^displayport-controller@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
items:
@ -52,6 +56,8 @@ patternProperties:
"^dsi@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
items:
@ -60,6 +66,8 @@ patternProperties:
"^phy@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,sm8550-dsi-phy-4nm

View File

@ -106,6 +106,7 @@ msm-y += \
msm_gpu_devfreq.o \
msm_io_utils.o \
msm_iommu.o \
msm_kms.o \
msm_perf.o \
msm_rd.o \
msm_ringbuffer.o \

View File

@ -1114,6 +1114,12 @@ enum a6xx_tex_type {
#define REG_A6XX_CP_MISC_CNTL 0x00000840
#define REG_A6XX_CP_APRIV_CNTL 0x00000844
#define A6XX_CP_APRIV_CNTL_CDWRITE 0x00000040
#define A6XX_CP_APRIV_CNTL_CDREAD 0x00000020
#define A6XX_CP_APRIV_CNTL_RBRPWB 0x00000008
#define A6XX_CP_APRIV_CNTL_RBPRIVLEVEL 0x00000004
#define A6XX_CP_APRIV_CNTL_RBFETCH 0x00000002
#define A6XX_CP_APRIV_CNTL_ICACHE 0x00000001
#define REG_A6XX_CP_PREEMPT_THRESHOLD 0x000008c0
@ -1939,6 +1945,8 @@ static inline uint32_t REG_A6XX_RBBM_PERFCTR_RBBM_SEL(uint32_t i0) { return 0x00
#define REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE 0x00000122
#define REG_A7XX_RBBM_CLOCK_HYST2_VFD 0x0000012f
#define REG_A6XX_RBBM_LPAC_GBIF_CLIENT_QOS_CNTL 0x000005ff
#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_A 0x00000600
@ -8252,5 +8260,6 @@ static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
#define REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1 0x00000002
#define REG_A7XX_CX_MISC_TCM_RET_CNTL 0x00000039
#endif /* A6XX_XML */

View File

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/interconnect.h>
#include <linux/of_platform.h>
@ -202,9 +203,10 @@ int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)
static int a6xx_gmu_start(struct a6xx_gmu *gmu)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
u32 mask, reset_val, val;
int ret;
u32 val;
u32 mask, reset_val;
val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8);
if (val <= 0x20010004) {
@ -220,7 +222,11 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
/* Set the log wptr index
* note: downstream saves the value in poweroff and restores it here
*/
gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);
if (adreno_is_a7xx(adreno_gpu))
gmu_write(gmu, REG_A6XX_GMU_GENERAL_9, 0);
else
gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);
gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);
@ -513,6 +519,7 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct platform_device *pdev = to_platform_device(gmu->dev);
void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
u32 seqmem0_drv0_reg = REG_A6XX_RSCC_SEQ_MEM_0_DRV0;
void __iomem *seqptr = NULL;
uint32_t pdc_address_offset;
bool pdc_in_aop = false;
@ -520,7 +527,9 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
if (IS_ERR(pdcptr))
goto err;
if (adreno_is_a650(adreno_gpu) || adreno_is_a660_family(adreno_gpu))
if (adreno_is_a650(adreno_gpu) ||
adreno_is_a660_family(adreno_gpu) ||
adreno_is_a7xx(adreno_gpu))
pdc_in_aop = true;
else if (adreno_is_a618(adreno_gpu) || adreno_is_a640_family(adreno_gpu))
pdc_address_offset = 0x30090;
@ -544,20 +553,26 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4,
adreno_is_a740_family(adreno_gpu) ? 0x80000021 : 0x80000000);
gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
/* The second spin of A7xx GPUs messed with some register offsets.. */
if (adreno_is_a740_family(adreno_gpu))
seqmem0_drv0_reg = REG_A7XX_RSCC_SEQ_MEM_0_DRV0_A740;
/* Load RSC sequencer uCode for sleep and wakeup */
if (adreno_is_a650_family(adreno_gpu)) {
gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xeaaae5a0);
gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xe1a1ebab);
gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e0a581);
gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xecac82e2);
gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020edad);
if (adreno_is_a650_family(adreno_gpu) ||
adreno_is_a7xx(adreno_gpu)) {
gmu_write_rscc(gmu, seqmem0_drv0_reg, 0xeaaae5a0);
gmu_write_rscc(gmu, seqmem0_drv0_reg + 1, 0xe1a1ebab);
gmu_write_rscc(gmu, seqmem0_drv0_reg + 2, 0xa2e0a581);
gmu_write_rscc(gmu, seqmem0_drv0_reg + 3, 0xecac82e2);
gmu_write_rscc(gmu, seqmem0_drv0_reg + 4, 0x0020edad);
} else {
gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
@ -637,11 +652,18 @@ err:
/* Set up the idle state for the GMU */
static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
/* Disable GMU WB/RB buffer */
gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1);
gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1);
/* A7xx knows better by default! */
if (adreno_is_a7xx(adreno_gpu))
return;
gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);
switch (gmu->idle_level) {
@ -698,7 +720,7 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
u32 itcm_base = 0x00000000;
u32 dtcm_base = 0x00040000;
if (adreno_is_a650_family(adreno_gpu))
if (adreno_is_a650_family(adreno_gpu) || adreno_is_a7xx(adreno_gpu))
dtcm_base = 0x10004000;
if (gmu->legacy) {
@ -747,14 +769,22 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
u32 fence_range_lower, fence_range_upper;
u32 chipid, chipid_min = 0;
int ret;
u32 chipid;
if (adreno_is_a650_family(adreno_gpu)) {
/* Vote veto for FAL10 */
if (adreno_is_a650_family(adreno_gpu) || adreno_is_a7xx(adreno_gpu)) {
gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1);
gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
}
/* Turn on TCM (Tightly Coupled Memory) retention */
if (adreno_is_a7xx(adreno_gpu))
a6xx_llc_write(a6xx_gpu, REG_A7XX_CX_MISC_TCM_RET_CNTL, 1);
else
gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
if (state == GMU_WARM_BOOT) {
ret = a6xx_rpmh_start(gmu);
if (ret)
@ -764,9 +794,6 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
"GMU firmware is not loaded\n"))
return -ENOENT;
/* Turn on register retention */
gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
ret = a6xx_rpmh_start(gmu);
if (ret)
return ret;
@ -776,6 +803,7 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
return ret;
}
/* Clear init result to make sure we are getting a fresh value */
gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);
@ -783,8 +811,18 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova);
gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);
if (adreno_is_a7xx(adreno_gpu)) {
fence_range_upper = 0x32;
fence_range_lower = 0x8a0;
} else {
fence_range_upper = 0xa;
fence_range_lower = 0xa0;
}
gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
(1 << 31) | (0xa << 18) | (0xa0));
BIT(31) |
FIELD_PREP(GENMASK(30, 18), fence_range_upper) |
FIELD_PREP(GENMASK(17, 0), fence_range_lower));
/*
* Snapshots toggle the NMI bit which will result in a jump to the NMI
@ -792,21 +830,49 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
*/
gmu_write(gmu, REG_A6XX_GMU_CM3_CFG, 0x4052);
/*
* Note that the GMU has a slightly different layout for
* chip_id, for whatever reason, so a bit of massaging
* is needed. The upper 16b are the same, but minor and
* patchid are packed in four bits each with the lower
* 8b unused:
*/
chipid = adreno_gpu->chip_id & 0xffff0000;
chipid |= (adreno_gpu->chip_id << 4) & 0xf000; /* minor */
chipid |= (adreno_gpu->chip_id << 8) & 0x0f00; /* patchid */
/* NOTE: A730 may also fall in this if-condition with a future GMU fw update. */
if (adreno_is_a7xx(adreno_gpu) && !adreno_is_a730(adreno_gpu)) {
/* A7xx GPUs have obfuscated chip IDs. Use constant maj = 7 */
chipid = FIELD_PREP(GENMASK(31, 24), 0x7);
gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);
/*
* The min part has a 1-1 mapping for each GPU SKU.
* This chipid that the GMU expects corresponds to the "GENX_Y_Z" naming,
* where X = major, Y = minor, Z = patchlevel, e.g. GEN7_2_1 for prod A740.
*/
if (adreno_is_a740(adreno_gpu))
chipid_min = 2;
else
return -EINVAL;
gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG,
gmu->log.iova | (gmu->log.size / SZ_4K - 1));
chipid |= FIELD_PREP(GENMASK(23, 16), chipid_min);
/* Get the patchid (which may vary) from the device tree */
chipid |= FIELD_PREP(GENMASK(15, 8), adreno_patchid(adreno_gpu));
} else {
/*
* Note that the GMU has a slightly different layout for
* chip_id, for whatever reason, so a bit of massaging
* is needed. The upper 16b are the same, but minor and
* patchid are packed in four bits each with the lower
* 8b unused:
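* For example (hypothetical value): chip_id 0x06030001, i.e. A630
* minor 0 patchid 1, would become 0x06030100 in the GMU's layout.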
*/
chipid = adreno_gpu->chip_id & 0xffff0000;
chipid |= (adreno_gpu->chip_id << 4) & 0xf000; /* minor */
chipid |= (adreno_gpu->chip_id << 8) & 0x0f00; /* patchid */
}
if (adreno_is_a7xx(adreno_gpu)) {
gmu_write(gmu, REG_A6XX_GMU_GENERAL_10, chipid);
gmu_write(gmu, REG_A6XX_GMU_GENERAL_8,
(gmu->log.iova & GENMASK(31, 12)) |
((gmu->log.size / SZ_4K - 1) & GENMASK(7, 0)));
} else {
gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);
gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG,
gmu->log.iova | (gmu->log.size / SZ_4K - 1));
}
/* Set up the lowest idle level on the GMU */
a6xx_gmu_power_config(gmu);
@ -857,17 +923,23 @@ static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
{
u32 val;
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
u32 val, seqmem_off = 0;
/* The second spin of A7xx GPUs messed with some register offsets.. */
if (adreno_is_a740_family(adreno_gpu))
seqmem_off = 4;
/* Make sure there are no outstanding RPMh votes */
gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
(val & 1), 100, 10000);
gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
(val & 1), 100, 10000);
gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
(val & 1), 100, 10000);
gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
(val & 1), 100, 1000);
gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS + seqmem_off,
val, (val & 1), 100, 10000);
gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS + seqmem_off,
val, (val & 1), 100, 10000);
gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS + seqmem_off,
val, (val & 1), 100, 10000);
gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS + seqmem_off,
val, (val & 1), 100, 1000);
}
/* Force the GMU off in case it isn't responsive */
@ -950,6 +1022,14 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
gmu->hung = false;
/* Notify AOSS about the ACD state (unimplemented for now => disable it) */
if (!IS_ERR(gmu->qmp)) {
ret = qmp_send(gmu->qmp, "{class: gpu, res: acd, val: %d}",
0 /* Hardcode ACD to be disabled for now */);
if (ret)
dev_err(gmu->dev, "failed to send GPU ACD state\n");
}
/* Turn on the resources */
pm_runtime_get_sync(gmu->dev);
@ -963,7 +1043,8 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
/* Use a known rate to bring up the GMU */
clk_set_rate(gmu->core_clk, 200000000);
clk_set_rate(gmu->hub_clk, 150000000);
clk_set_rate(gmu->hub_clk, adreno_is_a740_family(adreno_gpu) ?
200000000 : 150000000);
ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
if (ret) {
pm_runtime_put(gmu->gxpd);
@ -980,15 +1061,19 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
enable_irq(gmu->gmu_irq);
/* Check to see if we are doing a cold or warm boot */
status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
GMU_WARM_BOOT : GMU_COLD_BOOT;
/*
* Warm boot path does not work on newer GPUs
* Presumably this is because icache/dcache regions must be restored
*/
if (!gmu->legacy)
if (adreno_is_a7xx(adreno_gpu)) {
status = a6xx_llc_read(a6xx_gpu, REG_A7XX_CX_MISC_TCM_RET_CNTL) == 1 ?
GMU_WARM_BOOT : GMU_COLD_BOOT;
} else if (gmu->legacy) {
status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
GMU_WARM_BOOT : GMU_COLD_BOOT;
} else {
/*
* Warm boot path does not work on newer A6xx GPUs
* Presumably this is because icache/dcache regions must be restored
*/
status = GMU_COLD_BOOT;
}
ret = a6xx_gmu_fw_start(gmu, status);
if (ret)
@ -1473,6 +1558,9 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
dev_pm_domain_detach(gmu->gxpd, false);
}
if (!IS_ERR_OR_NULL(gmu->qmp))
qmp_put(gmu->qmp);
iounmap(gmu->mmio);
if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
iounmap(gmu->rscc);
@ -1569,6 +1657,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
struct platform_device *pdev = of_find_device_by_node(node);
struct device_link *link;
int ret;
if (!pdev)
@ -1600,7 +1689,8 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
* are otherwise unused by a660.
*/
gmu->dummy.size = SZ_4K;
if (adreno_is_a660_family(adreno_gpu)) {
if (adreno_is_a660_family(adreno_gpu) ||
adreno_is_a7xx(adreno_gpu)) {
ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7,
0x60400000, "debug");
if (ret)
@ -1616,7 +1706,8 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
goto err_memory;
/* Note that a650 family also includes a660 family: */
if (adreno_is_a650_family(adreno_gpu)) {
if (adreno_is_a650_family(adreno_gpu) ||
adreno_is_a7xx(adreno_gpu)) {
ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
SZ_16M - SZ_16K, 0x04000, "icache");
if (ret)
@ -1664,7 +1755,8 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
goto err_memory;
}
if (adreno_is_a650_family(adreno_gpu)) {
if (adreno_is_a650_family(adreno_gpu) ||
adreno_is_a7xx(adreno_gpu)) {
gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
if (IS_ERR(gmu->rscc)) {
ret = -ENODEV;
@ -1689,12 +1781,18 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
goto err_mmio;
}
if (!device_link_add(gmu->dev, gmu->cxpd,
DL_FLAG_PM_RUNTIME)) {
link = device_link_add(gmu->dev, gmu->cxpd, DL_FLAG_PM_RUNTIME);
if (!link) {
ret = -ENODEV;
goto detach_cxpd;
}
gmu->qmp = qmp_get(gmu->dev);
if (IS_ERR(gmu->qmp) && adreno_is_a7xx(adreno_gpu)) {
ret = PTR_ERR(gmu->qmp);
goto remove_device_link;
}
init_completion(&gmu->pd_gate);
complete_all(&gmu->pd_gate);
gmu->pd_nb.notifier_call = cxpd_notifier_cb;
@ -1718,6 +1816,9 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
return 0;
remove_device_link:
device_link_del(link);
detach_cxpd:
dev_pm_domain_detach(gmu->cxpd, false);

View File

@ -8,6 +8,7 @@
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/soc/qcom/qcom_aoss.h>
#include "msm_drv.h"
#include "a6xx_hfi.h"
@ -96,6 +97,8 @@ struct a6xx_gmu {
/* For power domain callback */
struct notifier_block pd_nb;
struct completion pd_gate;
struct qmp *qmp;
};
static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)

View File

@ -360,6 +360,12 @@ static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val)
#define REG_A6XX_GMU_GENERAL_7 0x000051cc
#define REG_A6XX_GMU_GENERAL_8 0x000051cd
#define REG_A6XX_GMU_GENERAL_9 0x000051ce
#define REG_A6XX_GMU_GENERAL_10 0x000051cf
#define REG_A6XX_GMU_ISENSE_CTRL 0x0000515d
#define REG_A6XX_GPU_CS_ENABLE_REG 0x00008920
@ -471,6 +477,8 @@ static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val)
#define REG_A6XX_RSCC_SEQ_BUSY_DRV0 0x00000101
#define REG_A7XX_RSCC_SEQ_MEM_0_DRV0_A740 0x00000154
#define REG_A6XX_RSCC_SEQ_MEM_0_DRV0 0x00000180
#define REG_A6XX_RSCC_TCS0_DRV0_STATUS 0x00000346

View File

@ -103,6 +103,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
struct msm_ringbuffer *ring, struct msm_file_private *ctx)
{
bool sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1;
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
phys_addr_t ttbr;
u32 asid;
u64 memptr = rbmemptr(ring, ttbr0);
@ -114,9 +115,11 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
return;
if (!sysprof) {
/* Turn off protected mode to write to special registers */
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 0);
if (!adreno_is_a7xx(adreno_gpu)) {
/* Turn off protected mode to write to special registers */
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 0);
}
OUT_PKT4(ring, REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD, 1);
OUT_RING(ring, 1);
@ -141,6 +144,16 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
OUT_RING(ring, lower_32_bits(ttbr));
OUT_RING(ring, (asid << 16) | upper_32_bits(ttbr));
/*
* Sync both threads after switching pagetables and enable BR only
* to make sure BV doesn't race ahead while BR is still switching
* pagetables.
*/
if (adreno_is_a7xx(&a6xx_gpu->base)) {
OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BR);
}
/*
* And finally, trigger a uche flush to be sure there isn't anything
* lingering in that part of the GPU
@ -163,9 +176,11 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(0x1));
OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(0));
/* Re-enable protected mode: */
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 1);
if (!adreno_is_a7xx(adreno_gpu)) {
/* Re-enable protected mode: */
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 1);
}
}
}
@ -252,6 +267,133 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
a6xx_flush(gpu, ring);
}
static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = submit->ring;
unsigned int i, ibs = 0;
/*
* Toggle concurrent binning for pagetable switch and set the thread to
* BR since only it can execute the pagetable switch packets.
*/
OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BR);
a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
rbmemptr_stats(ring, index, cpcycles_start));
get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
rbmemptr_stats(ring, index, alwayson_start));
OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
OUT_RING(ring, CP_SET_THREAD_BOTH);
OUT_PKT7(ring, CP_SET_MARKER, 1);
OUT_RING(ring, 0x101); /* IFPC disable */
OUT_PKT7(ring, CP_SET_MARKER, 1);
OUT_RING(ring, 0x00d); /* IB1LIST start */
/* Submit the commands */
for (i = 0; i < submit->nr_cmds; i++) {
switch (submit->cmd[i].type) {
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
OUT_RING(ring, submit->cmd[i].size);
ibs++;
break;
}
/*
* Periodically update shadow-wptr if needed, so that we
* can see partial progress of submits with large # of
* cmds.. otherwise we could needlessly stall waiting for
* ringbuffer state, simply due to looking at a shadow
* rptr value that has not been updated
*/
if ((ibs % 32) == 0)
update_shadow_rptr(gpu, ring);
}
OUT_PKT7(ring, CP_SET_MARKER, 1);
OUT_RING(ring, 0x00e); /* IB1LIST end */
get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
rbmemptr_stats(ring, index, cpcycles_end));
get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
rbmemptr_stats(ring, index, alwayson_end));
/* Write the fence to the scratch register */
OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
OUT_RING(ring, submit->seqno);
OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
OUT_RING(ring, CP_SET_THREAD_BR);
OUT_PKT7(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, CCU_INVALIDATE_DEPTH);
OUT_PKT7(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, CCU_INVALIDATE_COLOR);
OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
OUT_RING(ring, CP_SET_THREAD_BV);
/*
* Make sure the timestamp is committed once BV pipe is
* completely done with this submission.
*/
OUT_PKT7(ring, CP_EVENT_WRITE, 4);
OUT_RING(ring, CACHE_CLEAN | BIT(27));
OUT_RING(ring, lower_32_bits(rbmemptr(ring, bv_fence)));
OUT_RING(ring, upper_32_bits(rbmemptr(ring, bv_fence)));
OUT_RING(ring, submit->seqno);
OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
OUT_RING(ring, CP_SET_THREAD_BR);
/*
* This makes sure that BR doesn't race ahead and commit
* timestamp to memstore while BV is still processing
* this submission.
*/
OUT_PKT7(ring, CP_WAIT_TIMESTAMP, 4);
OUT_RING(ring, 0);
OUT_RING(ring, lower_32_bits(rbmemptr(ring, bv_fence)));
OUT_RING(ring, upper_32_bits(rbmemptr(ring, bv_fence)));
OUT_RING(ring, submit->seqno);
/* write the ringbuffer timestamp */
OUT_PKT7(ring, CP_EVENT_WRITE, 4);
OUT_RING(ring, CACHE_CLEAN | CP_EVENT_WRITE_0_IRQ | BIT(27));
OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
OUT_RING(ring, submit->seqno);
OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
OUT_RING(ring, CP_SET_THREAD_BOTH);
OUT_PKT7(ring, CP_SET_MARKER, 1);
OUT_RING(ring, 0x100); /* IFPC enable */
trace_msm_gpu_submit_flush(submit,
gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER));
a6xx_flush(gpu, ring);
}
const struct adreno_reglist a612_hwcg[] = {
{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
@ -695,6 +837,121 @@ const struct adreno_reglist a690_hwcg[] = {
{}
};
const struct adreno_reglist a730_hwcg[] = {
{ REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222 },
{ REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02022222 },
{ REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf },
{ REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080 },
{ REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222220 },
{ REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222 },
{ REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222 },
{ REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00222222 },
{ REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777 },
{ REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777 },
{ REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777 },
{ REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777 },
{ REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111 },
{ REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111 },
{ REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111 },
{ REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111 },
{ REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222 },
{ REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004 },
{ REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002 },
{ REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222 },
{ REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222 },
{ REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220 },
{ REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x44000f00 },
{ REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022 },
{ REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00555555 },
{ REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011 },
{ REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00440044 },
{ REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222 },
{ REG_A7XX_RBBM_CLOCK_MODE2_GRAS, 0x00000222 },
{ REG_A7XX_RBBM_CLOCK_MODE_BV_GRAS, 0x00222222 },
{ REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x02222223 },
{ REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222 },
{ REG_A7XX_RBBM_CLOCK_MODE_BV_GPC, 0x00222222 },
{ REG_A7XX_RBBM_CLOCK_MODE_BV_VFD, 0x00002222 },
{ REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000 },
{ REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004 },
{ REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000 },
{ REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000 },
{ REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200 },
{ REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222 },
{ REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222 },
{ REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000 },
{ REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000 },
{ REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002 },
{ REG_A7XX_RBBM_CLOCK_MODE_BV_LRZ, 0x55555552 },
{ REG_A7XX_RBBM_CLOCK_MODE_CP, 0x00000223 },
{ REG_A6XX_RBBM_CLOCK_CNTL, 0x8aa8aa82 },
{ REG_A6XX_RBBM_ISDB_CNT, 0x00000182 },
{ REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000 },
{ REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000 },
{ REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222 },
{ REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111 },
{ REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555 },
{},
};
const struct adreno_reglist a740_hwcg[] = {
{ REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222 },
{ REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x22022222 },
{ REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x003cf3cf },
{ REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080 },
{ REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222220 },
{ REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222 },
{ REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222 },
{ REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00222222 },
{ REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777 },
{ REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777 },
{ REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777 },
{ REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777 },
{ REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111 },
{ REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111 },
{ REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111 },
{ REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111 },
{ REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222 },
{ REG_A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x00222222 },
{ REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000444 },
{ REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000222 },
{ REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222 },
{ REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222 },
{ REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220 },
{ REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x44000f00 },
{ REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022 },
{ REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00555555 },
{ REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011 },
{ REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00440044 },
{ REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222 },
{ REG_A7XX_RBBM_CLOCK_MODE2_GRAS, 0x00000222 },
{ REG_A7XX_RBBM_CLOCK_MODE_BV_GRAS, 0x00222222 },
{ REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x02222223 },
{ REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00222222 },
{ REG_A7XX_RBBM_CLOCK_MODE_BV_GPC, 0x00222222 },
{ REG_A7XX_RBBM_CLOCK_MODE_BV_VFD, 0x00002222 },
{ REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000 },
{ REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004 },
{ REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000 },
{ REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00000000 },
{ REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200 },
{ REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00000000 },
{ REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222 },
{ REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000 },
{ REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000 },
{ REG_A7XX_RBBM_CLOCK_MODE_BV_LRZ, 0x55555552 },
{ REG_A7XX_RBBM_CLOCK_HYST2_VFD, 0x00000000 },
{ REG_A7XX_RBBM_CLOCK_MODE_CP, 0x00000222 },
{ REG_A6XX_RBBM_CLOCK_CNTL, 0x8aa8aa82 },
{ REG_A6XX_RBBM_ISDB_CNT, 0x00000182 },
{ REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000 },
{ REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000 },
{ REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222 },
{ REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111 },
{ REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555 },
{},
};
static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@ -702,7 +959,7 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
const struct adreno_reglist *reg;
unsigned int i;
u32 val, clock_cntl_on;
u32 val, clock_cntl_on, cgc_mode;
if (!adreno_gpu->info->hwcg)
return;
@ -714,6 +971,17 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
else
clock_cntl_on = 0x8aa8aa82;
if (adreno_is_a7xx(adreno_gpu)) {
cgc_mode = adreno_is_a740_family(adreno_gpu) ? 0x20222 : 0x20000;
gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
state ? cgc_mode : 0);
gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
state ? 0x10111 : 0);
gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
state ? 0x5555 : 0);
}
val = gpu_read(gpu, REG_A6XX_RBBM_CLOCK_CNTL);
/* Don't re-program the registers if they are already correct */
@ -721,14 +989,14 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
return;
/* Disable SP clock before programming HWCG registers */
if (!adreno_is_a610(adreno_gpu))
if (!adreno_is_a610(adreno_gpu) && !adreno_is_a7xx(adreno_gpu))
gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
for (i = 0; (reg = &adreno_gpu->info->hwcg[i], reg->offset); i++)
gpu_write(gpu, reg->offset, state ? reg->value : 0);
/* Enable SP clock */
if (!adreno_is_a610(adreno_gpu))
if (!adreno_is_a610(adreno_gpu) && !adreno_is_a7xx(adreno_gpu))
gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
@ -897,6 +1165,59 @@ static const u32 a690_protect[] = {
A6XX_PROTECT_NORDWR(0x11c00, 0x00000), /* note: infinite range */
};
static const u32 a730_protect[] = {
A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
A6XX_PROTECT_RDONLY(0x0050b, 0x0058),
A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
A6XX_PROTECT_NORDWR(0x00510, 0x0000),
A6XX_PROTECT_NORDWR(0x00534, 0x0000),
A6XX_PROTECT_RDONLY(0x005fb, 0x009d),
A6XX_PROTECT_NORDWR(0x00699, 0x01e9),
A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
/* 0x008d0-0x008dd are unprotected on purpose for tools like perfetto */
A6XX_PROTECT_RDONLY(0x008de, 0x0154),
A6XX_PROTECT_NORDWR(0x00900, 0x004d),
A6XX_PROTECT_NORDWR(0x0098d, 0x00b2),
A6XX_PROTECT_NORDWR(0x00a41, 0x01be),
A6XX_PROTECT_NORDWR(0x00df0, 0x0001),
A6XX_PROTECT_NORDWR(0x00e01, 0x0000),
A6XX_PROTECT_NORDWR(0x00e07, 0x0008),
A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
A6XX_PROTECT_NORDWR(0x08e80, 0x0280),
A6XX_PROTECT_NORDWR(0x09624, 0x01db),
A6XX_PROTECT_NORDWR(0x09e40, 0x0000),
A6XX_PROTECT_NORDWR(0x09e64, 0x000d),
A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
A6XX_PROTECT_NORDWR(0x0ae50, 0x000f),
A6XX_PROTECT_NORDWR(0x0ae66, 0x0003),
A6XX_PROTECT_NORDWR(0x0ae6f, 0x0003),
A6XX_PROTECT_NORDWR(0x0b604, 0x0003),
A6XX_PROTECT_NORDWR(0x0ec00, 0x0fff),
A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
A6XX_PROTECT_NORDWR(0x18400, 0x0053),
A6XX_PROTECT_RDONLY(0x18454, 0x0004),
A6XX_PROTECT_NORDWR(0x18459, 0x1fff),
A6XX_PROTECT_NORDWR(0x1a459, 0x1fff),
A6XX_PROTECT_NORDWR(0x1c459, 0x1fff),
A6XX_PROTECT_NORDWR(0x1f400, 0x0443),
A6XX_PROTECT_RDONLY(0x1f844, 0x007b),
A6XX_PROTECT_NORDWR(0x1f860, 0x0000),
A6XX_PROTECT_NORDWR(0x1f878, 0x002a),
/* CP_PROTECT_REG[44, 46] are left untouched! */
0,
0,
0,
A6XX_PROTECT_NORDWR(0x1f8c0, 0x00000),
};
static void a6xx_set_cp_protect(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@ -918,6 +1239,11 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu)
count = ARRAY_SIZE(a660_protect);
count_max = 48;
BUILD_BUG_ON(ARRAY_SIZE(a660_protect) > 48);
} else if (adreno_is_a730(adreno_gpu) || adreno_is_a740(adreno_gpu)) {
regs = a730_protect;
count = ARRAY_SIZE(a730_protect);
count_max = 48;
BUILD_BUG_ON(ARRAY_SIZE(a730_protect) > 48);
} else {
regs = a6xx_protect;
count = ARRAY_SIZE(a6xx_protect);
@ -984,7 +1310,10 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
if (adreno_is_a640_family(adreno_gpu))
amsbc = 1;
if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu)) {
if (adreno_is_a650(adreno_gpu) ||
adreno_is_a660(adreno_gpu) ||
adreno_is_a730(adreno_gpu) ||
adreno_is_a740_family(adreno_gpu)) {
/* TODO: get ddr type from bootloader and use 2 for LPDDR4 */
hbb_lo = 3;
amsbc = 1;
@ -1017,6 +1346,10 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
uavflagprd_inv << 4 | min_acc_len << 3 |
hbb_lo << 1 | ubwc_mode);
if (adreno_is_a7xx(adreno_gpu))
gpu_write(gpu, REG_A7XX_GRAS_NC_MODE_CNTL,
FIELD_PREP(GENMASK(8, 5), hbb_lo));
gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, min_acc_len << 23 | hbb_lo << 21);
}
@ -1049,6 +1382,55 @@ static int a6xx_cp_init(struct msm_gpu *gpu)
return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
}
static int a7xx_cp_init(struct msm_gpu *gpu)
{
struct msm_ringbuffer *ring = gpu->rb[0];
u32 mask;
/* Disable concurrent binning before sending CP init */
OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
OUT_RING(ring, BIT(27));
OUT_PKT7(ring, CP_ME_INIT, 7);
/* Use multiple HW contexts */
mask = BIT(0);
/* Enable error detection */
mask |= BIT(1);
/* Set default reset state */
mask |= BIT(3);
/* Disable save/restore of performance counters across preemption */
mask |= BIT(6);
/* Enable the register init list with the spinlock */
mask |= BIT(8);
OUT_RING(ring, mask);
/* Enable multiple hardware contexts */
OUT_RING(ring, 0x00000003);
/* Enable error detection */
OUT_RING(ring, 0x20000000);
/* Operation mode mask */
OUT_RING(ring, 0x00000002);
/* *Don't* send a power up reg list for concurrent binning (TODO) */
/* Lo address */
OUT_RING(ring, 0x00000000);
/* Hi address */
OUT_RING(ring, 0x00000000);
/* BIT(31) set => read the regs from the list */
OUT_RING(ring, 0x00000000);
a6xx_flush(gpu, ring);
return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
}
/*
* Check that the microcode version is new enough to include several key
* security fixes. Return true if the ucode is safe.
@ -1065,6 +1447,10 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
if (IS_ERR(buf))
return false;
/* A7xx is safe! */
if (adreno_is_a7xx(adreno_gpu))
return true;
/*
* Targets up to a640 (a618, a630 and a640) need to check for a
* microcode version that is patched to support the whereami opcode or
@ -1181,22 +1567,46 @@ static int a6xx_zap_shader_init(struct msm_gpu *gpu)
}
#define A6XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
A6XX_RBBM_INT_0_MASK_CP_IB2 | \
A6XX_RBBM_INT_0_MASK_CP_IB1 | \
A6XX_RBBM_INT_0_MASK_CP_RB | \
A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
A6XX_RBBM_INT_0_MASK_CP_IB2 | \
A6XX_RBBM_INT_0_MASK_CP_IB1 | \
A6XX_RBBM_INT_0_MASK_CP_RB | \
A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
#define A7XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
A6XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR | \
A6XX_RBBM_INT_0_MASK_CP_SW | \
A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
A6XX_RBBM_INT_0_MASK_PM4CPINTERRUPT | \
A6XX_RBBM_INT_0_MASK_CP_RB_DONE_TS | \
A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR | \
A6XX_RBBM_INT_0_MASK_TSBWRITEERROR)
#define A7XX_APRIV_MASK (A6XX_CP_APRIV_CNTL_ICACHE | \
A6XX_CP_APRIV_CNTL_RBFETCH | \
A6XX_CP_APRIV_CNTL_RBPRIVLEVEL | \
A6XX_CP_APRIV_CNTL_RBRPWB)
#define A7XX_BR_APRIVMASK (A7XX_APRIV_MASK | \
A6XX_CP_APRIV_CNTL_CDREAD | \
A6XX_CP_APRIV_CNTL_CDWRITE)
static int hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
u64 gmem_range_min;
int ret;
if (!adreno_has_gmu_wrapper(adreno_gpu)) {
@ -1219,6 +1629,10 @@ static int hw_init(struct msm_gpu *gpu)
mb();
}
/* Some GPUs are stubborn and take their sweet time to unhalt GBIF! */
if (adreno_is_a7xx(adreno_gpu) && a6xx_has_gbif(adreno_gpu))
spin_until(!gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK));
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
if (adreno_is_a619_holi(adreno_gpu))
@ -1232,19 +1646,21 @@ static int hw_init(struct msm_gpu *gpu)
gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE, 0x00000000);
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
/* Turn on 64 bit addressing for all blocks */
gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
if (!adreno_is_a7xx(adreno_gpu)) {
/* Turn on 64 bit addressing for all blocks */
gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
}
/* enable hardware clockgating */
a6xx_set_hwcg(gpu, true);
@ -1252,12 +1668,14 @@ static int hw_init(struct msm_gpu *gpu)
/* VBIF/GBIF start*/
if (adreno_is_a610(adreno_gpu) ||
adreno_is_a640_family(adreno_gpu) ||
adreno_is_a650_family(adreno_gpu)) {
adreno_is_a650_family(adreno_gpu) ||
adreno_is_a7xx(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620);
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620);
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620);
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620);
gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3);
gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL,
adreno_is_a7xx(adreno_gpu) ? 0x2120212 : 0x3);
} else {
gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
}
@ -1265,24 +1683,39 @@ static int hw_init(struct msm_gpu *gpu)
if (adreno_is_a630(adreno_gpu))
gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
if (adreno_is_a7xx(adreno_gpu))
gpu_write(gpu, REG_A6XX_UCHE_GBIF_GX_CONFIG, 0x10240e0);
/* Make all blocks contribute to the GPU BUSY perf counter */
gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff);
/* Disable L2 bypass in the UCHE */
gpu_write64(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX, 0x0001ffffffffffc0llu);
gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu);
gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu);
if (!adreno_is_a650_family(adreno_gpu)) {
/* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN, 0x00100000);
gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX,
0x00100000 + adreno_gpu->info->gmem - 1);
if (adreno_is_a7xx(adreno_gpu)) {
gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu);
gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu);
} else {
gpu_write64(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX, 0x0001ffffffffffc0llu);
gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu);
gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu);
}
gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
if (!(adreno_is_a650_family(adreno_gpu) ||
adreno_is_a730(adreno_gpu))) {
gmem_range_min = adreno_is_a740_family(adreno_gpu) ? SZ_16M : SZ_1M;
/* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN, gmem_range_min);
gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX,
gmem_range_min + adreno_gpu->info->gmem - 1);
}
if (adreno_is_a7xx(adreno_gpu))
gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, BIT(23));
else {
gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
}
if (adreno_is_a640_family(adreno_gpu) || adreno_is_a650_family(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
@ -1290,7 +1723,7 @@ static int hw_init(struct msm_gpu *gpu)
} else if (adreno_is_a610(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x00800060);
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x40201b16);
} else {
} else if (!adreno_is_a7xx(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
}
@ -1302,7 +1735,7 @@ static int hw_init(struct msm_gpu *gpu)
if (adreno_is_a610(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 48);
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_DBG_ADDR, 47);
} else
} else if (!adreno_is_a7xx(adreno_gpu))
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
/* Setting the primFifo thresholds default values,
@ -1318,7 +1751,7 @@ static int hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00018000);
else if (adreno_is_a610(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00080000);
else
else if (!adreno_is_a7xx(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00180000);
/* Set the AHB default slave response to "ERROR" */
@ -1327,13 +1760,22 @@ static int hw_init(struct msm_gpu *gpu)
/* Turn on performance counters */
gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_CNTL, 0x1);
if (adreno_is_a7xx(adreno_gpu)) {
/* Turn on the IFPC counter (countable 4 on XOCLK4) */
gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1,
FIELD_PREP(GENMASK(7, 0), 0x4));
}
/* Select CP0 to always count cycles */
gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL(0), PERF_CP_ALWAYS_COUNT);
a6xx_set_ubwc_config(gpu);
/* Enable fault detection */
if (adreno_is_a619(adreno_gpu))
if (adreno_is_a730(adreno_gpu) ||
adreno_is_a740_family(adreno_gpu))
gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0xcfffff);
else if (adreno_is_a619(adreno_gpu))
gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3fffff);
else if (adreno_is_a610(adreno_gpu))
gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3ffff);
@ -1373,15 +1815,31 @@ static int hw_init(struct msm_gpu *gpu)
/* Set dualQ + disable afull for A660 GPU */
if (adreno_is_a660(adreno_gpu))
gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906);
else if (adreno_is_a7xx(adreno_gpu))
gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG,
FIELD_PREP(GENMASK(19, 16), 6) |
FIELD_PREP(GENMASK(15, 12), 6) |
FIELD_PREP(GENMASK(11, 8), 9) |
BIT(3) | BIT(2) |
FIELD_PREP(GENMASK(1, 0), 2));
/* Enable expanded apriv for targets that support it */
if (gpu->hw_apriv) {
gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
(1 << 6) | (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1));
if (adreno_is_a7xx(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
A7XX_BR_APRIVMASK);
gpu_write(gpu, REG_A7XX_CP_BV_APRIV_CNTL,
A7XX_APRIV_MASK);
gpu_write(gpu, REG_A7XX_CP_LPAC_APRIV_CNTL,
A7XX_APRIV_MASK);
} else
gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
BIT(6) | BIT(5) | BIT(3) | BIT(2) | BIT(1));
}
/* Enable interrupts */
gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK);
gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK,
adreno_is_a7xx(adreno_gpu) ? A7XX_INT_MASK : A6XX_INT_MASK);
ret = adreno_hw_init(gpu);
if (ret)
@ -1408,6 +1866,12 @@ static int hw_init(struct msm_gpu *gpu)
shadowptr(a6xx_gpu, gpu->rb[0]));
}
/* ..which means "always" on A7xx, also for BV shadow */
if (adreno_is_a7xx(adreno_gpu)) {
gpu_write64(gpu, REG_A7XX_CP_BV_RB_RPTR_ADDR,
rbmemptr(gpu->rb[0], bv_fence));
}
/* Always come up on rb 0 */
a6xx_gpu->cur_ring = gpu->rb[0];
@ -1416,7 +1880,7 @@ static int hw_init(struct msm_gpu *gpu)
/* Enable the SQE to start the CP engine */
gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
ret = a6xx_cp_init(gpu);
ret = adreno_is_a7xx(adreno_gpu) ? a7xx_cp_init(gpu) : a6xx_cp_init(gpu);
if (ret)
goto out;
@ -1653,7 +2117,7 @@ static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu)
(val & 0x3ffff), val);
}
if (status & A6XX_CP_INT_CP_AHB_ERROR)
if (status & A6XX_CP_INT_CP_AHB_ERROR && !adreno_is_a7xx(to_adreno_gpu(gpu)))
dev_err_ratelimited(&gpu->pdev->dev, "CP AHB error interrupt\n");
if (status & A6XX_CP_INT_CP_VSD_PARITY_ERROR)
@ -1803,6 +2267,35 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval);
}
static void a7xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
{
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
if (IS_ERR(a6xx_gpu->llc_mmio))
return;
if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
gpu_scid &= GENMASK(4, 0);
gpu_write(gpu, REG_A6XX_GBIF_SCACHE_CNTL1,
FIELD_PREP(GENMASK(29, 25), gpu_scid) |
FIELD_PREP(GENMASK(24, 20), gpu_scid) |
FIELD_PREP(GENMASK(19, 15), gpu_scid) |
FIELD_PREP(GENMASK(14, 10), gpu_scid) |
FIELD_PREP(GENMASK(9, 5), gpu_scid) |
FIELD_PREP(GENMASK(4, 0), gpu_scid));
gpu_write(gpu, REG_A6XX_GBIF_SCACHE_CNTL0,
FIELD_PREP(GENMASK(14, 10), gpu_scid) |
BIT(8));
}
llcc_slice_activate(a6xx_gpu->htw_llc_slice);
}
static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
{
/* No LLCC on non-RPMh (and by extension, non-GMU) SoCs */
@ -1814,7 +2307,7 @@ static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
}
static void a6xx_llc_slices_init(struct platform_device *pdev,
struct a6xx_gpu *a6xx_gpu)
struct a6xx_gpu *a6xx_gpu, bool is_a7xx)
{
struct device_node *phandle;
@ -1823,18 +2316,18 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
return;
/*
* There is a different programming path for targets with an mmu500
* attached, so detect if that is the case
* There is a different programming path for A6xx targets with an
* mmu500 attached, so detect if that is the case
*/
phandle = of_parse_phandle(pdev->dev.of_node, "iommus", 0);
a6xx_gpu->have_mmu500 = (phandle &&
of_device_is_compatible(phandle, "arm,mmu-500"));
of_node_put(phandle);
if (a6xx_gpu->have_mmu500)
a6xx_gpu->llc_mmio = NULL;
else
if (is_a7xx || !a6xx_gpu->have_mmu500)
a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem");
else
a6xx_gpu->llc_mmio = NULL;
a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
@ -1920,7 +2413,7 @@ static int a6xx_gmu_pm_resume(struct msm_gpu *gpu)
msm_devfreq_resume(gpu);
a6xx_llc_activate(a6xx_gpu);
adreno_is_a7xx(adreno_gpu) ? a7xx_llc_activate(a6xx_gpu) : a6xx_llc_activate(a6xx_gpu);
return ret;
}
@ -2237,7 +2730,7 @@ static int a6xx_set_supported_hw(struct device *dev, const struct adreno_info *i
DRM_DEV_ERROR(dev,
"missing support for speed-bin: %u. Some OPPs may not be supported by hardware\n",
speedbin);
return UINT_MAX;
supp_hw = BIT(0); /* Default */
}
ret = devm_pm_opp_set_supported_hw(dev, &supp_hw, 1);
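For context on what supp_hw feeds into: devm_pm_opp_set_supported_hw() makes the OPP core disable table entries whose opp-supported-hw mask does not match this one, so defaulting to BIT(0) keeps the speedbin-0 OPPs usable instead of aborting probe as the removed return did. A standalone sketch of the fuse-to-mask step, with a made-up speedbin table (the real ones live in the catalog):

#include <stddef.h>
#include <stdint.h>

struct speedbin { uint16_t fuse; uint16_t bin; };

/* Hypothetical table; the fuse values here are invented. */
static const struct speedbin example_map[] = {
	{ 0, 0 }, { 117, 0 }, { 190, 1 },
};

static uint32_t fuse_to_supp_hw(const struct speedbin *map, size_t n,
				uint32_t fuse)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (map[i].fuse == fuse)
			return 1u << map[i].bin;	/* one supported-hw bit */

	return 1u << 0;	/* unknown fuse: fall back to bin 0, as above */
}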
@ -2307,6 +2800,37 @@ static const struct adreno_gpu_funcs funcs_gmuwrapper = {
.get_timestamp = a6xx_get_timestamp,
};
static const struct adreno_gpu_funcs funcs_a7xx = {
.base = {
.get_param = adreno_get_param,
.set_param = adreno_set_param,
.hw_init = a6xx_hw_init,
.ucode_load = a6xx_ucode_load,
.pm_suspend = a6xx_gmu_pm_suspend,
.pm_resume = a6xx_gmu_pm_resume,
.recover = a6xx_recover,
.submit = a7xx_submit,
.active_ring = a6xx_active_ring,
.irq = a6xx_irq,
.destroy = a6xx_destroy,
#if defined(CONFIG_DRM_MSM_GPU_STATE)
.show = a6xx_show,
#endif
.gpu_busy = a6xx_gpu_busy,
.gpu_get_freq = a6xx_gmu_get_freq,
.gpu_set_freq = a6xx_gpu_set_freq,
#if defined(CONFIG_DRM_MSM_GPU_STATE)
.gpu_state_get = a6xx_gpu_state_get,
.gpu_state_put = a6xx_gpu_state_put,
#endif
.create_address_space = a6xx_create_address_space,
.create_private_address_space = a6xx_create_private_address_space,
.get_rptr = a6xx_get_rptr,
.progress = a6xx_progress,
},
.get_timestamp = a6xx_gmu_get_timestamp,
};
struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
@ -2316,6 +2840,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
struct a6xx_gpu *a6xx_gpu;
struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
bool is_a7xx;
int ret;
a6xx_gpu = kzalloc(sizeof(*a6xx_gpu), GFP_KERNEL);
@ -2339,7 +2864,11 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
adreno_gpu->base.hw_apriv =
!!(config->info->quirks & ADRENO_QUIRK_HAS_HW_APRIV);
a6xx_llc_slices_init(pdev, a6xx_gpu);
/* gpu->info only gets assigned in adreno_gpu_init() */
is_a7xx = config->info->family == ADRENO_7XX_GEN1 ||
config->info->family == ADRENO_7XX_GEN2;
a6xx_llc_slices_init(pdev, a6xx_gpu, is_a7xx);
ret = a6xx_set_supported_hw(&pdev->dev, config->info);
if (ret) {
@ -2347,7 +2876,9 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
return ERR_PTR(ret);
}
if (adreno_has_gmu_wrapper(adreno_gpu))
if (is_a7xx)
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_a7xx, 1);
else if (adreno_has_gmu_wrapper(adreno_gpu))
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_gmuwrapper, 1);
else
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);


@ -882,12 +882,13 @@ static void a6xx_snapshot_gmu_hfi_history(struct msm_gpu *gpu,
}
}
#define A6XX_REGLIST_SIZE 1
#define A6XX_GBIF_REGLIST_SIZE 1
static void a6xx_get_registers(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
struct a6xx_crashdumper *dumper)
{
int i, count = ARRAY_SIZE(a6xx_ahb_reglist) +
int i, count = A6XX_REGLIST_SIZE +
ARRAY_SIZE(a6xx_reglist) +
ARRAY_SIZE(a6xx_hlsq_reglist) + A6XX_GBIF_REGLIST_SIZE;
int index = 0;
@ -901,12 +902,20 @@ static void a6xx_get_registers(struct msm_gpu *gpu,
a6xx_state->nr_registers = count;
for (i = 0; i < ARRAY_SIZE(a6xx_ahb_reglist); i++)
if (adreno_is_a7xx(adreno_gpu))
a6xx_get_ahb_gpu_registers(gpu,
a6xx_state, &a6xx_ahb_reglist[i],
a6xx_state, &a7xx_ahb_reglist,
&a6xx_state->registers[index++]);
else
a6xx_get_ahb_gpu_registers(gpu,
a6xx_state, &a6xx_ahb_reglist,
&a6xx_state->registers[index++]);
if (a6xx_has_gbif(adreno_gpu))
if (adreno_is_a7xx(adreno_gpu))
a6xx_get_ahb_gpu_registers(gpu,
a6xx_state, &a7xx_gbif_reglist,
&a6xx_state->registers[index++]);
else if (a6xx_has_gbif(adreno_gpu))
a6xx_get_ahb_gpu_registers(gpu,
a6xx_state, &a6xx_gbif_reglist,
&a6xx_state->registers[index++]);
@ -948,6 +957,18 @@ static u32 a6xx_get_cp_roq_size(struct msm_gpu *gpu)
return gpu_read(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2) >> 14;
}
static u32 a7xx_get_cp_roq_size(struct msm_gpu *gpu)
{
/*
* The value at CP_ROQ_THRESHOLDS_2[31:20] is in 4-dword units.
* That register however is not directly accessible from APSS on A7xx.
* Program the SQE_UCODE_DBG_ADDR with offset=0x70d3 and read the value.
*/
gpu_write(gpu, REG_A6XX_CP_SQE_UCODE_DBG_ADDR, 0x70d3);
return 4 * (gpu_read(gpu, REG_A6XX_CP_SQE_UCODE_DBG_DATA) >> 20);
}
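A worked example of that conversion, with an assumed readback value:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t dbg_data = 0x08400000;	/* invented value, for arithmetic only */

	/* bits [31:20] = 0x084 = 132 units of 4 dwords -> 528 dwords */
	assert(4 * (dbg_data >> 20) == 528);
	return 0;
}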
/* Read a block of data from an indexed register pair */
static void a6xx_get_indexed_regs(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
@ -1019,8 +1040,40 @@ static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
/* Restore the size in the hardware */
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, mempool_size);
}
a6xx_state->nr_indexed_regs = count;
}
static void a7xx_get_indexed_registers(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state)
{
int i, indexed_count, mempool_count;
indexed_count = ARRAY_SIZE(a7xx_indexed_reglist);
mempool_count = ARRAY_SIZE(a7xx_cp_bv_mempool_indexed);
a6xx_state->indexed_regs = state_kcalloc(a6xx_state,
indexed_count + mempool_count,
sizeof(*a6xx_state->indexed_regs));
if (!a6xx_state->indexed_regs)
return;
a6xx_state->nr_indexed_regs = indexed_count + mempool_count;
/* First read the common regs */
for (i = 0; i < indexed_count; i++)
a6xx_get_indexed_regs(gpu, a6xx_state, &a7xx_indexed_reglist[i],
&a6xx_state->indexed_regs[i]);
gpu_rmw(gpu, REG_A6XX_CP_CHICKEN_DBG, 0, BIT(2));
gpu_rmw(gpu, REG_A7XX_CP_BV_CHICKEN_DBG, 0, BIT(2));
/* Get the contents of the CP_BV mempool */
for (i = 0; i < mempool_count; i++)
a6xx_get_indexed_regs(gpu, a6xx_state, &a7xx_cp_bv_mempool_indexed[i],
&a6xx_state->indexed_regs[indexed_count + i]);
gpu_rmw(gpu, REG_A6XX_CP_CHICKEN_DBG, BIT(2), 0);
gpu_rmw(gpu, REG_A7XX_CP_BV_CHICKEN_DBG, BIT(2), 0);
}
struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
@ -1056,6 +1109,12 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
return &a6xx_state->base;
/* Get the banks of indexed registers */
if (adreno_is_a7xx(adreno_gpu)) {
a7xx_get_indexed_registers(gpu, a6xx_state);
/* Further codeflow is untested on A7xx. */
return &a6xx_state->base;
}
a6xx_get_indexed_registers(gpu, a6xx_state);
/*


@ -328,9 +328,8 @@ static const u32 a6xx_gbif_registers[] = {
0x3C00, 0X3C0B, 0X3C40, 0X3C47, 0X3CC0, 0X3CD1, 0xE3A, 0xE3A,
};
static const struct a6xx_registers a6xx_ahb_reglist[] = {
REGS(a6xx_ahb_registers, 0, 0),
};
static const struct a6xx_registers a6xx_ahb_reglist =
REGS(a6xx_ahb_registers, 0, 0);
static const struct a6xx_registers a6xx_vbif_reglist =
REGS(a6xx_vbif_registers, 0, 0);
@ -338,6 +337,27 @@ static const struct a6xx_registers a6xx_vbif_reglist =
static const struct a6xx_registers a6xx_gbif_reglist =
REGS(a6xx_gbif_registers, 0, 0);
static const u32 a7xx_ahb_registers[] = {
/* RBBM_STATUS */
0x210, 0x210,
/* RBBM_STATUS2-3 */
0x212, 0x213,
};
static const u32 a7xx_gbif_registers[] = {
0x3c00, 0x3c0b,
0x3c40, 0x3c42,
0x3c45, 0x3c47,
0x3c49, 0x3c4a,
0x3cc0, 0x3cd1,
};
static const struct a6xx_registers a7xx_ahb_reglist =
REGS(a7xx_ahb_registers, 0, 0);
static const struct a6xx_registers a7xx_gbif_reglist =
REGS(a7xx_gbif_registers, 0, 0);
static const u32 a6xx_gmu_gx_registers[] = {
/* GMU GX */
0x0000, 0x0000, 0x0010, 0x0013, 0x0016, 0x0016, 0x0018, 0x001b,
@ -384,14 +404,17 @@ static const struct a6xx_registers a6xx_gmu_reglist[] = {
};
static u32 a6xx_get_cp_roq_size(struct msm_gpu *gpu);
static u32 a7xx_get_cp_roq_size(struct msm_gpu *gpu);
static struct a6xx_indexed_registers {
struct a6xx_indexed_registers {
const char *name;
u32 addr;
u32 data;
u32 count;
u32 (*count_fn)(struct msm_gpu *gpu);
} a6xx_indexed_reglist[] = {
};
static struct a6xx_indexed_registers a6xx_indexed_reglist[] = {
{ "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
REG_A6XX_CP_SQE_STAT_DATA, 0x33, NULL },
{ "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
@ -402,11 +425,43 @@ static struct a6xx_indexed_registers {
REG_A6XX_CP_ROQ_DBG_DATA, 0, a6xx_get_cp_roq_size},
};
static struct a6xx_indexed_registers a7xx_indexed_reglist[] = {
{ "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
REG_A6XX_CP_SQE_STAT_DATA, 0x33, NULL },
{ "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
REG_A6XX_CP_DRAW_STATE_DATA, 0x100, NULL },
{ "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x8000, NULL },
{ "CP_BV_SQE_STAT_ADDR", REG_A7XX_CP_BV_SQE_STAT_ADDR,
REG_A7XX_CP_BV_SQE_STAT_DATA, 0x33, NULL },
{ "CP_BV_DRAW_STATE_ADDR", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
REG_A7XX_CP_BV_DRAW_STATE_DATA, 0x100, NULL },
{ "CP_BV_SQE_UCODE_DBG_ADDR", REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR,
REG_A7XX_CP_BV_SQE_UCODE_DBG_DATA, 0x8000, NULL },
{ "CP_SQE_AC_STAT_ADDR", REG_A7XX_CP_SQE_AC_STAT_ADDR,
REG_A7XX_CP_SQE_AC_STAT_DATA, 0x33, NULL },
{ "CP_LPAC_DRAW_STATE_ADDR", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR,
REG_A7XX_CP_LPAC_DRAW_STATE_DATA, 0x100, NULL },
{ "CP_SQE_AC_UCODE_DBG_ADDR", REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR,
REG_A7XX_CP_SQE_AC_UCODE_DBG_DATA, 0x8000, NULL },
{ "CP_LPAC_FIFO_DBG_ADDR", REG_A7XX_CP_LPAC_FIFO_DBG_ADDR,
REG_A7XX_CP_LPAC_FIFO_DBG_DATA, 0x40, NULL },
{ "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
REG_A6XX_CP_ROQ_DBG_DATA, 0, a7xx_get_cp_roq_size },
};
static struct a6xx_indexed_registers a6xx_cp_mempool_indexed = {
"CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060, NULL,
};
static struct a6xx_indexed_registers a7xx_cp_bv_mempool_indexed[] = {
{ "CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2100, NULL },
{ "CP_BV_MEMPOOL", REG_A7XX_CP_BV_MEM_POOL_DBG_ADDR,
REG_A7XX_CP_BV_MEM_POOL_DBG_DATA, 0x2100, NULL },
};
#define DEBUGBUS(_id, _count) { .id = _id, .name = #_id, .count = _count }
static const struct a6xx_debugbus_block {


@ -5,6 +5,8 @@
#include <linux/circ_buf.h>
#include <linux/list.h>
#include <soc/qcom/cmd-db.h>
#include "a6xx_gmu.h"
#include "a6xx_gmu.xml.h"
#include "a6xx_gpu.h"
@ -506,6 +508,88 @@ static void adreno_7c3_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
msg->cnoc_cmds_data[0][0] = 0x40000000;
msg->cnoc_cmds_data[1][0] = 0x60000001;
}
static void a730_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
msg->bw_level_num = 12;
msg->ddr_cmds_num = 3;
msg->ddr_wait_bitmask = 0x7;
msg->ddr_cmds_addrs[0] = cmd_db_read_addr("SH0");
msg->ddr_cmds_addrs[1] = cmd_db_read_addr("MC0");
msg->ddr_cmds_addrs[2] = cmd_db_read_addr("ACV");
msg->ddr_cmds_data[0][0] = 0x40000000;
msg->ddr_cmds_data[0][1] = 0x40000000;
msg->ddr_cmds_data[0][2] = 0x40000000;
msg->ddr_cmds_data[1][0] = 0x600002e8;
msg->ddr_cmds_data[1][1] = 0x600003d0;
msg->ddr_cmds_data[1][2] = 0x60000008;
msg->ddr_cmds_data[2][0] = 0x6000068d;
msg->ddr_cmds_data[2][1] = 0x6000089a;
msg->ddr_cmds_data[2][2] = 0x60000008;
msg->ddr_cmds_data[3][0] = 0x600007f2;
msg->ddr_cmds_data[3][1] = 0x60000a6e;
msg->ddr_cmds_data[3][2] = 0x60000008;
msg->ddr_cmds_data[4][0] = 0x600009e5;
msg->ddr_cmds_data[4][1] = 0x60000cfd;
msg->ddr_cmds_data[4][2] = 0x60000008;
msg->ddr_cmds_data[5][0] = 0x60000b29;
msg->ddr_cmds_data[5][1] = 0x60000ea6;
msg->ddr_cmds_data[5][2] = 0x60000008;
msg->ddr_cmds_data[6][0] = 0x60001698;
msg->ddr_cmds_data[6][1] = 0x60001da8;
msg->ddr_cmds_data[6][2] = 0x60000008;
msg->ddr_cmds_data[7][0] = 0x600018d2;
msg->ddr_cmds_data[7][1] = 0x60002093;
msg->ddr_cmds_data[7][2] = 0x60000008;
msg->ddr_cmds_data[8][0] = 0x60001e66;
msg->ddr_cmds_data[8][1] = 0x600027e6;
msg->ddr_cmds_data[8][2] = 0x60000008;
msg->ddr_cmds_data[9][0] = 0x600027c2;
msg->ddr_cmds_data[9][1] = 0x6000342f;
msg->ddr_cmds_data[9][2] = 0x60000008;
msg->ddr_cmds_data[10][0] = 0x60002e71;
msg->ddr_cmds_data[10][1] = 0x60003cf5;
msg->ddr_cmds_data[10][2] = 0x60000008;
msg->ddr_cmds_data[11][0] = 0x600030ae;
msg->ddr_cmds_data[11][1] = 0x60003fe5;
msg->ddr_cmds_data[11][2] = 0x60000008;
msg->cnoc_cmds_num = 1;
msg->cnoc_wait_bitmask = 0x1;
msg->cnoc_cmds_addrs[0] = cmd_db_read_addr("CN0");
msg->cnoc_cmds_data[0][0] = 0x40000000;
msg->cnoc_cmds_data[1][0] = 0x60000001;
}
static void a740_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
msg->bw_level_num = 1;
msg->ddr_cmds_num = 3;
msg->ddr_wait_bitmask = 0x7;
msg->ddr_cmds_addrs[0] = cmd_db_read_addr("SH0");
msg->ddr_cmds_addrs[1] = cmd_db_read_addr("MC0");
msg->ddr_cmds_addrs[2] = cmd_db_read_addr("ACV");
msg->ddr_cmds_data[0][0] = 0x40000000;
msg->ddr_cmds_data[0][1] = 0x40000000;
msg->ddr_cmds_data[0][2] = 0x40000000;
/* TODO: add a proper dvfs table */
msg->cnoc_cmds_num = 1;
msg->cnoc_wait_bitmask = 0x1;
msg->cnoc_cmds_addrs[0] = cmd_db_read_addr("CN0");
msg->cnoc_cmds_data[0][0] = 0x40000000;
msg->cnoc_cmds_data[1][0] = 0x60000001;
}
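The ddr_cmds_data words in these tables appear to follow the RPMh BCM TCS command layout, with a commit flag in bit 30, a valid flag in bit 29 and a 14-bit bandwidth vote in the low bits; that reading is an assumption on my part, not something this diff states. Under it, 0x600002e8 decodes as a committed, valid vote of 0x2e8. A small decode sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct bcm_vote {
	bool commit;	/* assumed bit 30 */
	bool valid;	/* assumed bit 29 */
	uint16_t vote;	/* assumed bits [13:0] */
};

static struct bcm_vote decode_bcm_cmd(uint32_t cmd)
{
	return (struct bcm_vote){
		.commit = cmd & (1u << 30),
		.valid = cmd & (1u << 29),
		.vote = cmd & 0x3fff,
	};
}

int main(void)
{
	struct bcm_vote v = decode_bcm_cmd(0x600002e8);

	printf("commit=%d valid=%d vote=%#x\n", v.commit, v.valid, v.vote);	/* 1 1 0x2e8 */
	return 0;
}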
static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
/* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
@ -564,6 +648,10 @@ static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
a660_build_bw_table(&msg);
else if (adreno_is_a690(adreno_gpu))
a690_build_bw_table(&msg);
else if (adreno_is_a730(adreno_gpu))
a730_build_bw_table(&msg);
else if (adreno_is_a740_family(adreno_gpu))
a740_build_bw_table(&msg);
else
a6xx_build_bw_table(&msg);


@ -331,7 +331,7 @@ static const struct adreno_info gpulist[] = {
),
}, {
.machine = "qcom,sm6375",
.chip_ids = ADRENO_CHIP_IDS(0x06010900),
.chip_ids = ADRENO_CHIP_IDS(0x06010901),
.family = ADRENO_6XX_GEN1,
.revn = 619,
.fw = {
@ -454,11 +454,13 @@ static const struct adreno_info gpulist[] = {
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
.init = a6xx_gpu_init,
.zapfw = "a660_zap.mbn",
.hwcg = a660_hwcg,
.address_space_size = SZ_16G,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 117, 0 },
{ 172, 2 }, /* Called speedbin 1 downstream, but let's not break things! */
{ 190, 1 },
),
}, {
@ -490,6 +492,36 @@ static const struct adreno_info gpulist[] = {
.zapfw = "a690_zap.mdt",
.hwcg = a690_hwcg,
.address_space_size = SZ_16G,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x07030001),
.family = ADRENO_7XX_GEN1,
.fw = {
[ADRENO_FW_SQE] = "a730_sqe.fw",
[ADRENO_FW_GMU] = "gmu_gen70000.bin",
},
.gmem = SZ_2M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
.init = a6xx_gpu_init,
.zapfw = "a730_zap.mdt",
.hwcg = a730_hwcg,
.address_space_size = SZ_16G,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x43050a01), /* "C510v2" */
.family = ADRENO_7XX_GEN2,
.fw = {
[ADRENO_FW_SQE] = "a740_sqe.fw",
[ADRENO_FW_GMU] = "gmu_gen70200.bin",
},
.gmem = 3 * SZ_1M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
.init = a6xx_gpu_init,
.zapfw = "a740_zap.mdt",
.hwcg = a740_hwcg,
.address_space_size = SZ_16G,
},
};


@ -323,7 +323,11 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
*value = adreno_gpu->info->gmem;
return 0;
case MSM_PARAM_GMEM_BASE:
*value = !adreno_is_a650_family(adreno_gpu) ? 0x100000 : 0;
if (adreno_is_a650_family(adreno_gpu) ||
adreno_is_a740_family(adreno_gpu))
*value = 0;
else
*value = 0x100000;
return 0;
case MSM_PARAM_CHIP_ID:
*value = adreno_gpu->chip_id;
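For reference, userspace reads this value through DRM_MSM_GET_PARAM; a minimal libdrm sketch (error handling trimmed; assumes an msm render-node fd and libdrm's include path on the compiler command line):

#include <stdint.h>
#include <xf86drm.h>
#include <msm_drm.h>

static int get_gmem_base(int fd, uint64_t *base)
{
	struct drm_msm_param req = {
		.pipe = MSM_PIPE_3D0,
		.param = MSM_PARAM_GMEM_BASE,
	};
	int ret = drmCommandWriteRead(fd, DRM_MSM_GET_PARAM, &req, sizeof(req));

	if (!ret)
		*base = req.value;	/* now 0 on a650- and a740-family, 0x100000 elsewhere */
	return ret;
}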
@ -567,6 +571,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
ring->cur = ring->start;
ring->next = ring->start;
ring->memptrs->rptr = 0;
ring->memptrs->bv_fence = ring->fctx->completed_fence;
/* Detect and clean up an impossible fence, ie. if GPU managed
* to scribble something invalid, we don't want that to confuse


@ -46,6 +46,8 @@ enum adreno_family {
ADRENO_6XX_GEN2, /* a640 family */
ADRENO_6XX_GEN3, /* a650 family */
ADRENO_6XX_GEN4, /* a660 family */
ADRENO_7XX_GEN1, /* a730 family */
ADRENO_7XX_GEN2, /* a740 family */
};
#define ADRENO_QUIRK_TWO_PASS_USE_WFI BIT(0)
@ -75,7 +77,7 @@ struct adreno_reglist {
};
extern const struct adreno_reglist a612_hwcg[], a615_hwcg[], a630_hwcg[], a640_hwcg[], a650_hwcg[];
extern const struct adreno_reglist a660_hwcg[], a690_hwcg[];
extern const struct adreno_reglist a660_hwcg[], a690_hwcg[], a730_hwcg[], a740_hwcg[];
struct adreno_speedbin {
uint16_t fuse;
@ -391,7 +393,8 @@ static inline int adreno_is_a650_family(const struct adreno_gpu *gpu)
{
if (WARN_ON_ONCE(!gpu->info))
return false;
return gpu->info->family >= ADRENO_6XX_GEN3;
return gpu->info->family == ADRENO_6XX_GEN3 ||
gpu->info->family == ADRENO_6XX_GEN4;
}
static inline int adreno_is_a640_family(const struct adreno_gpu *gpu)
@ -401,6 +404,31 @@ static inline int adreno_is_a640_family(const struct adreno_gpu *gpu)
return gpu->info->family == ADRENO_6XX_GEN2;
}
static inline int adreno_is_a730(struct adreno_gpu *gpu)
{
return gpu->info->chip_ids[0] == 0x07030001;
}
static inline int adreno_is_a740(struct adreno_gpu *gpu)
{
return gpu->info->chip_ids[0] == 0x43050a01;
}
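The two helpers above compare different chip-id flavors: 0x07030001 is the legacy byte-packed core.major.minor.patchid form (A730 v1), while a740's 0x43050a01 lines up with the "C510v2" comment on its catalog entry rather than with the packed scheme. A decode sketch for the packed form only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t chip_id = 0x07030001;	/* the a730 entry */

	printf("a%u%u%u patchid %u\n",
	       (chip_id >> 24) & 0xff,	/* core: 7 */
	       (chip_id >> 16) & 0xff,	/* major: 3 */
	       (chip_id >> 8) & 0xff,	/* minor: 0 */
	       chip_id & 0xff);		/* patchid: 1 -> "a730 patchid 1" */
	return 0;
}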
/* Placeholder to make future diffs smaller */
static inline int adreno_is_a740_family(struct adreno_gpu *gpu)
{
if (WARN_ON_ONCE(!gpu->info))
return false;
return gpu->info->family == ADRENO_7XX_GEN2;
}
static inline int adreno_is_a7xx(struct adreno_gpu *gpu)
{
/* Update with non-fake (i.e. non-A702) Gen 7 GPUs */
return gpu->info->family == ADRENO_7XX_GEN1 ||
adreno_is_a740_family(gpu);
}
u64 adreno_private_address_space_size(struct msm_gpu *gpu);
int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
uint32_t param, uint64_t *value, uint32_t *len);


@ -244,7 +244,6 @@ static const struct dpu_intf_cfg msm8998_intf[] = {
.prog_fetch_lines_worst_case = 21,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
.intr_tear_rd_ptr = -1,
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x280,
@ -253,7 +252,6 @@ static const struct dpu_intf_cfg msm8998_intf[] = {
.prog_fetch_lines_worst_case = 21,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
.intr_tear_rd_ptr = -1,
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x6b000, .len = 0x280,
@ -262,7 +260,6 @@ static const struct dpu_intf_cfg msm8998_intf[] = {
.prog_fetch_lines_worst_case = 21,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
.intr_tear_rd_ptr = -1,
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x6b800, .len = 0x280,
@ -270,7 +267,6 @@ static const struct dpu_intf_cfg msm8998_intf[] = {
.prog_fetch_lines_worst_case = 21,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
.intr_tear_rd_ptr = -1,
},
};


@ -260,7 +260,6 @@ static const struct dpu_intf_cfg sdm845_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
.intr_tear_rd_ptr = -1,
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x280,
@ -269,7 +268,6 @@ static const struct dpu_intf_cfg sdm845_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
.intr_tear_rd_ptr = -1,
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x6b000, .len = 0x280,
@ -278,7 +276,6 @@ static const struct dpu_intf_cfg sdm845_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
.intr_tear_rd_ptr = -1,
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x6b800, .len = 0x280,
@ -287,7 +284,6 @@ static const struct dpu_intf_cfg sdm845_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
.intr_tear_rd_ptr = -1,
},
};


@ -220,7 +220,6 @@ static const struct dpu_pingpong_cfg sm8150_pp[] = {
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = -1,
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
@ -228,7 +227,6 @@ static const struct dpu_pingpong_cfg sm8150_pp[] = {
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = -1,
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
@ -236,7 +234,6 @@ static const struct dpu_pingpong_cfg sm8150_pp[] = {
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
.intr_rdptr = -1,
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x71800, .len = 0xd4,
@ -244,7 +241,6 @@ static const struct dpu_pingpong_cfg sm8150_pp[] = {
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
.intr_rdptr = -1,
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x72000, .len = 0xd4,
@ -252,7 +248,6 @@ static const struct dpu_pingpong_cfg sm8150_pp[] = {
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
.intr_rdptr = -1,
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x72800, .len = 0xd4,
@ -260,7 +255,6 @@ static const struct dpu_pingpong_cfg sm8150_pp[] = {
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
.intr_rdptr = -1,
},
};
@ -307,7 +301,6 @@ static const struct dpu_intf_cfg sm8150_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
.intr_tear_rd_ptr = -1,
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2bc,
@ -337,7 +330,6 @@ static const struct dpu_intf_cfg sm8150_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
.intr_tear_rd_ptr = -1,
},
};


@ -219,7 +219,6 @@ static const struct dpu_pingpong_cfg sc8180x_pp[] = {
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = -1,
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
@ -227,7 +226,6 @@ static const struct dpu_pingpong_cfg sc8180x_pp[] = {
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = -1,
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
@ -235,7 +233,6 @@ static const struct dpu_pingpong_cfg sc8180x_pp[] = {
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
.intr_rdptr = -1,
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x71800, .len = 0xd4,
@ -243,7 +240,6 @@ static const struct dpu_pingpong_cfg sc8180x_pp[] = {
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
.intr_rdptr = -1,
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x72000, .len = 0xd4,
@ -251,7 +247,6 @@ static const struct dpu_pingpong_cfg sc8180x_pp[] = {
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
.intr_rdptr = -1,
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x72800, .len = 0xd4,
@ -259,7 +254,6 @@ static const struct dpu_pingpong_cfg sc8180x_pp[] = {
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
.intr_rdptr = -1,
},
};
@ -314,7 +308,6 @@ static const struct dpu_intf_cfg sc8180x_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
.intr_tear_rd_ptr = -1,
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2bc,
@ -346,7 +339,6 @@ static const struct dpu_intf_cfg sc8180x_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
.intr_tear_rd_ptr = -1,
}, {
.name = "intf_4", .id = INTF_4,
.base = 0x6c000, .len = 0x280,
@ -356,7 +348,6 @@ static const struct dpu_intf_cfg sc8180x_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 20),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 21),
.intr_tear_rd_ptr = -1,
}, {
.name = "intf_5", .id = INTF_5,
.base = 0x6c800, .len = 0x280,
@ -366,7 +357,6 @@ static const struct dpu_intf_cfg sc8180x_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 22),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 23),
.intr_tear_rd_ptr = -1,
},
};


@ -129,7 +129,6 @@ static const struct dpu_pingpong_cfg sm6125_pp[] = {
.merge_3d = 0,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = -1,
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
@ -137,7 +136,6 @@ static const struct dpu_pingpong_cfg sm6125_pp[] = {
.merge_3d = 0,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = -1,
},
};
@ -151,7 +149,6 @@ static const struct dpu_intf_cfg sm6125_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
.intr_tear_rd_ptr = -1,
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2c0,


@ -219,7 +219,6 @@ static const struct dpu_pingpong_cfg sm8250_pp[] = {
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = -1,
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
@ -227,7 +226,6 @@ static const struct dpu_pingpong_cfg sm8250_pp[] = {
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = -1,
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
@ -235,7 +233,6 @@ static const struct dpu_pingpong_cfg sm8250_pp[] = {
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
.intr_rdptr = -1,
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x71800, .len = 0xd4,
@ -243,7 +240,6 @@ static const struct dpu_pingpong_cfg sm8250_pp[] = {
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
.intr_rdptr = -1,
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x72000, .len = 0xd4,
@ -251,7 +247,6 @@ static const struct dpu_pingpong_cfg sm8250_pp[] = {
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
.intr_rdptr = -1,
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x72800, .len = 0xd4,
@ -259,7 +254,6 @@ static const struct dpu_pingpong_cfg sm8250_pp[] = {
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
.intr_rdptr = -1,
},
};
@ -306,7 +300,6 @@ static const struct dpu_intf_cfg sm8250_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
.intr_tear_rd_ptr = -1,
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2c0,
@ -336,7 +329,6 @@ static const struct dpu_intf_cfg sm8250_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
.intr_tear_rd_ptr = -1,
},
};


@ -120,7 +120,6 @@ static const struct dpu_pingpong_cfg sc7180_pp[] = {
.sblk = &sdm845_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = -1,
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
@ -128,7 +127,6 @@ static const struct dpu_pingpong_cfg sc7180_pp[] = {
.sblk = &sdm845_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = -1,
},
};
@ -142,7 +140,6 @@ static const struct dpu_intf_cfg sc7180_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
.intr_tear_rd_ptr = -1,
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2c0,


@ -83,7 +83,6 @@ static const struct dpu_pingpong_cfg sm6115_pp[] = {
.sblk = &sdm845_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = -1,
},
};


@ -128,7 +128,6 @@ static struct dpu_pingpong_cfg sm6350_pp[] = {
.sblk = &sdm845_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = -1,
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
@ -136,7 +135,6 @@ static struct dpu_pingpong_cfg sm6350_pp[] = {
.sblk = &sdm845_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = -1,
},
};
@ -158,7 +156,6 @@ static const struct dpu_intf_cfg sm6350_intf[] = {
.prog_fetch_lines_worst_case = 35,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
.intr_tear_rd_ptr = -1,
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2c0,


@ -82,7 +82,6 @@ static const struct dpu_pingpong_cfg qcm2290_pp[] = {
.sblk = &sdm845_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = -1,
},
};


@ -85,7 +85,6 @@ static const struct dpu_pingpong_cfg sm6375_pp[] = {
.sblk = &sdm845_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = -1,
},
};


@ -218,7 +218,6 @@ static const struct dpu_pingpong_cfg sm8350_pp[] = {
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = -1,
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x6a000, .len = 0,
@ -226,7 +225,6 @@ static const struct dpu_pingpong_cfg sm8350_pp[] = {
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = -1,
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x6b000, .len = 0,
@ -234,7 +232,6 @@ static const struct dpu_pingpong_cfg sm8350_pp[] = {
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
.intr_rdptr = -1,
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x6c000, .len = 0,
@ -242,7 +239,6 @@ static const struct dpu_pingpong_cfg sm8350_pp[] = {
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
.intr_rdptr = -1,
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x6d000, .len = 0,
@ -250,7 +246,6 @@ static const struct dpu_pingpong_cfg sm8350_pp[] = {
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
.intr_rdptr = -1,
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x6e000, .len = 0,
@ -258,7 +253,6 @@ static const struct dpu_pingpong_cfg sm8350_pp[] = {
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
.intr_rdptr = -1,
},
};
@ -314,7 +308,6 @@ static const struct dpu_intf_cfg sm8350_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
.intr_tear_rd_ptr = -1,
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x35000, .len = 0x2c4,
@ -344,7 +337,6 @@ static const struct dpu_intf_cfg sm8350_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
.intr_tear_rd_ptr = -1,
},
};


@ -131,7 +131,6 @@ static const struct dpu_pingpong_cfg sc7280_pp[] = {
.sblk = &sc7280_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = -1,
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x6a000, .len = 0,
@ -139,7 +138,6 @@ static const struct dpu_pingpong_cfg sc7280_pp[] = {
.sblk = &sc7280_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = -1,
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x6b000, .len = 0,
@ -147,7 +145,6 @@ static const struct dpu_pingpong_cfg sc7280_pp[] = {
.sblk = &sc7280_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
.intr_rdptr = -1,
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x6c000, .len = 0,
@ -155,7 +152,6 @@ static const struct dpu_pingpong_cfg sc7280_pp[] = {
.sblk = &sc7280_pp_sblk,
.merge_3d = 0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
.intr_rdptr = -1,
},
};
@ -194,7 +190,6 @@ static const struct dpu_intf_cfg sc7280_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
.intr_tear_rd_ptr = -1,
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x35000, .len = 0x2c4,
@ -214,7 +209,6 @@ static const struct dpu_intf_cfg sc7280_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 22),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 23),
.intr_tear_rd_ptr = -1,
},
};


@ -221,7 +221,6 @@ static const struct dpu_pingpong_cfg sc8280xp_pp[] = {
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = -1,
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x6a000, .len = 0,
@ -229,7 +228,6 @@ static const struct dpu_pingpong_cfg sc8280xp_pp[] = {
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = -1,
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x6b000, .len = 0,
@ -237,7 +235,6 @@ static const struct dpu_pingpong_cfg sc8280xp_pp[] = {
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
.intr_rdptr = -1,
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x6c000, .len = 0,
@ -245,7 +242,6 @@ static const struct dpu_pingpong_cfg sc8280xp_pp[] = {
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
.intr_rdptr = -1,
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x6d000, .len = 0,
@ -253,7 +249,6 @@ static const struct dpu_pingpong_cfg sc8280xp_pp[] = {
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
.intr_rdptr = -1,
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x6e000, .len = 0,
@ -261,7 +256,6 @@ static const struct dpu_pingpong_cfg sc8280xp_pp[] = {
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
.intr_rdptr = -1,
},
};
@ -328,7 +322,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
.intr_tear_rd_ptr = -1,
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x35000, .len = 0x300,
@ -358,7 +351,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
.intr_tear_rd_ptr = -1,
}, {
.name = "intf_4", .id = INTF_4,
.base = 0x38000, .len = 0x280,
@ -368,7 +360,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 20),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 21),
.intr_tear_rd_ptr = -1,
}, {
.name = "intf_5", .id = INTF_5,
.base = 0x39000, .len = 0x280,
@ -378,7 +369,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 22),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 23),
.intr_tear_rd_ptr = -1,
}, {
.name = "intf_6", .id = INTF_6,
.base = 0x3a000, .len = 0x280,
@ -388,7 +378,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17),
.intr_tear_rd_ptr = -1,
}, {
.name = "intf_7", .id = INTF_7,
.base = 0x3b000, .len = 0x280,
@ -398,7 +387,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 18),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 19),
.intr_tear_rd_ptr = -1,
}, {
.name = "intf_8", .id = INTF_8,
.base = 0x3c000, .len = 0x280,
@ -408,7 +396,6 @@ static const struct dpu_intf_cfg sc8280xp_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
.intr_tear_rd_ptr = -1,
},
};


@ -221,7 +221,6 @@ static const struct dpu_pingpong_cfg sm8450_pp[] = {
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = -1,
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x6a000, .len = 0,
@ -229,7 +228,6 @@ static const struct dpu_pingpong_cfg sm8450_pp[] = {
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = -1,
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x6b000, .len = 0,
@ -237,7 +235,6 @@ static const struct dpu_pingpong_cfg sm8450_pp[] = {
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
.intr_rdptr = -1,
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x6c000, .len = 0,
@ -245,7 +242,6 @@ static const struct dpu_pingpong_cfg sm8450_pp[] = {
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
.intr_rdptr = -1,
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x6d000, .len = 0,
@ -253,7 +249,6 @@ static const struct dpu_pingpong_cfg sm8450_pp[] = {
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
.intr_rdptr = -1,
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x6e000, .len = 0,
@ -261,23 +256,18 @@ static const struct dpu_pingpong_cfg sm8450_pp[] = {
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
.intr_rdptr = -1,
}, {
.name = "pingpong_6", .id = PINGPONG_6,
.base = 0x65800, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
.intr_done = -1,
.intr_rdptr = -1,
}, {
.name = "pingpong_7", .id = PINGPONG_7,
.base = 0x65c00, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
.intr_done = -1,
.intr_rdptr = -1,
},
};
@ -336,7 +326,6 @@ static const struct dpu_intf_cfg sm8450_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
.intr_tear_rd_ptr = -1,
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x35000, .len = 0x300,
@ -366,7 +355,6 @@ static const struct dpu_intf_cfg sm8450_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
.intr_tear_rd_ptr = -1,
},
};


@ -24,16 +24,6 @@ static const struct dpu_mdp_cfg sm8550_mdp = {
.base = 0, .len = 0x494,
.features = BIT(DPU_MDP_PERIPH_0_REMOVED),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x4330, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x6330, .bit_off = 0 },
[DPU_CLK_CTRL_VIG2] = { .reg_off = 0x8330, .bit_off = 0 },
[DPU_CLK_CTRL_VIG3] = { .reg_off = 0xa330, .bit_off = 0 },
[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x24330, .bit_off = 0 },
[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x26330, .bit_off = 0 },
[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x28330, .bit_off = 0 },
[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2a330, .bit_off = 0 },
[DPU_CLK_CTRL_DMA4] = { .reg_off = 0x2c330, .bit_off = 0 },
[DPU_CLK_CTRL_DMA5] = { .reg_off = 0x2e330, .bit_off = 0 },
[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
},
};
@ -81,7 +71,6 @@ static const struct dpu_sspp_cfg sm8550_sspp[] = {
.sblk = &sm8550_vig_sblk_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x344,
@ -89,7 +78,6 @@ static const struct dpu_sspp_cfg sm8550_sspp[] = {
.sblk = &sm8550_vig_sblk_1,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG1,
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x344,
@ -97,7 +85,6 @@ static const struct dpu_sspp_cfg sm8550_sspp[] = {
.sblk = &sm8550_vig_sblk_2,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG2,
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x344,
@ -105,7 +92,6 @@ static const struct dpu_sspp_cfg sm8550_sspp[] = {
.sblk = &sm8550_vig_sblk_3,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG3,
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x344,
@ -113,7 +99,6 @@ static const struct dpu_sspp_cfg sm8550_sspp[] = {
.sblk = &sdm845_dma_sblk_0,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x344,
@ -121,7 +106,6 @@ static const struct dpu_sspp_cfg sm8550_sspp[] = {
.sblk = &sdm845_dma_sblk_1,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x344,
@ -129,7 +113,6 @@ static const struct dpu_sspp_cfg sm8550_sspp[] = {
.sblk = &sdm845_dma_sblk_2,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
}, {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x344,
@ -137,7 +120,6 @@ static const struct dpu_sspp_cfg sm8550_sspp[] = {
.sblk = &sdm845_dma_sblk_3,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA3,
}, {
.name = "sspp_12", .id = SSPP_DMA4,
.base = 0x2c000, .len = 0x344,
@ -145,7 +127,6 @@ static const struct dpu_sspp_cfg sm8550_sspp[] = {
.sblk = &sm8550_dma_sblk_4,
.xin_id = 14,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA4,
}, {
.name = "sspp_13", .id = SSPP_DMA5,
.base = 0x2e000, .len = 0x344,
@ -153,7 +134,6 @@ static const struct dpu_sspp_cfg sm8550_sspp[] = {
.sblk = &sm8550_dma_sblk_5,
.xin_id = 15,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA5,
},
};
@ -236,7 +216,6 @@ static const struct dpu_pingpong_cfg sm8550_pp[] = {
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = -1,
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x6a000, .len = 0,
@ -244,7 +223,6 @@ static const struct dpu_pingpong_cfg sm8550_pp[] = {
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = -1,
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x6b000, .len = 0,
@ -252,7 +230,6 @@ static const struct dpu_pingpong_cfg sm8550_pp[] = {
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
.intr_rdptr = -1,
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x6c000, .len = 0,
@ -260,7 +237,6 @@ static const struct dpu_pingpong_cfg sm8550_pp[] = {
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
.intr_rdptr = -1,
}, {
.name = "pingpong_4", .id = PINGPONG_4,
.base = 0x6d000, .len = 0,
@ -268,7 +244,6 @@ static const struct dpu_pingpong_cfg sm8550_pp[] = {
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
.intr_rdptr = -1,
}, {
.name = "pingpong_5", .id = PINGPONG_5,
.base = 0x6e000, .len = 0,
@ -276,23 +251,18 @@ static const struct dpu_pingpong_cfg sm8550_pp[] = {
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
.intr_rdptr = -1,
}, {
.name = "pingpong_6", .id = PINGPONG_6,
.base = 0x66000, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
.intr_done = -1,
.intr_rdptr = -1,
}, {
.name = "pingpong_7", .id = PINGPONG_7,
.base = 0x66400, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
.intr_done = -1,
.intr_rdptr = -1,
},
};
@ -341,6 +311,20 @@ static const struct dpu_dsc_cfg sm8550_dsc[] = {
},
};
static const struct dpu_wb_cfg sm8550_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
.format_list = wb2_formats,
.num_formats = ARRAY_SIZE(wb2_formats),
.xin_id = 6,
.vbif_idx = VBIF_RT,
.maxlinewidth = 4096,
.intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
},
};
static const struct dpu_intf_cfg sm8550_intf[] = {
{
.name = "intf_0", .id = INTF_0,
@ -351,7 +335,6 @@ static const struct dpu_intf_cfg sm8550_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
.intr_tear_rd_ptr = -1,
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x35000, .len = 0x300,
@ -381,7 +364,6 @@ static const struct dpu_intf_cfg sm8550_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
.intr_tear_rd_ptr = -1,
},
};
@ -438,6 +420,8 @@ const struct dpu_mdss_cfg dpu_sm8550_cfg = {
.dsc = sm8550_dsc,
.merge_3d_count = ARRAY_SIZE(sm8550_merge_3d),
.merge_3d = sm8550_merge_3d,
.wb_count = ARRAY_SIZE(sm8550_wb),
.wb = sm8550_wb,
.intf_count = ARRAY_SIZE(sm8550_intf),
.intf = sm8550_intf,
.vbif_count = ARRAY_SIZE(sm8550_vbif),


@ -37,7 +37,7 @@ irqreturn_t dpu_core_irq(struct msm_kms *kms);
*/
u32 dpu_core_irq_read(
struct dpu_kms *dpu_kms,
int irq_idx);
unsigned int irq_idx);
/**
* dpu_core_irq_register_callback - For registering callback function on IRQ
@ -52,8 +52,8 @@ u32 dpu_core_irq_read(
*/
int dpu_core_irq_register_callback(
struct dpu_kms *dpu_kms,
int irq_idx,
void (*irq_cb)(void *arg, int irq_idx),
unsigned int irq_idx,
void (*irq_cb)(void *arg),
void *irq_arg);
/**
@ -67,7 +67,7 @@ int dpu_core_irq_register_callback(
*/
int dpu_core_irq_unregister_callback(
struct dpu_kms *dpu_kms,
int irq_idx);
unsigned int irq_idx);
/**
* dpu_debugfs_core_irq_init - register core irq debugfs


@ -1466,7 +1466,7 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
struct msm_drm_private *priv = dev->dev_private;
struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
struct drm_crtc *crtc = NULL;
struct dpu_crtc *dpu_crtc = NULL;
struct dpu_crtc *dpu_crtc;
int i, ret;
dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);


@ -347,8 +347,8 @@ static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
u32 irq_idx, struct dpu_encoder_wait_info *info);
int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
int irq,
void (*func)(void *arg, int irq_idx),
unsigned int irq_idx,
void (*func)(void *arg),
struct dpu_encoder_wait_info *wait_info)
{
u32 irq_status;
@ -362,54 +362,54 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
/* return EWOULDBLOCK since we know the wait isn't necessary */
if (phys_enc->enable_state == DPU_ENC_DISABLED) {
DRM_ERROR("encoder is disabled id=%u, callback=%ps, irq=%d\n",
DRM_ERROR("encoder is disabled id=%u, callback=%ps, IRQ=[%d, %d]\n",
DRMID(phys_enc->parent), func,
irq);
DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
return -EWOULDBLOCK;
}
if (irq < 0) {
if (irq_idx < 0) {
DRM_DEBUG_KMS("skip irq wait id=%u, callback=%ps\n",
DRMID(phys_enc->parent), func);
return 0;
}
DRM_DEBUG_KMS("id=%u, callback=%ps, irq=%d, pp=%d, pending_cnt=%d\n",
DRM_DEBUG_KMS("id=%u, callback=%ps, IRQ=[%d, %d], pp=%d, pending_cnt=%d\n",
DRMID(phys_enc->parent), func,
irq, phys_enc->hw_pp->idx - PINGPONG_0,
DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(wait_info->atomic_cnt));
ret = dpu_encoder_helper_wait_event_timeout(
DRMID(phys_enc->parent),
irq,
irq_idx,
wait_info);
if (ret <= 0) {
irq_status = dpu_core_irq_read(phys_enc->dpu_kms, irq);
irq_status = dpu_core_irq_read(phys_enc->dpu_kms, irq_idx);
if (irq_status) {
unsigned long flags;
DRM_DEBUG_KMS("irq not triggered id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
DRM_DEBUG_KMS("IRQ=[%d, %d] not triggered id=%u, callback=%ps, pp=%d, atomic_cnt=%d\n",
DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
DRMID(phys_enc->parent), func,
irq,
phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(wait_info->atomic_cnt));
local_irq_save(flags);
func(phys_enc, irq);
func(phys_enc);
local_irq_restore(flags);
ret = 0;
} else {
ret = -ETIMEDOUT;
DRM_DEBUG_KMS("irq timeout id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
DRM_DEBUG_KMS("IRQ=[%d, %d] timeout id=%u, callback=%ps, pp=%d, atomic_cnt=%d\n",
DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
DRMID(phys_enc->parent), func,
irq,
phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(wait_info->atomic_cnt));
}
} else {
ret = 0;
trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
func, irq,
func, DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(wait_info->atomic_cnt));
}
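On the IRQ=[%d, %d] format above: DPU_IRQ_REG() and DPU_IRQ_BIT() unpack the opaque irq_idx back into an (interrupt register, bit) pair, and with this rework index 0 is reserved to mean no IRQ, which is why the parameter type switches to unsigned int. A sketch of the presumed encoding (the real macros live in the DPU interrupt headers, not in this hunk):

#include <assert.h>

/* Presumed scheme: 0 = no IRQ, otherwise 1 + reg * 32 + bit. */
#define SKETCH_IRQ_IDX(reg, bit)	(1 + (reg) * 32 + (bit))
#define SKETCH_IRQ_REG(idx)		(((idx) - 1) / 32)
#define SKETCH_IRQ_BIT(idx)		(((idx) - 1) % 32)

int main(void)
{
	unsigned int idx = SKETCH_IRQ_IDX(0, 25);	/* e.g. an intr_vsync */

	assert(idx != 0);	/* 0 stays free to mean "no IRQ" */
	assert(SKETCH_IRQ_REG(idx) == 0 && SKETCH_IRQ_BIT(idx) == 25);
	return 0;
}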
@ -1194,11 +1194,21 @@ static void dpu_encoder_virt_atomic_enable(struct drm_encoder *drm_enc,
struct dpu_encoder_virt *dpu_enc = NULL;
int ret = 0;
struct drm_display_mode *cur_mode = NULL;
struct msm_drm_private *priv = drm_enc->dev->dev_private;
struct msm_display_info *disp_info;
int index;
dpu_enc = to_dpu_encoder_virt(drm_enc);
disp_info = &dpu_enc->disp_info;
index = disp_info->h_tile_instance[0];
dpu_enc->dsc = dpu_encoder_get_dsc_config(drm_enc);
if (disp_info->intf_type == INTF_DP)
dpu_enc->wide_bus_en = msm_dp_wide_bus_available(priv->dp[index]);
else if (disp_info->intf_type == INTF_DSI)
dpu_enc->wide_bus_en = msm_dsi_wide_bus_enabled(priv->dsi[index]);
mutex_lock(&dpu_enc->enc_lock);
cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
@ -1537,7 +1547,7 @@ void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
static int dpu_encoder_helper_wait_event_timeout(
int32_t drm_id,
u32 irq_idx,
unsigned int irq_idx,
struct dpu_encoder_wait_info *info)
{
int rc = 0;
@ -1550,7 +1560,9 @@ static int dpu_encoder_helper_wait_event_timeout(
atomic_read(info->atomic_cnt) == 0, jiffies);
time = ktime_to_ms(ktime_get());
trace_dpu_enc_wait_event_timeout(drm_id, irq_idx, rc, time,
trace_dpu_enc_wait_event_timeout(drm_id,
DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
rc, time,
expected_time,
atomic_read(info->atomic_cnt));
/* If we timed out, counter is valid and time is less, wait again */
@ -2383,10 +2395,6 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
timer_setup(&dpu_enc->frame_done_timer,
dpu_encoder_frame_done_timeout, 0);
if (disp_info->intf_type == INTF_DP)
dpu_enc->wide_bus_en = msm_dp_wide_bus_available(
priv->dp[disp_info->h_tile_instance[0]]);
INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
dpu_encoder_off_work);
dpu_enc->idle_timeout = IDLE_TIMEOUT;
@ -2480,8 +2488,6 @@ unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc)
void dpu_encoder_phys_init(struct dpu_encoder_phys *phys_enc,
struct dpu_enc_phys_init_params *p)
{
int i;
phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
phys_enc->hw_intf = p->hw_intf;
phys_enc->hw_wb = p->hw_wb;
@ -2491,9 +2497,6 @@ void dpu_encoder_phys_init(struct dpu_encoder_phys *phys_enc,
phys_enc->enc_spinlock = p->enc_spinlock;
phys_enc->enable_state = DPU_ENC_DISABLED;
for (i = 0; i < ARRAY_SIZE(phys_enc->irq); i++)
phys_enc->irq[i] = -EINVAL;
atomic_set(&phys_enc->vblank_refcount, 0);
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
atomic_set(&phys_enc->pending_ctlstart_cnt, 0);


@ -193,7 +193,7 @@ struct dpu_encoder_phys {
atomic_t pending_ctlstart_cnt;
atomic_t pending_kickoff_cnt;
wait_queue_head_t pending_kickoff_wq;
int irq[INTR_IDX_MAX];
unsigned int irq[INTR_IDX_MAX];
bool has_intf_te;
};
@ -364,8 +364,8 @@ void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
* @Return: 0 or -ERROR
*/
int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
int irq,
void (*func)(void *arg, int irq_idx),
unsigned int irq,
void (*func)(void *arg),
struct dpu_encoder_wait_info *wait_info);
/**


@ -72,11 +72,13 @@ static void _dpu_encoder_phys_cmd_update_intf_cfg(
if (intf_cfg.dsc != 0)
cmd_mode_cfg.data_compress = true;
cmd_mode_cfg.wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent);
if (phys_enc->hw_intf->ops.program_intf_cmd_cfg)
phys_enc->hw_intf->ops.program_intf_cmd_cfg(phys_enc->hw_intf, &cmd_mode_cfg);
}
static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg)
{
struct dpu_encoder_phys *phys_enc = arg;
unsigned long lock_flags;
@ -103,19 +105,11 @@ static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
DPU_ATRACE_END("pp_done_irq");
}
static void dpu_encoder_phys_cmd_te_rd_ptr_irq(void *arg, int irq_idx)
static void dpu_encoder_phys_cmd_te_rd_ptr_irq(void *arg)
{
struct dpu_encoder_phys *phys_enc = arg;
struct dpu_encoder_phys_cmd *cmd_enc;
if (phys_enc->has_intf_te) {
if (!phys_enc->hw_intf)
return;
} else {
if (!phys_enc->hw_pp)
return;
}
DPU_ATRACE_BEGIN("rd_ptr_irq");
cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
@ -126,7 +120,7 @@ static void dpu_encoder_phys_cmd_te_rd_ptr_irq(void *arg, int irq_idx)
DPU_ATRACE_END("rd_ptr_irq");
}
static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg)
{
struct dpu_encoder_phys *phys_enc = arg;
@ -139,7 +133,7 @@ static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
DPU_ATRACE_END("ctl_start_irq");
}
static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
static void dpu_encoder_phys_cmd_underrun_irq(void *arg)
{
struct dpu_encoder_phys *phys_enc = arg;
@ -333,24 +327,21 @@ static void dpu_encoder_phys_cmd_tearcheck_config(
unsigned long vsync_hz;
struct dpu_kms *dpu_kms;
if (phys_enc->has_intf_te) {
if (!phys_enc->hw_intf ||
!phys_enc->hw_intf->ops.enable_tearcheck) {
DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
return;
}
DPU_DEBUG_CMDENC(cmd_enc, "");
} else {
if (!phys_enc->hw_pp ||
!phys_enc->hw_pp->ops.enable_tearcheck) {
DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
return;
}
DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
/*
* TODO: if/when resource allocation is refactored, move this to a
* place where the driver can actually return an error.
*/
if (!phys_enc->has_intf_te &&
(!phys_enc->hw_pp ||
!phys_enc->hw_pp->ops.enable_tearcheck)) {
DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
return;
}
DPU_DEBUG_CMDENC(cmd_enc, "intf %d pp %d\n",
phys_enc->hw_intf ? phys_enc->hw_intf->idx - INTF_0 : -1,
phys_enc->hw_pp ? phys_enc->hw_pp->idx - PINGPONG_0 : -1);
mode = &phys_enc->cached_mode;
dpu_kms = phys_enc->dpu_kms;
@ -776,8 +767,19 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
phys_enc->intf_mode = INTF_MODE_CMD;
cmd_enc->stream_sel = 0;
phys_enc->has_intf_te = test_bit(DPU_INTF_TE,
&phys_enc->hw_intf->cap->features);
if (!phys_enc->hw_intf) {
DPU_ERROR_CMDENC(cmd_enc, "no INTF provided\n");
return ERR_PTR(-EINVAL);
}
/* DPUs before 5.0 use PINGPONG for TE handling */
if (phys_enc->dpu_kms->catalog->mdss_ver->core_major_ver >= 5)
phys_enc->has_intf_te = true;
if (phys_enc->has_intf_te && !phys_enc->hw_intf->ops.enable_tearcheck) {
DPU_ERROR_CMDENC(cmd_enc, "tearcheck not supported\n");
return ERR_PTR(-EINVAL);
}
atomic_set(&cmd_enc->pending_vblank_cnt, 0);
init_waitqueue_head(&cmd_enc->pending_vblank_wq);


@ -297,7 +297,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
programmable_fetch_config(phys_enc, &timing_params);
}
static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
static void dpu_encoder_phys_vid_vblank_irq(void *arg)
{
struct dpu_encoder_phys *phys_enc = arg;
struct dpu_hw_ctl *hw_ctl;
@ -334,7 +334,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
DPU_ATRACE_END("vblank_irq");
}
static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
static void dpu_encoder_phys_vid_underrun_irq(void *arg)
{
struct dpu_encoder_phys *phys_enc = arg;


@ -34,6 +34,23 @@ static bool dpu_encoder_phys_wb_is_master(struct dpu_encoder_phys *phys_enc)
return true;
}
static bool _dpu_encoder_phys_wb_clk_force_ctrl(struct dpu_hw_wb *wb,
struct dpu_hw_mdp *mdp,
bool enable, bool *forced_on)
{
if (wb->ops.setup_clk_force_ctrl) {
*forced_on = wb->ops.setup_clk_force_ctrl(wb, enable);
return true;
}
if (mdp->ops.setup_clk_force_ctrl) {
*forced_on = mdp->ops.setup_clk_force_ctrl(mdp, wb->caps->clk_ctrl, enable);
return true;
}
return false;
}
/**
* dpu_encoder_phys_wb_set_ot_limit - set OT limit for writeback interface
* @phys_enc: Pointer to physical encoder
@ -43,6 +60,7 @@ static void dpu_encoder_phys_wb_set_ot_limit(
{
struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
struct dpu_vbif_set_ot_params ot_params;
bool forced_on = false;
memset(&ot_params, 0, sizeof(ot_params));
ot_params.xin_id = hw_wb->caps->xin_id;
@ -52,10 +70,17 @@ static void dpu_encoder_phys_wb_set_ot_limit(
ot_params.is_wfd = true;
ot_params.frame_rate = drm_mode_vrefresh(&phys_enc->cached_mode);
ot_params.vbif_idx = hw_wb->caps->vbif_idx;
ot_params.clk_ctrl = hw_wb->caps->clk_ctrl;
ot_params.rd = false;
if (!_dpu_encoder_phys_wb_clk_force_ctrl(hw_wb, phys_enc->dpu_kms->hw_mdp,
true, &forced_on))
return;
dpu_vbif_set_ot_limit(phys_enc->dpu_kms, &ot_params);
if (forced_on)
_dpu_encoder_phys_wb_clk_force_ctrl(hw_wb, phys_enc->dpu_kms->hw_mdp,
false, &forced_on);
}
/**
@ -67,6 +92,7 @@ static void dpu_encoder_phys_wb_set_qos_remap(
{
struct dpu_hw_wb *hw_wb;
struct dpu_vbif_set_qos_params qos_params;
bool forced_on = false;
if (!phys_enc || !phys_enc->parent || !phys_enc->parent->crtc) {
DPU_ERROR("invalid arguments\n");
@ -83,7 +109,6 @@ static void dpu_encoder_phys_wb_set_qos_remap(
memset(&qos_params, 0, sizeof(qos_params));
qos_params.vbif_idx = hw_wb->caps->vbif_idx;
qos_params.xin_id = hw_wb->caps->xin_id;
qos_params.clk_ctrl = hw_wb->caps->clk_ctrl;
qos_params.num = hw_wb->idx - WB_0;
qos_params.is_rt = false;
@ -92,7 +117,15 @@ static void dpu_encoder_phys_wb_set_qos_remap(
qos_params.vbif_idx,
qos_params.xin_id, qos_params.is_rt);
if (!_dpu_encoder_phys_wb_clk_force_ctrl(hw_wb, phys_enc->dpu_kms->hw_mdp,
true, &forced_on))
return;
dpu_vbif_set_qos_remap(phys_enc->dpu_kms, &qos_params);
if (forced_on)
_dpu_encoder_phys_wb_clk_force_ctrl(hw_wb, phys_enc->dpu_kms->hw_mdp,
false, &forced_on);
}
/**
@ -345,7 +378,11 @@ static void dpu_encoder_phys_wb_setup(
}
static void _dpu_encoder_phys_wb_frame_done_helper(void *arg)
/**
* dpu_encoder_phys_wb_done_irq - writeback interrupt handler
* @arg: Pointer to writeback encoder
*/
static void dpu_encoder_phys_wb_done_irq(void *arg)
{
struct dpu_encoder_phys *phys_enc = arg;
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
@ -371,16 +408,6 @@ static void _dpu_encoder_phys_wb_frame_done_helper(void *arg)
wake_up_all(&phys_enc->pending_kickoff_wq);
}
/**
* dpu_encoder_phys_wb_done_irq - writeback interrupt handler
* @arg: Pointer to writeback encoder
* @irq_idx: interrupt index
*/
static void dpu_encoder_phys_wb_done_irq(void *arg, int irq_idx)
{
_dpu_encoder_phys_wb_frame_done_helper(arg);
}
/**
* dpu_encoder_phys_wb_irq_ctrl - irq control of WB
* @phys: Pointer to physical encoder

View File

@ -79,7 +79,7 @@
(BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA))
#define PINGPONG_SDM845_MASK \
(BIT(DPU_PINGPONG_DITHER) | BIT(DPU_PINGPONG_TE) | BIT(DPU_PINGPONG_DSC))
(BIT(DPU_PINGPONG_DITHER) | BIT(DPU_PINGPONG_DSC))
#define PINGPONG_SDM845_TE2_MASK \
(PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2))
@ -100,7 +100,6 @@
#define INTF_SC7180_MASK \
(BIT(DPU_INTF_INPUT_CTRL) | \
BIT(DPU_INTF_TE) | \
BIT(DPU_INTF_STATUS_SUPPORTED) | \
BIT(DPU_DATA_HCTL_EN))

View File

@ -119,7 +119,6 @@ enum {
/**
* PINGPONG sub-blocks
* @DPU_PINGPONG_TE Tear check block
* @DPU_PINGPONG_TE2 Additional tear check block for split pipes
* @DPU_PINGPONG_SPLIT PP block supports split fifo
* @DPU_PINGPONG_SLAVE PP block is a suitable slave for split fifo
@ -128,8 +127,7 @@ enum {
* @DPU_PINGPONG_MAX
*/
enum {
DPU_PINGPONG_TE = 0x1,
DPU_PINGPONG_TE2,
DPU_PINGPONG_TE2 = 0x1,
DPU_PINGPONG_SPLIT,
DPU_PINGPONG_SLAVE,
DPU_PINGPONG_DITHER,
@ -160,7 +158,6 @@ enum {
* INTF sub-blocks
* @DPU_INTF_INPUT_CTRL Supports the setting of pp block from which
* pixel data arrives to this INTF
* @DPU_INTF_TE INTF block has TE configuration support
* @DPU_DATA_HCTL_EN Allows data to be transferred at different rate
* than video timing
* @DPU_INTF_STATUS_SUPPORTED INTF block has INTF_STATUS register
@ -168,7 +165,6 @@ enum {
*/
enum {
DPU_INTF_INPUT_CTRL = 0x1,
DPU_INTF_TE,
DPU_DATA_HCTL_EN,
DPU_INTF_STATUS_SUPPORTED,
DPU_INTF_MAX
@ -494,7 +490,7 @@ struct dpu_mdp_cfg {
*/
struct dpu_ctl_cfg {
DPU_HW_BLK_INFO;
s32 intr_start;
unsigned int intr_start;
};
/**
@ -557,8 +553,8 @@ struct dpu_dspp_cfg {
struct dpu_pingpong_cfg {
DPU_HW_BLK_INFO;
u32 merge_3d;
s32 intr_done;
s32 intr_rdptr;
unsigned int intr_done;
unsigned int intr_rdptr;
const struct dpu_pingpong_sub_blks *sblk;
};
@ -605,9 +601,9 @@ struct dpu_intf_cfg {
u32 type; /* interface type*/
u32 controller_id;
u32 prog_fetch_lines_worst_case;
s32 intr_underrun;
s32 intr_vsync;
s32 intr_tear_rd_ptr;
unsigned int intr_underrun;
unsigned int intr_vsync;
unsigned int intr_tear_rd_ptr;
};
/**
@ -626,7 +622,7 @@ struct dpu_wb_cfg {
u8 vbif_idx;
u32 maxlinewidth;
u32 xin_id;
s32 intr_wb_done;
unsigned int intr_wb_done;
const u32 *format_list;
u32 num_formats;
enum dpu_clk_ctrl_type clk_ctrl;

View File

@ -197,27 +197,40 @@ static const struct dpu_intr_reg dpu_intr_set_7xxx[] = {
},
};
#define DPU_IRQ_REG(irq_idx) (irq_idx / 32)
#define DPU_IRQ_MASK(irq_idx) (BIT(irq_idx % 32))
#define DPU_IRQ_MASK(irq_idx) (BIT(DPU_IRQ_BIT(irq_idx)))
static inline bool dpu_core_irq_is_valid(unsigned int irq_idx)
{
return irq_idx && irq_idx <= DPU_NUM_IRQS;
}
static inline struct dpu_hw_intr_entry *dpu_core_irq_get_entry(struct dpu_hw_intr *intr,
unsigned int irq_idx)
{
return &intr->irq_tbl[irq_idx - 1];
}
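A minimal standalone sketch (mine, not part of the merge) of the 1-based irq_idx scheme these helpers rely on: 0 stays free as the NO_IRQ value, valid indices run from 1 to DPU_NUM_IRQS, and the table lookup subtracts one. The macros match the dpu_hw_interrupts.h hunk further below:

#include <assert.h>

#define DPU_IRQ_IDX(reg_idx, offset)	(1 + (reg_idx) * 32 + (offset))
#define DPU_IRQ_REG(irq_idx)		(((irq_idx) - 1) / 32)
#define DPU_IRQ_BIT(irq_idx)		(((irq_idx) - 1) % 32)

int main(void)
{
	unsigned int irq_idx = DPU_IRQ_IDX(2, 5);	/* register 2, bit 5 */

	assert(irq_idx == 70);				/* 1 + 2 * 32 + 5 */
	assert(DPU_IRQ_REG(irq_idx) == 2);		/* round-trips back */
	assert(DPU_IRQ_BIT(irq_idx) == 5);
	assert(DPU_IRQ_IDX(0, 0) == 1);			/* 0 never names a valid IRQ */
	return 0;
}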
/**
* dpu_core_irq_callback_handler - dispatch core interrupts
* @dpu_kms: Pointer to DPU's KMS structure
* @irq_idx: interrupt index
*/
static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, int irq_idx)
static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, unsigned int irq_idx)
{
VERB("irq_idx=%d\n", irq_idx);
struct dpu_hw_intr_entry *irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);
if (!dpu_kms->hw_intr->irq_tbl[irq_idx].cb)
DRM_ERROR("no registered cb, idx:%d\n", irq_idx);
VERB("IRQ=[%d, %d]\n", DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
atomic_inc(&dpu_kms->hw_intr->irq_tbl[irq_idx].count);
if (!irq_entry->cb)
DRM_ERROR("no registered cb, IRQ=[%d, %d]\n",
DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
atomic_inc(&irq_entry->count);
/*
* Perform registered function callback
*/
dpu_kms->hw_intr->irq_tbl[irq_idx].cb(dpu_kms->hw_intr->irq_tbl[irq_idx].arg, irq_idx);
irq_entry->cb(irq_entry->arg);
}
irqreturn_t dpu_core_irq(struct msm_kms *kms)
@ -225,7 +238,7 @@ irqreturn_t dpu_core_irq(struct msm_kms *kms)
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
struct dpu_hw_intr *intr = dpu_kms->hw_intr;
int reg_idx;
int irq_idx;
unsigned int irq_idx;
u32 irq_status;
u32 enable_mask;
int bit;
@ -281,7 +294,8 @@ irqreturn_t dpu_core_irq(struct msm_kms *kms)
return IRQ_HANDLED;
}
static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr,
unsigned int irq_idx)
{
int reg_idx;
const struct dpu_intr_reg *reg;
@ -291,8 +305,9 @@ static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
if (!intr)
return -EINVAL;
if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
pr_err("invalid IRQ index: [%d]\n", irq_idx);
if (!dpu_core_irq_is_valid(irq_idx)) {
pr_err("invalid IRQ=[%d, %d]\n",
DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
return -EINVAL;
}
@ -328,13 +343,15 @@ static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
intr->cache_irq_mask[reg_idx] = cache_irq_mask;
}
pr_debug("DPU IRQ %d %senabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", irq_idx, dbgstr,
pr_debug("DPU IRQ=[%d, %d] %senabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n",
DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), dbgstr,
DPU_IRQ_MASK(irq_idx), cache_irq_mask);
return 0;
}
static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr,
unsigned int irq_idx)
{
int reg_idx;
const struct dpu_intr_reg *reg;
@ -344,8 +361,9 @@ static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
if (!intr)
return -EINVAL;
if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
pr_err("invalid IRQ index: [%d]\n", irq_idx);
if (!dpu_core_irq_is_valid(irq_idx)) {
pr_err("invalid IRQ=[%d, %d]\n",
DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
return -EINVAL;
}
@ -377,7 +395,8 @@ static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
intr->cache_irq_mask[reg_idx] = cache_irq_mask;
}
pr_debug("DPU IRQ %d %sdisabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", irq_idx, dbgstr,
pr_debug("DPU IRQ=[%d, %d] %sdisabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n",
DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), dbgstr,
DPU_IRQ_MASK(irq_idx), cache_irq_mask);
return 0;
@ -419,7 +438,8 @@ static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
wmb();
}
u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx)
u32 dpu_core_irq_read(struct dpu_kms *dpu_kms,
unsigned int irq_idx)
{
struct dpu_hw_intr *intr = dpu_kms->hw_intr;
int reg_idx;
@ -429,14 +449,8 @@ u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx)
if (!intr)
return 0;
if (irq_idx < 0) {
DPU_ERROR("[%pS] invalid irq_idx=%d\n",
__builtin_return_address(0), irq_idx);
return 0;
}
if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
pr_err("invalid IRQ index: [%d]\n", irq_idx);
if (!dpu_core_irq_is_valid(irq_idx)) {
pr_err("invalid IRQ=[%d, %d]\n", DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
return 0;
}
@ -462,13 +476,12 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
const struct dpu_mdss_cfg *m)
{
struct dpu_hw_intr *intr;
int nirq = MDP_INTR_MAX * 32;
unsigned int i;
if (!addr || !m)
return ERR_PTR(-EINVAL);
intr = kzalloc(struct_size(intr, irq_tbl, nirq), GFP_KERNEL);
intr = kzalloc(sizeof(*intr), GFP_KERNEL);
if (!intr)
return ERR_PTR(-ENOMEM);
@ -479,8 +492,6 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
intr->hw.blk_addr = addr + m->mdp[0].base;
intr->total_irqs = nirq;
intr->irq_mask = BIT(MDP_SSPP_TOP0_INTR) |
BIT(MDP_SSPP_TOP0_INTR2) |
BIT(MDP_SSPP_TOP0_HIST_INTR);
@ -492,7 +503,7 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
intr->irq_mask |= BIT(MDP_INTFn_INTR(intf->id));
if (intf->intr_tear_rd_ptr != -1)
if (intf->intr_tear_rd_ptr)
intr->irq_mask |= BIT(DPU_IRQ_REG(intf->intr_tear_rd_ptr));
}
@ -506,76 +517,87 @@ void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
kfree(intr);
}
int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
void (*irq_cb)(void *arg, int irq_idx),
void *irq_arg)
int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms,
unsigned int irq_idx,
void (*irq_cb)(void *arg),
void *irq_arg)
{
struct dpu_hw_intr_entry *irq_entry;
unsigned long irq_flags;
int ret;
if (!irq_cb) {
DPU_ERROR("invalid ird_idx:%d irq_cb:%ps\n", irq_idx, irq_cb);
DPU_ERROR("invalid IRQ=[%d, %d] irq_cb:%ps\n",
DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
return -EINVAL;
}
if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
if (!dpu_core_irq_is_valid(irq_idx)) {
DPU_ERROR("invalid IRQ=[%d, %d]\n",
DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
return -EINVAL;
}
VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
VERB("[%pS] IRQ=[%d, %d]\n", __builtin_return_address(0),
DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
if (unlikely(WARN_ON(dpu_kms->hw_intr->irq_tbl[irq_idx].cb))) {
irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);
if (unlikely(WARN_ON(irq_entry->cb))) {
spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
return -EBUSY;
}
trace_dpu_core_irq_register_callback(irq_idx, irq_cb);
dpu_kms->hw_intr->irq_tbl[irq_idx].arg = irq_arg;
dpu_kms->hw_intr->irq_tbl[irq_idx].cb = irq_cb;
trace_dpu_core_irq_register_callback(DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
irq_entry->arg = irq_arg;
irq_entry->cb = irq_cb;
ret = dpu_hw_intr_enable_irq_locked(
dpu_kms->hw_intr,
irq_idx);
if (ret)
DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
irq_idx);
DPU_ERROR("Failed/ to enable IRQ=[%d, %d]\n",
DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
trace_dpu_irq_register_success(irq_idx);
trace_dpu_irq_register_success(DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
return 0;
}
int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx)
int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms,
unsigned int irq_idx)
{
struct dpu_hw_intr_entry *irq_entry;
unsigned long irq_flags;
int ret;
if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
if (!dpu_core_irq_is_valid(irq_idx)) {
DPU_ERROR("invalid IRQ=[%d, %d]\n",
DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
return -EINVAL;
}
VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
VERB("[%pS] IRQ=[%d, %d]\n", __builtin_return_address(0),
DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
trace_dpu_core_irq_unregister_callback(irq_idx);
trace_dpu_core_irq_unregister_callback(DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
ret = dpu_hw_intr_disable_irq_locked(dpu_kms->hw_intr, irq_idx);
if (ret)
DPU_ERROR("Fail to disable IRQ for irq_idx:%d: %d\n",
irq_idx, ret);
DPU_ERROR("Failed to disable IRQ=[%d, %d]: %d\n",
DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), ret);
dpu_kms->hw_intr->irq_tbl[irq_idx].cb = NULL;
dpu_kms->hw_intr->irq_tbl[irq_idx].arg = NULL;
irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);
irq_entry->cb = NULL;
irq_entry->arg = NULL;
spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
trace_dpu_irq_unregister_success(irq_idx);
trace_dpu_irq_unregister_success(DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
return 0;
}
@ -584,18 +606,21 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx)
static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
{
struct dpu_kms *dpu_kms = s->private;
struct dpu_hw_intr_entry *irq_entry;
unsigned long irq_flags;
int i, irq_count;
void *cb;
for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
for (i = 1; i <= DPU_NUM_IRQS; i++) {
spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
irq_count = atomic_read(&dpu_kms->hw_intr->irq_tbl[i].count);
cb = dpu_kms->hw_intr->irq_tbl[i].cb;
irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
irq_count = atomic_read(&irq_entry->count);
cb = irq_entry->cb;
spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
if (irq_count || cb)
seq_printf(s, "idx:%d irq:%d cb:%ps\n", i, irq_count, cb);
seq_printf(s, "IRQ=[%d, %d] count:%d cb:%ps\n",
DPU_IRQ_REG(i), DPU_IRQ_BIT(i), irq_count, cb);
}
return 0;
@ -614,6 +639,7 @@ void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
void dpu_core_irq_preinstall(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
struct dpu_hw_intr_entry *irq_entry;
int i;
pm_runtime_get_sync(&dpu_kms->pdev->dev);
@ -621,22 +647,28 @@ void dpu_core_irq_preinstall(struct msm_kms *kms)
dpu_disable_all_irqs(dpu_kms);
pm_runtime_put_sync(&dpu_kms->pdev->dev);
for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
atomic_set(&dpu_kms->hw_intr->irq_tbl[i].count, 0);
for (i = 1; i <= DPU_NUM_IRQS; i++) {
irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
atomic_set(&irq_entry->count, 0);
}
}
void dpu_core_irq_uninstall(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
struct dpu_hw_intr_entry *irq_entry;
int i;
if (!dpu_kms->hw_intr)
return;
pm_runtime_get_sync(&dpu_kms->pdev->dev);
for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
if (dpu_kms->hw_intr->irq_tbl[i].cb)
DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
for (i = 1; i <= DPU_NUM_IRQS; i++) {
irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
if (irq_entry->cb)
DPU_ERROR("IRQ=[%d, %d] still enabled/registered\n",
DPU_IRQ_REG(i), DPU_IRQ_BIT(i));
}
dpu_clear_irqs(dpu_kms);
dpu_disable_all_irqs(dpu_kms);

View File

@ -36,7 +36,17 @@ enum dpu_hw_intr_reg {
#define MDP_INTFn_INTR(intf) (MDP_INTF0_INTR + (intf - INTF_0))
#define DPU_IRQ_IDX(reg_idx, offset) (reg_idx * 32 + offset)
#define DPU_IRQ_IDX(reg_idx, offset) (1 + reg_idx * 32 + offset)
#define DPU_IRQ_REG(irq_idx) ((irq_idx - 1) / 32)
#define DPU_IRQ_BIT(irq_idx) ((irq_idx - 1) % 32)
#define DPU_NUM_IRQS (MDP_INTR_MAX * 32)
struct dpu_hw_intr_entry {
void (*cb)(void *arg);
void *arg;
atomic_t count;
};
/**
* struct dpu_hw_intr: hw interrupts handling data structure
@ -44,7 +54,6 @@ enum dpu_hw_intr_reg {
* @ops: function pointer mapping for IRQ handling
* @cache_irq_mask: array of IRQ enable masks reg storage created during init
* @save_irq_status: array of IRQ status reg storage created during init
* @total_irqs: total number of irq_idx mapped in the hw_interrupts
* @irq_lock: spinlock for accessing IRQ resources
* @irq_cb_tbl: array of IRQ callbacks
*/
@ -52,16 +61,11 @@ struct dpu_hw_intr {
struct dpu_hw_blk_reg_map hw;
u32 cache_irq_mask[MDP_INTR_MAX];
u32 *save_irq_status;
u32 total_irqs;
spinlock_t irq_lock;
unsigned long irq_mask;
const struct dpu_intr_reg *intr_set;
struct {
void (*cb)(void *arg, int irq_idx);
void *arg;
atomic_t count;
} irq_tbl[] __counted_by(total_irqs);
struct dpu_hw_intr_entry irq_tbl[DPU_NUM_IRQS];
};
/**

View File

@ -521,34 +521,12 @@ static void dpu_hw_intf_program_intf_cmd_cfg(struct dpu_hw_intf *ctx,
if (cmd_mode_cfg->data_compress)
intf_cfg2 |= INTF_CFG2_DCE_DATA_COMPRESS;
if (cmd_mode_cfg->wide_bus_en)
intf_cfg2 |= INTF_CFG2_DATABUS_WIDEN;
DPU_REG_WRITE(&ctx->hw, INTF_CONFIG2, intf_cfg2);
}
static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
unsigned long cap, const struct dpu_mdss_version *mdss_rev)
{
ops->setup_timing_gen = dpu_hw_intf_setup_timing_engine;
ops->setup_prg_fetch = dpu_hw_intf_setup_prg_fetch;
ops->get_status = dpu_hw_intf_get_status;
ops->enable_timing = dpu_hw_intf_enable_timing_engine;
ops->get_line_count = dpu_hw_intf_get_line_count;
if (cap & BIT(DPU_INTF_INPUT_CTRL))
ops->bind_pingpong_blk = dpu_hw_intf_bind_pingpong_blk;
ops->setup_misr = dpu_hw_intf_setup_misr;
ops->collect_misr = dpu_hw_intf_collect_misr;
if (cap & BIT(DPU_INTF_TE)) {
ops->enable_tearcheck = dpu_hw_intf_enable_te;
ops->disable_tearcheck = dpu_hw_intf_disable_te;
ops->connect_external_te = dpu_hw_intf_connect_external_te;
ops->vsync_sel = dpu_hw_intf_vsync_sel;
ops->disable_autorefresh = dpu_hw_intf_disable_autorefresh;
}
if (mdss_rev->core_major_ver >= 7)
ops->program_intf_cmd_cfg = dpu_hw_intf_program_intf_cmd_cfg;
}
struct dpu_hw_intf *dpu_hw_intf_init(const struct dpu_intf_cfg *cfg,
void __iomem *addr, const struct dpu_mdss_version *mdss_rev)
{
@ -571,7 +549,35 @@ struct dpu_hw_intf *dpu_hw_intf_init(const struct dpu_intf_cfg *cfg,
*/
c->idx = cfg->id;
c->cap = cfg;
_setup_intf_ops(&c->ops, c->cap->features, mdss_rev);
c->ops.setup_timing_gen = dpu_hw_intf_setup_timing_engine;
c->ops.setup_prg_fetch = dpu_hw_intf_setup_prg_fetch;
c->ops.get_status = dpu_hw_intf_get_status;
c->ops.enable_timing = dpu_hw_intf_enable_timing_engine;
c->ops.get_line_count = dpu_hw_intf_get_line_count;
c->ops.setup_misr = dpu_hw_intf_setup_misr;
c->ops.collect_misr = dpu_hw_intf_collect_misr;
if (cfg->features & BIT(DPU_INTF_INPUT_CTRL))
c->ops.bind_pingpong_blk = dpu_hw_intf_bind_pingpong_blk;
/* INTF TE is only for DSI interfaces */
if (mdss_rev->core_major_ver >= 5 && cfg->type == INTF_DSI) {
WARN_ON(!cfg->intr_tear_rd_ptr);
c->ops.enable_tearcheck = dpu_hw_intf_enable_te;
c->ops.disable_tearcheck = dpu_hw_intf_disable_te;
c->ops.connect_external_te = dpu_hw_intf_connect_external_te;
c->ops.vsync_sel = dpu_hw_intf_vsync_sel;
c->ops.disable_autorefresh = dpu_hw_intf_disable_autorefresh;
}
/* Technically, INTF_CONFIG2 is present for DPU 5.0+, but
* we can configure it for DPU 7.0+ since the wide bus and DSC flags
* would not be set for DPU < 7.0 anyway
*/
if (mdss_rev->core_major_ver >= 7)
c->ops.program_intf_cmd_cfg = dpu_hw_intf_program_intf_cmd_cfg;
return c;
}
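A hedged summary of the rewired init above (my reading of this hunk and the pingpong hunk below, not code from the patch): tearcheck support is now derived from the core revision instead of catalog feature bits, with pre-5.0 DPUs driving TE through the PINGPONG block and 5.0+ DPUs driving it through the INTF block, for DSI interfaces only:

static bool intf_handles_tearcheck(unsigned int core_major_ver, bool is_dsi)
{
	/* DPU >= 5.0 moved TE handling from PINGPONG to the DSI INTFs */
	return core_major_ver >= 5 && is_dsi;
}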

View File

@ -50,6 +50,7 @@ struct dpu_hw_intf_status {
struct dpu_hw_intf_cmd_mode_cfg {
u8 data_compress; /* enable data compress between dpu and dsi */
u8 wide_bus_en; /* enable databus widen mode */
};
/**

View File

@ -281,29 +281,8 @@ static int dpu_hw_pp_setup_dsc(struct dpu_hw_pingpong *pp)
return 0;
}
static void _setup_pingpong_ops(struct dpu_hw_pingpong *c,
unsigned long features)
{
if (test_bit(DPU_PINGPONG_TE, &features)) {
c->ops.enable_tearcheck = dpu_hw_pp_enable_te;
c->ops.disable_tearcheck = dpu_hw_pp_disable_te;
c->ops.connect_external_te = dpu_hw_pp_connect_external_te;
c->ops.get_line_count = dpu_hw_pp_get_line_count;
c->ops.disable_autorefresh = dpu_hw_pp_disable_autorefresh;
}
if (test_bit(DPU_PINGPONG_DSC, &features)) {
c->ops.setup_dsc = dpu_hw_pp_setup_dsc;
c->ops.enable_dsc = dpu_hw_pp_dsc_enable;
c->ops.disable_dsc = dpu_hw_pp_dsc_disable;
}
if (test_bit(DPU_PINGPONG_DITHER, &features))
c->ops.setup_dither = dpu_hw_pp_setup_dither;
};
struct dpu_hw_pingpong *dpu_hw_pingpong_init(const struct dpu_pingpong_cfg *cfg,
void __iomem *addr)
void __iomem *addr, const struct dpu_mdss_version *mdss_rev)
{
struct dpu_hw_pingpong *c;
@ -316,7 +295,25 @@ struct dpu_hw_pingpong *dpu_hw_pingpong_init(const struct dpu_pingpong_cfg *cfg,
c->idx = cfg->id;
c->caps = cfg;
_setup_pingpong_ops(c, c->caps->features);
if (mdss_rev->core_major_ver < 5) {
WARN_ON(!cfg->intr_rdptr);
c->ops.enable_tearcheck = dpu_hw_pp_enable_te;
c->ops.disable_tearcheck = dpu_hw_pp_disable_te;
c->ops.connect_external_te = dpu_hw_pp_connect_external_te;
c->ops.get_line_count = dpu_hw_pp_get_line_count;
c->ops.disable_autorefresh = dpu_hw_pp_disable_autorefresh;
}
if (test_bit(DPU_PINGPONG_DSC, &cfg->features)) {
c->ops.setup_dsc = dpu_hw_pp_setup_dsc;
c->ops.enable_dsc = dpu_hw_pp_dsc_enable;
c->ops.disable_dsc = dpu_hw_pp_dsc_disable;
}
if (test_bit(DPU_PINGPONG_DITHER, &cfg->features))
c->ops.setup_dither = dpu_hw_pp_setup_dither;
return c;
}

View File

@ -123,10 +123,11 @@ static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
* pingpong catalog entry.
* @cfg: Pingpong catalog entry for which driver object is required
* @addr: Mapped register io address of MDP
* @mdss_rev: dpu core's major and minor versions
* Return: Error code or allocated dpu_hw_pingpong context
*/
struct dpu_hw_pingpong *dpu_hw_pingpong_init(const struct dpu_pingpong_cfg *cfg,
void __iomem *addr);
void __iomem *addr, const struct dpu_mdss_version *mdss_rev);
/**
* dpu_hw_pingpong_destroy - destroys pingpong driver context

View File

@ -69,6 +69,7 @@
#define SSPP_EXCL_REC_XY_REC1 0x188
#define SSPP_EXCL_REC_SIZE 0x1B4
#define SSPP_EXCL_REC_XY 0x1B8
#define SSPP_CLK_CTRL 0x330
/* SSPP_SRC_OP_MODE & OP_MODE_REC1 */
#define MDSS_MDP_OP_DEINTERLACE BIT(22)
@ -581,8 +582,18 @@ static void dpu_hw_sspp_setup_cdp(struct dpu_sw_pipe *pipe,
dpu_setup_cdp(&ctx->hw, cdp_cntl_offset, fmt, enable);
}
static bool dpu_hw_sspp_setup_clk_force_ctrl(struct dpu_hw_sspp *ctx, bool enable)
{
static const struct dpu_clk_ctrl_reg sspp_clk_ctrl = {
.reg_off = SSPP_CLK_CTRL,
.bit_off = 0
};
return dpu_hw_clk_force_ctrl(&ctx->hw, &sspp_clk_ctrl, enable);
}
static void _setup_layer_ops(struct dpu_hw_sspp *c,
unsigned long features)
unsigned long features, const struct dpu_mdss_version *mdss_rev)
{
c->ops.setup_format = dpu_hw_sspp_setup_format;
c->ops.setup_rects = dpu_hw_sspp_setup_rects;
@ -612,6 +623,9 @@ static void _setup_layer_ops(struct dpu_hw_sspp *c,
if (test_bit(DPU_SSPP_CDP, &features))
c->ops.setup_cdp = dpu_hw_sspp_setup_cdp;
if (mdss_rev->core_major_ver >= 9)
c->ops.setup_clk_force_ctrl = dpu_hw_sspp_setup_clk_force_ctrl;
}
#ifdef CONFIG_DEBUG_FS
@ -672,7 +686,8 @@ int _dpu_hw_sspp_init_debugfs(struct dpu_hw_sspp *hw_pipe, struct dpu_kms *kms,
#endif
struct dpu_hw_sspp *dpu_hw_sspp_init(const struct dpu_sspp_cfg *cfg,
void __iomem *addr, const struct msm_mdss_data *mdss_data)
void __iomem *addr, const struct msm_mdss_data *mdss_data,
const struct dpu_mdss_version *mdss_rev)
{
struct dpu_hw_sspp *hw_pipe;
@ -690,7 +705,7 @@ struct dpu_hw_sspp *dpu_hw_sspp_init(const struct dpu_sspp_cfg *cfg,
hw_pipe->ubwc = mdss_data;
hw_pipe->idx = cfg->id;
hw_pipe->cap = cfg;
_setup_layer_ops(hw_pipe, hw_pipe->cap->features);
_setup_layer_ops(hw_pipe, hw_pipe->cap->features, mdss_rev);
return hw_pipe;
}

View File

@ -271,6 +271,14 @@ struct dpu_hw_sspp_ops {
void (*setup_qos_ctrl)(struct dpu_hw_sspp *ctx,
bool danger_safe_en);
/**
* setup_clk_force_ctrl - setup clock force control
* @ctx: Pointer to pipe context
* @enable: enable clock force if true
*/
bool (*setup_clk_force_ctrl)(struct dpu_hw_sspp *ctx,
bool enable);
/**
* setup_histogram - setup histograms
* @ctx: Pointer to pipe context
@ -334,9 +342,11 @@ struct dpu_kms;
* @cfg: Pipe catalog entry for which driver object is required
* @addr: Mapped register io address of MDP
* @mdss_data: UBWC / MDSS configuration data
* @mdss_rev: dpu core's major and minor versions
*/
struct dpu_hw_sspp *dpu_hw_sspp_init(const struct dpu_sspp_cfg *cfg,
void __iomem *addr, const struct msm_mdss_data *mdss_data);
void __iomem *addr, const struct msm_mdss_data *mdss_data,
const struct dpu_mdss_version *mdss_rev);
/**
* dpu_hw_sspp_destroy(): Destroys SSPP driver context

View File

@ -66,34 +66,13 @@ static void dpu_hw_setup_split_pipe(struct dpu_hw_mdp *mdp,
static bool dpu_hw_setup_clk_force_ctrl(struct dpu_hw_mdp *mdp,
enum dpu_clk_ctrl_type clk_ctrl, bool enable)
{
struct dpu_hw_blk_reg_map *c;
u32 reg_off, bit_off;
u32 reg_val, new_val;
bool clk_forced_on;
if (!mdp)
return false;
c = &mdp->hw;
if (clk_ctrl <= DPU_CLK_CTRL_NONE || clk_ctrl >= DPU_CLK_CTRL_MAX)
return false;
reg_off = mdp->caps->clk_ctrls[clk_ctrl].reg_off;
bit_off = mdp->caps->clk_ctrls[clk_ctrl].bit_off;
reg_val = DPU_REG_READ(c, reg_off);
if (enable)
new_val = reg_val | BIT(bit_off);
else
new_val = reg_val & ~BIT(bit_off);
DPU_REG_WRITE(c, reg_off, new_val);
clk_forced_on = !(reg_val & BIT(bit_off));
return clk_forced_on;
return dpu_hw_clk_force_ctrl(&mdp->hw, &mdp->caps->clk_ctrls[clk_ctrl], enable);
}

View File

@ -546,3 +546,24 @@ void dpu_setup_cdp(struct dpu_hw_blk_reg_map *c, u32 offset,
DPU_REG_WRITE(c, offset, cdp_cntl);
}
bool dpu_hw_clk_force_ctrl(struct dpu_hw_blk_reg_map *c,
const struct dpu_clk_ctrl_reg *clk_ctrl_reg,
bool enable)
{
u32 reg_val, new_val;
bool clk_forced_on;
reg_val = DPU_REG_READ(c, clk_ctrl_reg->reg_off);
if (enable)
new_val = reg_val | BIT(clk_ctrl_reg->bit_off);
else
new_val = reg_val & ~BIT(clk_ctrl_reg->bit_off);
DPU_REG_WRITE(c, clk_ctrl_reg->reg_off, new_val);
clk_forced_on = !(reg_val & BIT(clk_ctrl_reg->bit_off));
return clk_forced_on;
}
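A self-contained model (illustration only, a plain variable standing in for the MMIO register) of the read-modify-write just factored out into dpu_hw_clk_force_ctrl(). The detail worth noting is the return value: it reports whether the force bit was previously clear, i.e. whether this call is the one that actually forced the clock on, so the caller knows whether to undo the override afterwards:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct fake_clk_ctrl {
	uint32_t reg;			/* stands in for the clk ctrl register */
	unsigned int bit_off;
};

static bool clk_force_ctrl(struct fake_clk_ctrl *c, bool enable)
{
	uint32_t old = c->reg;

	if (enable)
		c->reg |= 1u << c->bit_off;
	else
		c->reg &= ~(1u << c->bit_off);

	/* true iff the force bit was clear before this write */
	return !(old & (1u << c->bit_off));
}

int main(void)
{
	struct fake_clk_ctrl c = { .reg = 0, .bit_off = 3 };
	bool forced_on = clk_force_ctrl(&c, true);

	assert(forced_on);	/* this call turned it on, so it must restore */

	/* ... program OT / QoS registers while the clock is forced on ... */

	if (forced_on)
		clk_force_ctrl(&c, false);

	assert(c.reg == 0);	/* override dropped again */
	return 0;
}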

View File

@ -367,4 +367,8 @@ int dpu_hw_collect_misr(struct dpu_hw_blk_reg_map *c,
u32 misr_signature_offset,
u32 *misr_value);
bool dpu_hw_clk_force_ctrl(struct dpu_hw_blk_reg_map *c,
const struct dpu_clk_ctrl_reg *clk_ctrl_reg,
bool enable);
#endif /* _DPU_HW_UTIL_H */

View File

@ -43,6 +43,7 @@
#define WB_MUX 0x150
#define WB_CROP_CTRL 0x154
#define WB_CROP_OFFSET 0x158
#define WB_CLK_CTRL 0x178
#define WB_CSC_BASE 0x260
#define WB_DST_ADDR_SW_STATUS 0x2B0
#define WB_CDP_CNTL 0x2B4
@ -175,8 +176,18 @@ static void dpu_hw_wb_bind_pingpong_blk(
DPU_REG_WRITE(c, WB_MUX, mux_cfg);
}
static bool dpu_hw_wb_setup_clk_force_ctrl(struct dpu_hw_wb *ctx, bool enable)
{
static const struct dpu_clk_ctrl_reg wb_clk_ctrl = {
.reg_off = WB_CLK_CTRL,
.bit_off = 0
};
return dpu_hw_clk_force_ctrl(&ctx->hw, &wb_clk_ctrl, enable);
}
static void _setup_wb_ops(struct dpu_hw_wb_ops *ops,
unsigned long features)
unsigned long features, const struct dpu_mdss_version *mdss_rev)
{
ops->setup_outaddress = dpu_hw_wb_setup_outaddress;
ops->setup_outformat = dpu_hw_wb_setup_format;
@ -192,10 +203,13 @@ static void _setup_wb_ops(struct dpu_hw_wb_ops *ops,
if (test_bit(DPU_WB_INPUT_CTRL, &features))
ops->bind_pingpong_blk = dpu_hw_wb_bind_pingpong_blk;
if (mdss_rev->core_major_ver >= 9)
ops->setup_clk_force_ctrl = dpu_hw_wb_setup_clk_force_ctrl;
}
struct dpu_hw_wb *dpu_hw_wb_init(const struct dpu_wb_cfg *cfg,
void __iomem *addr)
void __iomem *addr, const struct dpu_mdss_version *mdss_rev)
{
struct dpu_hw_wb *c;
@ -212,7 +226,7 @@ struct dpu_hw_wb *dpu_hw_wb_init(const struct dpu_wb_cfg *cfg,
/* Assign ops */
c->idx = cfg->id;
c->caps = cfg;
_setup_wb_ops(&c->ops, c->caps->features);
_setup_wb_ops(&c->ops, c->caps->features, mdss_rev);
return c;
}

View File

@ -29,6 +29,7 @@ struct dpu_hw_wb_cfg {
* @setup_outformat: setup output format of writeback block from writeback job
* @setup_qos_lut: setup qos LUT for writeback block based on input
* @setup_cdp: setup chroma down prefetch block for writeback block
* @setup_clk_force_ctrl: setup clock force control
* @bind_pingpong_blk: enable/disable the connection with ping-pong block
*/
struct dpu_hw_wb_ops {
@ -48,6 +49,9 @@ struct dpu_hw_wb_ops {
const struct dpu_format *fmt,
bool enable);
bool (*setup_clk_force_ctrl)(struct dpu_hw_wb *ctx,
bool enable);
void (*bind_pingpong_blk)(struct dpu_hw_wb *ctx,
const enum dpu_pingpong pp);
};
@ -74,10 +78,11 @@ struct dpu_hw_wb {
* dpu_hw_wb_init() - Initializes the writeback hw driver object.
* @cfg: wb_path catalog entry for which driver object is required
* @addr: mapped register io address of MDP
* @mdss_rev: dpu core's major and minor versions
* Return: Error code or allocated dpu_hw_wb context
*/
struct dpu_hw_wb *dpu_hw_wb_init(const struct dpu_wb_cfg *cfg,
void __iomem *addr);
void __iomem *addr, const struct dpu_mdss_version *mdss_rev);
/**
* dpu_hw_wb_destroy(): Destroy writeback hw driver object.

View File

@ -389,8 +389,7 @@ static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms)
{
struct icc_path *path0;
struct icc_path *path1;
struct drm_device *dev = dpu_kms->dev;
struct device *dpu_dev = dev->dev;
struct device *dpu_dev = &dpu_kms->pdev->dev;
path0 = msm_icc_get(dpu_dev, "mdp0-mem");
path1 = msm_icc_get(dpu_dev, "mdp1-mem");
@ -829,21 +828,9 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
dpu_kms->catalog = NULL;
if (dpu_kms->vbif[VBIF_NRT])
devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
dpu_kms->vbif[VBIF_NRT] = NULL;
if (dpu_kms->vbif[VBIF_RT])
devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
dpu_kms->vbif[VBIF_RT] = NULL;
if (dpu_kms->hw_mdp)
dpu_hw_mdp_destroy(dpu_kms->hw_mdp);
dpu_kms->hw_mdp = NULL;
if (dpu_kms->mmio)
devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
dpu_kms->mmio = NULL;
}
static void dpu_kms_destroy(struct msm_kms *kms)
@ -1079,30 +1066,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
atomic_set(&dpu_kms->bandwidth_ref, 0);
dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp");
if (IS_ERR(dpu_kms->mmio)) {
rc = PTR_ERR(dpu_kms->mmio);
DPU_ERROR("mdp register memory map failed: %d\n", rc);
dpu_kms->mmio = NULL;
goto error;
}
DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif");
if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
rc = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
DPU_ERROR("vbif register memory map failed: %d\n", rc);
dpu_kms->vbif[VBIF_RT] = NULL;
goto error;
}
dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(dpu_kms->pdev, "vbif_nrt");
if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
dpu_kms->vbif[VBIF_NRT] = NULL;
DPU_DEBUG("VBIF NRT is not defined");
}
dpu_kms_parse_data_bus_icc_path(dpu_kms);
rc = pm_runtime_resume_and_get(&dpu_kms->pdev->dev);
if (rc < 0)
goto error;
@ -1241,33 +1204,11 @@ static int dpu_kms_init(struct drm_device *ddev)
struct msm_drm_private *priv = ddev->dev_private;
struct device *dev = ddev->dev;
struct platform_device *pdev = to_platform_device(dev);
struct dpu_kms *dpu_kms;
int irq;
struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
struct dev_pm_opp *opp;
int ret = 0;
unsigned long max_freq = ULONG_MAX;
dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
if (!dpu_kms)
return -ENOMEM;
ret = devm_pm_opp_set_clkname(dev, "core");
if (ret)
return ret;
/* OPP table is optional */
ret = devm_pm_opp_of_add_table(dev);
if (ret && ret != -ENODEV) {
dev_err(dev, "invalid OPP table in device tree\n");
return ret;
}
ret = devm_clk_bulk_get_all(&pdev->dev, &dpu_kms->clocks);
if (ret < 0) {
DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
return ret;
}
dpu_kms->num_clocks = ret;
opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
if (!IS_ERR(opp))
dev_pm_opp_put(opp);
@ -1280,26 +1221,74 @@ static int dpu_kms_init(struct drm_device *ddev)
return ret;
}
dpu_kms->dev = ddev;
dpu_kms->pdev = pdev;
pm_runtime_enable(&pdev->dev);
dpu_kms->rpm_enabled = true;
priv->kms = &dpu_kms->base;
irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
if (!irq) {
DPU_ERROR("failed to get irq\n");
return -EINVAL;
}
dpu_kms->base.irq = irq;
return 0;
}
static int dpu_dev_probe(struct platform_device *pdev)
{
return msm_drv_probe(&pdev->dev, dpu_kms_init);
struct device *dev = &pdev->dev;
struct dpu_kms *dpu_kms;
int irq;
int ret = 0;
dpu_kms = devm_kzalloc(dev, sizeof(*dpu_kms), GFP_KERNEL);
if (!dpu_kms)
return -ENOMEM;
dpu_kms->pdev = pdev;
ret = devm_pm_opp_set_clkname(dev, "core");
if (ret)
return ret;
/* OPP table is optional */
ret = devm_pm_opp_of_add_table(dev);
if (ret && ret != -ENODEV)
return dev_err_probe(dev, ret, "invalid OPP table in device tree\n");
ret = devm_clk_bulk_get_all(&pdev->dev, &dpu_kms->clocks);
if (ret < 0)
return dev_err_probe(dev, ret, "failed to parse clocks\n");
dpu_kms->num_clocks = ret;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return dev_err_probe(dev, irq, "failed to get irq\n");
dpu_kms->base.irq = irq;
dpu_kms->mmio = msm_ioremap(pdev, "mdp");
if (IS_ERR(dpu_kms->mmio)) {
ret = PTR_ERR(dpu_kms->mmio);
DPU_ERROR("mdp register memory map failed: %d\n", ret);
dpu_kms->mmio = NULL;
return ret;
}
DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
dpu_kms->vbif[VBIF_RT] = msm_ioremap(pdev, "vbif");
if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
ret = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
DPU_ERROR("vbif register memory map failed: %d\n", ret);
dpu_kms->vbif[VBIF_RT] = NULL;
return ret;
}
dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(pdev, "vbif_nrt");
if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
dpu_kms->vbif[VBIF_NRT] = NULL;
DPU_DEBUG("VBIF NRT is not defined");
}
ret = dpu_kms_parse_data_bus_icc_path(dpu_kms);
if (ret)
return ret;
return msm_drv_probe(&pdev->dev, dpu_kms_init, &dpu_kms->base);
}
static void dpu_dev_remove(struct platform_device *pdev)
@ -1353,8 +1342,8 @@ static const struct dev_pm_ops dpu_pm_ops = {
SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
.prepare = msm_pm_prepare,
.complete = msm_pm_complete,
.prepare = msm_kms_pm_prepare,
.complete = msm_kms_pm_complete,
};
static const struct of_device_id dpu_dt_match[] = {
@ -1381,7 +1370,7 @@ MODULE_DEVICE_TABLE(of, dpu_dt_match);
static struct platform_driver dpu_driver = {
.probe = dpu_dev_probe,
.remove_new = dpu_dev_remove,
.shutdown = msm_drv_shutdown,
.shutdown = msm_kms_shutdown,
.driver = {
.name = "msm_dpu",
.of_match_table = dpu_dt_match,

View File

@ -333,6 +333,23 @@ static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
enable);
}
static bool _dpu_plane_sspp_clk_force_ctrl(struct dpu_hw_sspp *sspp,
struct dpu_hw_mdp *mdp,
bool enable, bool *forced_on)
{
if (sspp->ops.setup_clk_force_ctrl) {
*forced_on = sspp->ops.setup_clk_force_ctrl(sspp, enable);
return true;
}
if (mdp->ops.setup_clk_force_ctrl) {
*forced_on = mdp->ops.setup_clk_force_ctrl(mdp, sspp->cap->clk_ctrl, enable);
return true;
}
return false;
}
/**
* _dpu_plane_set_ot_limit - set OT limit for the given plane
* @plane: Pointer to drm plane
@ -348,6 +365,7 @@ static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_vbif_set_ot_params ot_params;
struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
bool forced_on = false;
memset(&ot_params, 0, sizeof(ot_params));
ot_params.xin_id = pipe->sspp->cap->xin_id;
@ -357,10 +375,17 @@ static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
ot_params.is_wfd = !pdpu->is_rt_pipe;
ot_params.frame_rate = frame_rate;
ot_params.vbif_idx = VBIF_RT;
ot_params.clk_ctrl = pipe->sspp->cap->clk_ctrl;
ot_params.rd = true;
if (!_dpu_plane_sspp_clk_force_ctrl(pipe->sspp, dpu_kms->hw_mdp,
true, &forced_on))
return;
dpu_vbif_set_ot_limit(dpu_kms, &ot_params);
if (forced_on)
_dpu_plane_sspp_clk_force_ctrl(pipe->sspp, dpu_kms->hw_mdp,
false, &forced_on);
}
/**
@ -374,21 +399,28 @@ static void _dpu_plane_set_qos_remap(struct drm_plane *plane,
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_vbif_set_qos_params qos_params;
struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
bool forced_on = false;
memset(&qos_params, 0, sizeof(qos_params));
qos_params.vbif_idx = VBIF_RT;
qos_params.clk_ctrl = pipe->sspp->cap->clk_ctrl;
qos_params.xin_id = pipe->sspp->cap->xin_id;
qos_params.num = pipe->sspp->idx - SSPP_VIG0;
qos_params.is_rt = pdpu->is_rt_pipe;
DPU_DEBUG_PLANE(pdpu, "pipe:%d vbif:%d xin:%d rt:%d, clk_ctrl:%d\n",
DPU_DEBUG_PLANE(pdpu, "pipe:%d vbif:%d xin:%d rt:%d\n",
qos_params.num,
qos_params.vbif_idx,
qos_params.xin_id, qos_params.is_rt,
qos_params.clk_ctrl);
qos_params.xin_id, qos_params.is_rt);
if (!_dpu_plane_sspp_clk_force_ctrl(pipe->sspp, dpu_kms->hw_mdp,
true, &forced_on))
return;
dpu_vbif_set_qos_remap(dpu_kms, &qos_params);
if (forced_on)
_dpu_plane_sspp_clk_force_ctrl(pipe->sspp, dpu_kms->hw_mdp,
false, &forced_on);
}
static void _dpu_plane_setup_scaler3(struct dpu_hw_sspp *pipe_hw,

View File

@ -146,7 +146,7 @@ int dpu_rm_init(struct dpu_rm *rm,
struct dpu_hw_pingpong *hw;
const struct dpu_pingpong_cfg *pp = &cat->pingpong[i];
hw = dpu_hw_pingpong_init(pp, mmio);
hw = dpu_hw_pingpong_init(pp, mmio, cat->mdss_ver);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed pingpong object creation: err %d\n",
@ -175,7 +175,7 @@ int dpu_rm_init(struct dpu_rm *rm,
struct dpu_hw_wb *hw;
const struct dpu_wb_cfg *wb = &cat->wb[i];
hw = dpu_hw_wb_init(wb, mmio);
hw = dpu_hw_wb_init(wb, mmio, cat->mdss_ver);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed wb object creation: err %d\n", rc);
@ -231,7 +231,7 @@ int dpu_rm_init(struct dpu_rm *rm,
struct dpu_hw_sspp *hw;
const struct dpu_sspp_cfg *sspp = &cat->sspp[i];
hw = dpu_hw_sspp_init(sspp, mmio, mdss_data);
hw = dpu_hw_sspp_init(sspp, mmio, mdss_data, cat->mdss_ver);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed sspp object creation: err %d\n", rc);

View File

@ -168,46 +168,50 @@ TRACE_EVENT(dpu_perf_crtc_update,
);
DECLARE_EVENT_CLASS(dpu_irq_template,
TP_PROTO(int irq_idx),
TP_ARGS(irq_idx),
TP_PROTO(unsigned int irq_reg, unsigned int irq_bit),
TP_ARGS(irq_reg, irq_bit),
TP_STRUCT__entry(
__field( int, irq_idx )
__field( unsigned int, irq_reg )
__field( unsigned int, irq_bit )
),
TP_fast_assign(
__entry->irq_idx = irq_idx;
__entry->irq_reg = irq_reg;
__entry->irq_bit = irq_bit;
),
TP_printk("irq=%d", __entry->irq_idx)
TP_printk("IRQ=[%d, %d]", __entry->irq_reg, __entry->irq_bit)
);
DEFINE_EVENT(dpu_irq_template, dpu_irq_register_success,
TP_PROTO(int irq_idx),
TP_ARGS(irq_idx)
TP_PROTO(unsigned int irq_reg, unsigned int irq_bit),
TP_ARGS(irq_reg, irq_bit)
);
DEFINE_EVENT(dpu_irq_template, dpu_irq_unregister_success,
TP_PROTO(int irq_idx),
TP_ARGS(irq_idx)
TP_PROTO(unsigned int irq_reg, unsigned int irq_bit),
TP_ARGS(irq_reg, irq_bit)
);
TRACE_EVENT(dpu_enc_irq_wait_success,
TP_PROTO(uint32_t drm_id, void *func,
int irq_idx, enum dpu_pingpong pp_idx, int atomic_cnt),
TP_ARGS(drm_id, func, irq_idx, pp_idx, atomic_cnt),
unsigned int irq_reg, unsigned int irq_bit, enum dpu_pingpong pp_idx, int atomic_cnt),
TP_ARGS(drm_id, func, irq_reg, irq_bit, pp_idx, atomic_cnt),
TP_STRUCT__entry(
__field( uint32_t, drm_id )
__field( void *, func )
__field( int, irq_idx )
__field( unsigned int, irq_reg )
__field( unsigned int, irq_bit )
__field( enum dpu_pingpong, pp_idx )
__field( int, atomic_cnt )
),
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->func = func;
__entry->irq_idx = irq_idx;
__entry->irq_reg = irq_reg;
__entry->irq_bit = irq_bit;
__entry->pp_idx = pp_idx;
__entry->atomic_cnt = atomic_cnt;
),
TP_printk("id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d",
TP_printk("id=%u, callback=%ps, IRQ=[%d, %d], pp=%d, atomic_cnt=%d",
__entry->drm_id, __entry->func,
__entry->irq_idx, __entry->pp_idx, __entry->atomic_cnt)
__entry->irq_reg, __entry->irq_bit, __entry->pp_idx, __entry->atomic_cnt)
);
DECLARE_EVENT_CLASS(dpu_drm_obj_template,
@ -484,12 +488,13 @@ DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_more_pending,
);
TRACE_EVENT(dpu_enc_wait_event_timeout,
TP_PROTO(uint32_t drm_id, int irq_idx, int rc, s64 time,
TP_PROTO(uint32_t drm_id, unsigned int irq_reg, unsigned int irq_bit, int rc, s64 time,
s64 expected_time, int atomic_cnt),
TP_ARGS(drm_id, irq_idx, rc, time, expected_time, atomic_cnt),
TP_ARGS(drm_id, irq_reg, irq_bit, rc, time, expected_time, atomic_cnt),
TP_STRUCT__entry(
__field( uint32_t, drm_id )
__field( int, irq_idx )
__field( unsigned int, irq_reg )
__field( unsigned int, irq_bit )
__field( int, rc )
__field( s64, time )
__field( s64, expected_time )
@ -497,14 +502,15 @@ TRACE_EVENT(dpu_enc_wait_event_timeout,
),
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->irq_idx = irq_idx;
__entry->irq_reg = irq_reg;
__entry->irq_bit = irq_bit;
__entry->rc = rc;
__entry->time = time;
__entry->expected_time = expected_time;
__entry->atomic_cnt = atomic_cnt;
),
TP_printk("id=%u, irq_idx=%d, rc=%d, time=%lld, expected=%lld cnt=%d",
__entry->drm_id, __entry->irq_idx, __entry->rc, __entry->time,
TP_printk("id=%u, IRQ=[%d, %d], rc=%d, time=%lld, expected=%lld cnt=%d",
__entry->drm_id, __entry->irq_reg, __entry->irq_bit, __entry->rc, __entry->time,
__entry->expected_time, __entry->atomic_cnt)
);
@ -863,30 +869,34 @@ TRACE_EVENT(dpu_intf_connect_ext_te,
);
TRACE_EVENT(dpu_core_irq_register_callback,
TP_PROTO(int irq_idx, void *callback),
TP_ARGS(irq_idx, callback),
TP_PROTO(unsigned int irq_reg, unsigned int irq_bit, void *callback),
TP_ARGS(irq_reg, irq_bit, callback),
TP_STRUCT__entry(
__field( int, irq_idx )
__field( unsigned int, irq_reg )
__field( unsigned int, irq_bit )
__field( void *, callback)
),
TP_fast_assign(
__entry->irq_idx = irq_idx;
__entry->irq_reg = irq_reg;
__entry->irq_bit = irq_bit;
__entry->callback = callback;
),
TP_printk("irq_idx:%d callback:%ps", __entry->irq_idx,
TP_printk("IRQ=[%d, %d] callback:%ps", __entry->irq_reg, __entry->irq_bit,
__entry->callback)
);
TRACE_EVENT(dpu_core_irq_unregister_callback,
TP_PROTO(int irq_idx),
TP_ARGS(irq_idx),
TP_PROTO(unsigned int irq_reg, unsigned int irq_bit),
TP_ARGS(irq_reg, irq_bit),
TP_STRUCT__entry(
__field( int, irq_idx )
__field( unsigned int, irq_reg )
__field( unsigned int, irq_bit )
),
TP_fast_assign(
__entry->irq_idx = irq_idx;
__entry->irq_reg = irq_reg;
__entry->irq_bit = irq_bit;
),
TP_printk("irq_idx:%d", __entry->irq_idx)
TP_printk("IRQ=[%d, %d]", __entry->irq_reg, __entry->irq_bit)
);
TRACE_EVENT(dpu_core_perf_update_clk,

View File

@ -169,23 +169,16 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
struct dpu_vbif_set_ot_params *params)
{
struct dpu_hw_vbif *vbif;
struct dpu_hw_mdp *mdp;
bool forced_on = false;
u32 ot_lim;
int ret;
mdp = dpu_kms->hw_mdp;
vbif = dpu_get_vbif(dpu_kms, params->vbif_idx);
if (!vbif || !mdp) {
DRM_DEBUG_ATOMIC("invalid arguments vbif %d mdp %d\n",
vbif != NULL, mdp != NULL);
if (!vbif) {
DRM_DEBUG_ATOMIC("invalid arguments vbif %d\n", vbif != NULL);
return;
}
if (!mdp->ops.setup_clk_force_ctrl ||
!vbif->ops.set_limit_conf ||
!vbif->ops.set_halt_ctrl)
if (!vbif->ops.set_limit_conf || !vbif->ops.set_halt_ctrl)
return;
/* set write_gather_en for all write clients */
@ -200,8 +193,6 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
trace_dpu_perf_set_ot(params->num, params->xin_id, ot_lim,
params->vbif_idx);
forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
vbif->ops.set_limit_conf(vbif, params->xin_id, params->rd, ot_lim);
vbif->ops.set_halt_ctrl(vbif, params->xin_id, true);
@ -211,25 +202,19 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
trace_dpu_vbif_wait_xin_halt_fail(vbif->idx, params->xin_id);
vbif->ops.set_halt_ctrl(vbif, params->xin_id, false);
if (forced_on)
mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
}
void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
struct dpu_vbif_set_qos_params *params)
{
struct dpu_hw_vbif *vbif;
struct dpu_hw_mdp *mdp;
bool forced_on = false;
const struct dpu_vbif_qos_tbl *qos_tbl;
int i;
if (!params || !dpu_kms->hw_mdp) {
if (!params) {
DPU_ERROR("invalid arguments\n");
return;
}
mdp = dpu_kms->hw_mdp;
vbif = dpu_get_vbif(dpu_kms, params->vbif_idx);
@ -238,7 +223,7 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
return;
}
if (!vbif->ops.set_qos_remap || !mdp->ops.setup_clk_force_ctrl) {
if (!vbif->ops.set_qos_remap) {
DRM_DEBUG_ATOMIC("qos remap not supported\n");
return;
}
@ -251,8 +236,6 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
return;
}
forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
for (i = 0; i < qos_tbl->npriority_lvl; i++) {
DRM_DEBUG_ATOMIC("%s xin:%d lvl:%d/%d\n",
dpu_vbif_name(params->vbif_idx), params->xin_id, i,
@ -260,9 +243,6 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
vbif->ops.set_qos_remap(vbif, params->xin_id, i,
qos_tbl->priority_lvl[i]);
}
if (forced_on)
mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
}
void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms)

View File

@ -16,13 +16,11 @@ struct dpu_vbif_set_ot_params {
bool rd;
bool is_wfd;
u32 vbif_idx;
u32 clk_ctrl;
};
struct dpu_vbif_set_memtype_params {
u32 xin_id;
u32 vbif_idx;
u32 clk_ctrl;
bool is_cacheable;
};
@ -30,14 +28,12 @@ struct dpu_vbif_set_memtype_params {
* struct dpu_vbif_set_qos_params - QoS remapper parameter
* @vbif_idx: vbif identifier
* @xin_id: client interface identifier
* @clk_ctrl: clock control identifier of the xin
* @num: pipe identifier (debug only)
* @is_rt: true if pipe is used in real-time use case
*/
struct dpu_vbif_set_qos_params {
u32 vbif_idx;
u32 xin_id;
u32 clk_ctrl;
u32 num;
bool is_rt;
};

View File

@ -135,8 +135,6 @@ static void mdp4_destroy(struct msm_kms *kms)
pm_runtime_disable(dev);
mdp_kms_destroy(&mdp4_kms->base);
kfree(mdp4_kms);
}
static const struct mdp_kms_funcs kms_funcs = {
@ -380,56 +378,27 @@ static int mdp4_kms_init(struct drm_device *dev)
{
struct platform_device *pdev = to_platform_device(dev->dev);
struct msm_drm_private *priv = dev->dev_private;
struct mdp4_kms *mdp4_kms;
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(priv->kms));
struct msm_kms *kms = NULL;
struct msm_mmu *mmu;
struct msm_gem_address_space *aspace;
int irq, ret;
int ret;
u32 major, minor;
unsigned long max_clk;
/* TODO: Chips that aren't apq8064 have a 200 Mhz max_clk */
max_clk = 266667000;
mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
if (!mdp4_kms) {
DRM_DEV_ERROR(dev->dev, "failed to allocate kms\n");
return -ENOMEM;
}
ret = mdp_kms_init(&mdp4_kms->base, &kms_funcs);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to init kms\n");
goto fail;
}
priv->kms = &mdp4_kms->base.base;
kms = priv->kms;
mdp4_kms->dev = dev;
mdp4_kms->mmio = msm_ioremap(pdev, NULL);
if (IS_ERR(mdp4_kms->mmio)) {
ret = PTR_ERR(mdp4_kms->mmio);
goto fail;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
goto fail;
}
kms->irq = irq;
/* NOTE: driver for this regulator still missing upstream.. use
* _get_exclusive() and ignore the error if it does not exist
* (and hope that the bootloader left it on for us)
*/
mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd");
if (IS_ERR(mdp4_kms->vdd))
mdp4_kms->vdd = NULL;
if (mdp4_kms->vdd) {
ret = regulator_enable(mdp4_kms->vdd);
if (ret) {
@ -438,24 +407,6 @@ static int mdp4_kms_init(struct drm_device *dev)
}
}
mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
if (IS_ERR(mdp4_kms->clk)) {
DRM_DEV_ERROR(dev->dev, "failed to get core_clk\n");
ret = PTR_ERR(mdp4_kms->clk);
goto fail;
}
mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk");
if (IS_ERR(mdp4_kms->pclk))
mdp4_kms->pclk = NULL;
mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
if (IS_ERR(mdp4_kms->axi_clk)) {
DRM_DEV_ERROR(dev->dev, "failed to get axi_clk\n");
ret = PTR_ERR(mdp4_kms->axi_clk);
goto fail;
}
clk_set_rate(mdp4_kms->clk, max_clk);
read_mdp_hw_revision(mdp4_kms, &major, &minor);
@ -470,10 +421,9 @@ static int mdp4_kms_init(struct drm_device *dev)
mdp4_kms->rev = minor;
if (mdp4_kms->rev >= 2) {
mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
if (IS_ERR(mdp4_kms->lut_clk)) {
if (!mdp4_kms->lut_clk) {
DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
ret = PTR_ERR(mdp4_kms->lut_clk);
ret = -ENODEV;
goto fail;
}
clk_set_rate(mdp4_kms->lut_clk, max_clk);
@ -551,13 +501,59 @@ fail:
}
static const struct dev_pm_ops mdp4_pm_ops = {
.prepare = msm_pm_prepare,
.complete = msm_pm_complete,
.prepare = msm_kms_pm_prepare,
.complete = msm_kms_pm_complete,
};
static int mdp4_probe(struct platform_device *pdev)
{
return msm_drv_probe(&pdev->dev, mdp4_kms_init);
struct device *dev = &pdev->dev;
struct mdp4_kms *mdp4_kms;
int irq;
mdp4_kms = devm_kzalloc(dev, sizeof(*mdp4_kms), GFP_KERNEL);
if (!mdp4_kms)
return dev_err_probe(dev, -ENOMEM, "failed to allocate kms\n");
mdp4_kms->mmio = msm_ioremap(pdev, NULL);
if (IS_ERR(mdp4_kms->mmio))
return PTR_ERR(mdp4_kms->mmio);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return dev_err_probe(dev, irq, "failed to get irq\n");
mdp4_kms->base.base.irq = irq;
/* NOTE: driver for this regulator still missing upstream.. use
* _get_exclusive() and ignore the error if it does not exist
* (and hope that the bootloader left it on for us)
*/
mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd");
if (IS_ERR(mdp4_kms->vdd))
mdp4_kms->vdd = NULL;
mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
if (IS_ERR(mdp4_kms->clk))
return dev_err_probe(dev, PTR_ERR(mdp4_kms->clk), "failed to get core_clk\n");
mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk");
if (IS_ERR(mdp4_kms->pclk))
mdp4_kms->pclk = NULL;
mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
if (IS_ERR(mdp4_kms->axi_clk))
return dev_err_probe(dev, PTR_ERR(mdp4_kms->axi_clk), "failed to get axi_clk\n");
/*
* This is required for rev >= 2. Handle errors here and let the kms
* init bail out if the clock is not provided.
*/
mdp4_kms->lut_clk = devm_clk_get_optional(&pdev->dev, "lut_clk");
if (IS_ERR(mdp4_kms->lut_clk))
return dev_err_probe(dev, PTR_ERR(mdp4_kms->lut_clk), "failed to get lut_clk\n");
return msm_drv_probe(&pdev->dev, mdp4_kms_init, &mdp4_kms->base.base);
}
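A kernel-style fragment (illustrative only, with dev, pdev and rev assumed in scope) of the devm_clk_get_optional() idiom adopted for lut_clk above: a clock that is simply absent from the DT now yields NULL instead of an error pointer, so the probe only fails on genuine errors such as -EPROBE_DEFER, while the "must exist for rev >= 2" rule is enforced later against a NULL pointer, as the kms-init hunk above does:

	struct clk *lut_clk;

	lut_clk = devm_clk_get_optional(&pdev->dev, "lut_clk");
	if (IS_ERR(lut_clk))	/* a genuine error, e.g. -EPROBE_DEFER */
		return dev_err_probe(dev, PTR_ERR(lut_clk), "failed to get lut_clk\n");

	/* lut_clk == NULL when absent from DT; kms init later rejects that
	 * combination only on hardware where rev >= 2 (returning -ENODEV).
	 */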
static void mdp4_remove(struct platform_device *pdev)
@ -574,7 +570,7 @@ MODULE_DEVICE_TABLE(of, mdp4_dt_match);
static struct platform_driver mdp4_platform_driver = {
.probe = mdp4_probe,
.remove_new = mdp4_remove,
.shutdown = msm_drv_shutdown,
.shutdown = msm_kms_shutdown,
.driver = {
.name = "mdp4",
.of_match_table = mdp4_dt_match,

View File

@ -554,20 +554,16 @@ static int mdp5_kms_init(struct drm_device *dev)
struct platform_device *pdev;
struct mdp5_kms *mdp5_kms;
struct mdp5_cfg *config;
struct msm_kms *kms;
struct msm_kms *kms = priv->kms;
struct msm_gem_address_space *aspace;
int irq, i, ret;
int i, ret;
ret = mdp5_init(to_platform_device(dev->dev), dev);
if (ret)
return ret;
/* priv->kms would have been populated by the MDP5 driver */
kms = priv->kms;
if (!kms)
return -ENOMEM;
mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
pdev = mdp5_kms->pdev;
ret = mdp_kms_init(&mdp5_kms->base, &kms_funcs);
@ -576,15 +572,6 @@ static int mdp5_kms_init(struct drm_device *dev)
goto fail;
}
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (!irq) {
ret = -EINVAL;
DRM_DEV_ERROR(&pdev->dev, "failed to get irq\n");
goto fail;
}
kms->irq = irq;
config = mdp5_cfg_get_config(mdp5_kms->cfg);
/* make sure things are off before attaching iommu (bootloader could
@ -787,60 +774,23 @@ static int interface_init(struct mdp5_kms *mdp5_kms)
static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct mdp5_kms *mdp5_kms;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
struct mdp5_cfg *config;
u32 major, minor;
int ret;
mdp5_kms = devm_kzalloc(&pdev->dev, sizeof(*mdp5_kms), GFP_KERNEL);
if (!mdp5_kms) {
ret = -ENOMEM;
goto fail;
}
spin_lock_init(&mdp5_kms->resource_lock);
mdp5_kms->dev = dev;
mdp5_kms->pdev = pdev;
ret = mdp5_global_obj_init(mdp5_kms);
if (ret)
goto fail;
mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys");
if (IS_ERR(mdp5_kms->mmio)) {
ret = PTR_ERR(mdp5_kms->mmio);
goto fail;
}
/* mandatory clocks: */
ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
if (ret)
goto fail;
ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);
if (ret)
goto fail;
ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
if (ret)
goto fail;
ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);
if (ret)
goto fail;
/* optional clocks: */
get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
get_clk(pdev, &mdp5_kms->tbu_clk, "tbu", false);
get_clk(pdev, &mdp5_kms->tbu_rt_clk, "tbu_rt", false);
/* we need to set a default rate before enabling. Set a safe
* rate first, then figure out hw revision, and then set a
* more optimal rate:
*/
clk_set_rate(mdp5_kms->core_clk, 200000000);
/* set uninit-ed kms */
priv->kms = &mdp5_kms->base.base;
pm_runtime_enable(&pdev->dev);
mdp5_kms->rpm_enabled = true;
@ -931,15 +881,53 @@ static int mdp5_setup_interconnect(struct platform_device *pdev)
static int mdp5_dev_probe(struct platform_device *pdev)
{
int ret;
struct mdp5_kms *mdp5_kms;
int ret, irq;
DBG("");
mdp5_kms = devm_kzalloc(&pdev->dev, sizeof(*mdp5_kms), GFP_KERNEL);
if (!mdp5_kms)
return -ENOMEM;
ret = mdp5_setup_interconnect(pdev);
if (ret)
return ret;
return msm_drv_probe(&pdev->dev, mdp5_kms_init);
mdp5_kms->pdev = pdev;
spin_lock_init(&mdp5_kms->resource_lock);
mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys");
if (IS_ERR(mdp5_kms->mmio))
return PTR_ERR(mdp5_kms->mmio);
/* mandatory clocks: */
ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
if (ret)
return ret;
ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);
if (ret)
return ret;
ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
if (ret)
return ret;
ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);
if (ret)
return ret;
/* optional clocks: */
get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
get_clk(pdev, &mdp5_kms->tbu_clk, "tbu", false);
get_clk(pdev, &mdp5_kms->tbu_rt_clk, "tbu_rt", false);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return dev_err_probe(&pdev->dev, irq, "failed to get irq\n");
mdp5_kms->base.base.irq = irq;
return msm_drv_probe(&pdev->dev, mdp5_kms_init, &mdp5_kms->base.base);
}
static void mdp5_dev_remove(struct platform_device *pdev)
@ -972,8 +960,8 @@ static __maybe_unused int mdp5_runtime_resume(struct device *dev)
static const struct dev_pm_ops mdp5_pm_ops = {
SET_RUNTIME_PM_OPS(mdp5_runtime_suspend, mdp5_runtime_resume, NULL)
.prepare = msm_pm_prepare,
.complete = msm_pm_complete,
.prepare = msm_kms_pm_prepare,
.complete = msm_kms_pm_complete,
};
static const struct of_device_id mdp5_dt_match[] = {
@ -987,7 +975,7 @@ MODULE_DEVICE_TABLE(of, mdp5_dt_match);
static struct platform_driver mdp5_driver = {
.probe = mdp5_dev_probe,
.remove_new = mdp5_dev_remove,
.shutdown = msm_drv_shutdown,
.shutdown = msm_kms_shutdown,
.driver = {
.name = "msm_mdp",
.of_match_table = mdp5_dt_match,

View File

@ -379,7 +379,7 @@ struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_
{
struct mdp5_smp_state *state;
struct mdp5_global_state *global_state;
struct mdp5_smp *smp = NULL;
struct mdp5_smp *smp;
int ret;
smp = kzalloc(sizeof(*smp), GFP_KERNEL);

View File

@ -88,7 +88,6 @@ struct dp_display_private {
bool audio_supported;
struct drm_device *drm_dev;
struct platform_device *pdev;
struct dentry *root;
struct dp_parser *parser;
@ -341,21 +340,6 @@ static const struct component_ops dp_display_comp_ops = {
.unbind = dp_display_unbind,
};
static bool dp_display_is_ds_bridge(struct dp_panel *panel)
{
return (panel->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
DP_DWN_STRM_PORT_PRESENT);
}
static bool dp_display_is_sink_count_zero(struct dp_display_private *dp)
{
drm_dbg_dp(dp->drm_dev, "present=%#x sink_count=%d\n",
dp->panel->dpcd[DP_DOWNSTREAMPORT_PRESENT],
dp->link->sink_count);
return dp_display_is_ds_bridge(dp->panel) &&
(dp->link->sink_count == 0);
}
static void dp_display_send_hpd_event(struct msm_dp *dp_display)
{
struct dp_display_private *dp;
@ -379,8 +363,12 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp,
}
/* reset video pattern flag on disconnect */
if (!hpd)
if (!hpd) {
dp->panel->video_test = false;
drm_dp_set_subconnector_property(dp->dp_display.connector,
connector_status_disconnected,
dp->panel->dpcd, dp->panel->downstream_ports);
}
dp->dp_display.is_connected = hpd;
@ -408,6 +396,9 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp)
dp_link_process_request(dp->link);
drm_dp_set_subconnector_property(dp->dp_display.connector, connector_status_connected,
dp->panel->dpcd, dp->panel->downstream_ports);
edid = dp->panel->edid;
dp->dp_display.psr_supported = dp->panel->psr_cap.version && psr_enabled;
@ -514,7 +505,7 @@ static int dp_display_handle_port_ststus_changed(struct dp_display_private *dp)
{
int rc = 0;
if (dp_display_is_sink_count_zero(dp)) {
if (drm_dp_is_branch(dp->panel->dpcd) && dp->link->sink_count == 0) {
drm_dbg_dp(dp->drm_dev, "sink count is zero, nothing to do\n");
if (dp->hpd_state != ST_DISCONNECTED) {
dp->hpd_state = ST_DISCONNECT_PENDING;
@ -603,7 +594,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
return 0;
}
ret = dp_display_usbpd_configure_cb(&dp->pdev->dev);
ret = dp_display_usbpd_configure_cb(&dp->dp_display.pdev->dev);
if (ret) { /* link train failed */
dp->hpd_state = ST_DISCONNECTED;
} else {
@ -651,7 +642,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
if (dp->link->sink_count == 0) {
dp_display_host_phy_exit(dp);
}
dp_display_notify_disconnect(&dp->pdev->dev);
dp_display_notify_disconnect(&dp->dp_display.pdev->dev);
mutex_unlock(&dp->event_mutex);
return 0;
} else if (state == ST_DISCONNECT_PENDING) {
@ -661,7 +652,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
dp_ctrl_off_link(dp->ctrl);
dp_display_host_phy_exit(dp);
dp->hpd_state = ST_DISCONNECTED;
dp_display_notify_disconnect(&dp->pdev->dev);
dp_display_notify_disconnect(&dp->dp_display.pdev->dev);
mutex_unlock(&dp->event_mutex);
return 0;
}
@ -670,7 +661,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
* We don't need separate work for disconnect as
* connect/attention interrupts are disabled
*/
dp_display_notify_disconnect(&dp->pdev->dev);
dp_display_notify_disconnect(&dp->dp_display.pdev->dev);
if (state == ST_DISPLAY_OFF) {
dp->hpd_state = ST_DISCONNECTED;
@ -712,7 +703,7 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
return 0;
}
dp_display_usbpd_attention_cb(&dp->pdev->dev);
dp_display_usbpd_attention_cb(&dp->dp_display.pdev->dev);
drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n",
dp->dp_display.connector_type, state);
@ -733,12 +724,12 @@ static void dp_display_deinit_sub_modules(struct dp_display_private *dp)
static int dp_init_sub_modules(struct dp_display_private *dp)
{
int rc = 0;
struct device *dev = &dp->pdev->dev;
struct device *dev = &dp->dp_display.pdev->dev;
struct dp_panel_in panel_in = {
.dev = dev,
};
dp->parser = dp_parser_get(dp->pdev);
dp->parser = dp_parser_get(dp->dp_display.pdev);
if (IS_ERR(dp->parser)) {
rc = PTR_ERR(dp->parser);
DRM_ERROR("failed to initialize parser, rc = %d\n", rc);
@ -799,7 +790,7 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
goto error_ctrl;
}
dp->audio = dp_audio_get(dp->pdev, dp->panel, dp->catalog);
dp->audio = dp_audio_get(dp->dp_display.pdev, dp->panel, dp->catalog);
if (IS_ERR(dp->audio)) {
rc = PTR_ERR(dp->audio);
pr_err("failed to initialize audio, rc = %d\n", rc);
@ -1205,7 +1196,7 @@ int dp_display_request_irq(struct msm_dp *dp_display)
dp = container_of(dp_display, struct dp_display_private, dp_display);
dp->irq = irq_of_parse_and_map(dp->pdev->dev.of_node, 0);
dp->irq = irq_of_parse_and_map(dp->dp_display.pdev->dev.of_node, 0);
if (!dp->irq) {
DRM_ERROR("failed to get irq\n");
return -EINVAL;
@ -1261,7 +1252,7 @@ static int dp_display_probe(struct platform_device *pdev)
if (!desc)
return -EINVAL;
dp->pdev = pdev;
dp->dp_display.pdev = pdev;
dp->name = "drm_dp";
dp->id = desc->id;
dp->dp_display.connector_type = desc->connector_type;
@ -1467,7 +1458,7 @@ void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor)
int rc;
dp = container_of(dp_display, struct dp_display_private, dp_display);
dev = &dp->pdev->dev;
dev = &dp->dp_display.pdev->dev;
dp->debug = dp_debug_get(dev, dp->panel,
dp->link, dp->dp_display.connector,
@ -1487,7 +1478,7 @@ static int dp_display_get_next_bridge(struct msm_dp *dp)
struct device *dev;
dp_priv = container_of(dp, struct dp_display_private, dp_display);
dev = &dp_priv->pdev->dev;
dev = &dp_priv->dp_display.pdev->dev;
aux_bus = of_get_child_by_name(dev->of_node, "aux-bus");
if (aux_bus && dp->is_edp) {
@ -1539,7 +1530,6 @@ error:
int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
struct drm_encoder *encoder)
{
struct msm_drm_private *priv = dev->dev_private;
struct dp_display_private *dp_priv;
int ret;
@ -1557,17 +1547,13 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
if (ret)
return ret;
dp_display->bridge = dp_bridge_init(dp_display, dev, encoder);
if (IS_ERR(dp_display->bridge)) {
ret = PTR_ERR(dp_display->bridge);
ret = dp_bridge_init(dp_display, dev, encoder);
if (ret) {
DRM_DEV_ERROR(dev->dev,
"failed to create dp bridge: %d\n", ret);
dp_display->bridge = NULL;
return ret;
}
priv->bridges[priv->num_bridges++] = dp_display->bridge;
dp_display->connector = dp_drm_connector_init(dp_display, encoder);
if (IS_ERR(dp_display->connector)) {
ret = PTR_ERR(dp_display->connector);
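The drm_dp_set_subconnector_property() calls added above (one on disconnect, one in the HPD-high path) are what keep the reported subconnector type current. A minimal sketch of the pattern with a hypothetical helper name; it assumes the property was attached at connector creation via drm_connector_attach_dp_subconnector_property() and that dpcd/port_cap were already read from the sink:

#include <drm/display/drm_dp_helper.h>
#include <drm/drm_connector.h>

static void example_sync_subconnector(struct drm_connector *connector,
                                      bool plugged, const u8 *dpcd,
                                      const u8 port_cap[4])
{
        enum drm_connector_status status = plugged ?
                connector_status_connected : connector_status_disconnected;

        /* the helper decodes DP_DOWNSTREAMPORT_PRESENT + port caps itself */
        drm_dp_set_subconnector_property(connector, status, dpcd, port_cap);
}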

View File

@ -12,6 +12,7 @@
struct msm_dp {
struct drm_device *drm_dev;
struct platform_device *pdev;
struct device *codec_dev;
struct drm_bridge *bridge;
struct drm_connector *connector;

View File

@ -272,7 +272,7 @@ static const struct drm_bridge_funcs edp_bridge_ops = {
.atomic_check = edp_bridge_atomic_check,
};
struct drm_bridge *dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
struct drm_encoder *encoder)
{
int rc;
@ -281,7 +281,7 @@ struct drm_bridge *dp_bridge_init(struct msm_dp *dp_display, struct drm_device *
dp_bridge = devm_kzalloc(dev->dev, sizeof(*dp_bridge), GFP_KERNEL);
if (!dp_bridge)
return ERR_PTR(-ENOMEM);
return -ENOMEM;
dp_bridge->dp_display = dp_display;
@ -307,14 +307,18 @@ struct drm_bridge *dp_bridge_init(struct msm_dp *dp_display, struct drm_device *
DRM_BRIDGE_OP_MODES;
}
drm_bridge_add(bridge);
rc = devm_drm_bridge_add(dev->dev, bridge);
if (rc) {
DRM_ERROR("failed to add bridge, rc=%d\n", rc);
return rc;
}
rc = drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (rc) {
DRM_ERROR("failed to attach bridge, rc=%d\n", rc);
drm_bridge_remove(bridge);
return ERR_PTR(rc);
return rc;
}
if (dp_display->next_bridge) {
@ -323,12 +327,13 @@ struct drm_bridge *dp_bridge_init(struct msm_dp *dp_display, struct drm_device *
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (rc < 0) {
DRM_ERROR("failed to attach panel bridge: %d\n", rc);
drm_bridge_remove(bridge);
return ERR_PTR(rc);
return rc;
}
}
return bridge;
dp_display->bridge = bridge;
return 0;
}
/* connector initialization */
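The conversion above is the devm-managed bridge lifetime pattern: once devm_drm_bridge_add() succeeds, drm_bridge_remove() happens automatically at device teardown, so the error paths no longer unwind by hand. A condensed sketch, assuming the bridge was devm_kzalloc'd against the same struct device (names illustrative):

#include <drm/drm_bridge.h>

static int example_add_and_attach(struct device *dev,
                                  struct drm_bridge *bridge,
                                  struct drm_encoder *encoder)
{
        int ret;

        /* devres owns the bridge now: no drm_bridge_remove() on error */
        ret = devm_drm_bridge_add(dev, bridge);
        if (ret)
                return ret;

        return drm_bridge_attach(encoder, bridge, NULL,
                                 DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}

The same conversion is applied to the DSI manager and HDMI bridges further down in this diff.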

View File

@ -20,7 +20,7 @@ struct msm_dp_bridge {
#define to_dp_bridge(x) container_of((x), struct msm_dp_bridge, bridge)
struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display, struct drm_encoder *encoder);
struct drm_bridge *dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
struct drm_encoder *encoder);
void dp_bridge_atomic_enable(struct drm_bridge *drm_bridge,

View File

@ -712,49 +712,17 @@ end:
return ret;
}
/**
* dp_link_parse_sink_count() - parses the sink count
* @dp_link: pointer to link module data
*
* Parses the DPCD to check if there is an update to the sink count
* (Byte 0x200), and whether all the sink devices connected have Content
* Protection enabled.
*/
static int dp_link_parse_sink_count(struct dp_link *dp_link)
{
ssize_t rlen;
bool cp_ready;
struct dp_link_private *link = container_of(dp_link,
struct dp_link_private, dp_link);
rlen = drm_dp_dpcd_readb(link->aux, DP_SINK_COUNT,
&link->dp_link.sink_count);
if (rlen < 0) {
DRM_ERROR("sink count read failed. rlen=%zd\n", rlen);
return rlen;
}
cp_ready = link->dp_link.sink_count & DP_SINK_CP_READY;
link->dp_link.sink_count =
DP_GET_SINK_COUNT(link->dp_link.sink_count);
drm_dbg_dp(link->drm_dev, "sink_count = 0x%x, cp_ready = 0x%x\n",
link->dp_link.sink_count, cp_ready);
return 0;
}
static int dp_link_parse_sink_status_field(struct dp_link_private *link)
{
int len = 0;
int len;
link->prev_sink_count = link->dp_link.sink_count;
len = dp_link_parse_sink_count(&link->dp_link);
len = drm_dp_read_sink_count(link->aux);
if (len < 0) {
DRM_ERROR("DP parse sink count failed\n");
return len;
}
link->dp_link.sink_count = len;
len = drm_dp_dpcd_read_link_status(link->aux,
link->link_status);
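The removed dp_link_parse_sink_count() is subsumed by the core helper: drm_dp_read_sink_count() reads DP_SINK_COUNT and applies DP_GET_SINK_COUNT() itself (stripping the CP_READY bit), returning the count or a negative error. A sketch of the resulting call pattern (function name illustrative):

#include <drm/display/drm_dp_helper.h>

static int example_refresh_sink_count(struct drm_dp_aux *aux,
                                      const u8 dpcd[DP_RECEIVER_CAP_SIZE],
                                      u8 *sink_count)
{
        int count;

        /* a sink count is only meaningful behind a branch device */
        if (!drm_dp_is_branch(dpcd))
                return 0;

        count = drm_dp_read_sink_count(aux);
        if (count < 0)
                return count;   /* AUX read failed */

        *sink_count = count;
        return 0;
}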

View File

@ -17,7 +17,6 @@ struct dp_panel_private {
struct dp_link *link;
struct dp_catalog *catalog;
bool panel_on;
bool aux_cfg_update_done;
};
static void dp_panel_read_psr_cap(struct dp_panel_private *panel)
@ -43,58 +42,24 @@ static void dp_panel_read_psr_cap(struct dp_panel_private *panel)
static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
{
int rc = 0;
size_t len;
ssize_t rlen;
int rc;
struct dp_panel_private *panel;
struct dp_link_info *link_info;
u8 *dpcd, major = 0, minor = 0, temp;
u32 offset = DP_DPCD_REV;
dpcd = dp_panel->dpcd;
u8 *dpcd, major, minor;
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
dpcd = dp_panel->dpcd;
rc = drm_dp_read_dpcd_caps(panel->aux, dpcd);
if (rc)
return rc;
link_info = &dp_panel->link_info;
rlen = drm_dp_dpcd_read(panel->aux, offset,
dpcd, (DP_RECEIVER_CAP_SIZE + 1));
if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) {
DRM_ERROR("dpcd read failed, rlen=%zd\n", rlen);
if (rlen == -ETIMEDOUT)
rc = rlen;
else
rc = -EINVAL;
goto end;
}
temp = dpcd[DP_TRAINING_AUX_RD_INTERVAL];
/* check for EXTENDED_RECEIVER_CAPABILITY_FIELD_PRESENT */
if (temp & BIT(7)) {
drm_dbg_dp(panel->drm_dev,
"using EXTENDED_RECEIVER_CAPABILITY_FIELD\n");
offset = DPRX_EXTENDED_DPCD_FIELD;
}
rlen = drm_dp_dpcd_read(panel->aux, offset,
dpcd, (DP_RECEIVER_CAP_SIZE + 1));
if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) {
DRM_ERROR("dpcd read failed, rlen=%zd\n", rlen);
if (rlen == -ETIMEDOUT)
rc = rlen;
else
rc = -EINVAL;
goto end;
}
link_info->revision = dpcd[DP_DPCD_REV];
major = (link_info->revision >> 4) & 0x0f;
minor = link_info->revision & 0x0f;
link_info->rate = drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
link_info->num_lanes = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
link_info->rate = drm_dp_max_link_rate(dpcd);
link_info->num_lanes = drm_dp_max_lane_count(dpcd);
/* Limit data lanes from data-lanes of endpoint property of dtsi */
if (link_info->num_lanes > dp_panel->max_dp_lanes)
@ -111,25 +76,8 @@ static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
if (drm_dp_enhanced_frame_cap(dpcd))
link_info->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
dp_panel->dfp_present = dpcd[DP_DOWNSTREAMPORT_PRESENT];
dp_panel->dfp_present &= DP_DWN_STRM_PORT_PRESENT;
if (dp_panel->dfp_present && (dpcd[DP_DPCD_REV] > 0x10)) {
dp_panel->ds_port_cnt = dpcd[DP_DOWN_STREAM_PORT_COUNT];
dp_panel->ds_port_cnt &= DP_PORT_COUNT_MASK;
len = DP_DOWNSTREAM_PORTS * DP_DOWNSTREAM_CAP_SIZE;
rlen = drm_dp_dpcd_read(panel->aux,
DP_DOWNSTREAM_PORT_0, dp_panel->ds_cap_info, len);
if (rlen < len) {
DRM_ERROR("ds port status failed, rlen=%zd\n", rlen);
rc = -EINVAL;
goto end;
}
}
dp_panel_read_psr_cap(panel);
end:
return rc;
}
@ -179,8 +127,8 @@ static int dp_panel_update_modes(struct drm_connector *connector,
int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
struct drm_connector *connector)
{
int rc = 0, bw_code;
int rlen, count;
int rc, bw_code;
int count;
struct dp_panel_private *panel;
if (!dp_panel || !connector) {
@ -205,20 +153,19 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
return -EINVAL;
}
if (dp_panel->dfp_present) {
rlen = drm_dp_dpcd_read(panel->aux, DP_SINK_COUNT,
&count, 1);
if (rlen == 1) {
count = DP_GET_SINK_COUNT(count);
if (!count) {
DRM_ERROR("no downstream ports connected\n");
panel->link->sink_count = 0;
rc = -ENOTCONN;
goto end;
}
if (drm_dp_is_branch(dp_panel->dpcd)) {
count = drm_dp_read_sink_count(panel->aux);
if (!count) {
panel->link->sink_count = 0;
return -ENOTCONN;
}
}
rc = drm_dp_read_downstream_info(panel->aux, dp_panel->dpcd,
dp_panel->downstream_ports);
if (rc)
return rc;
kfree(dp_panel->edid);
dp_panel->edid = NULL;
@ -233,19 +180,6 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
}
}
if (panel->aux_cfg_update_done) {
drm_dbg_dp(panel->drm_dev,
"read DPCD with updated AUX config\n");
rc = dp_panel_read_dpcd(dp_panel);
bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate);
if (rc || !is_link_rate_valid(bw_code) ||
!is_lane_count_valid(dp_panel->link_info.num_lanes)
|| (bw_code > dp_panel->max_bw_code)) {
DRM_ERROR("read dpcd failed %d\n", rc);
return rc;
}
panel->aux_cfg_update_done = false;
}
end:
return rc;
}
@ -289,26 +223,9 @@ int dp_panel_get_modes(struct dp_panel *dp_panel,
static u8 dp_panel_get_edid_checksum(struct edid *edid)
{
struct edid *last_block;
u8 *raw_edid;
bool is_edid_corrupt = false;
edid += edid->extensions;
if (!edid) {
DRM_ERROR("invalid edid input\n");
return 0;
}
raw_edid = (u8 *)edid;
raw_edid += (edid->extensions * EDID_LENGTH);
last_block = (struct edid *)raw_edid;
/* block type extension */
drm_edid_block_valid(raw_edid, 1, false, &is_edid_corrupt);
if (!is_edid_corrupt)
return last_block->checksum;
DRM_ERROR("Invalid block, no checksum\n");
return 0;
return edid->checksum;
}
void dp_panel_handle_sink_request(struct dp_panel *dp_panel)
@ -490,7 +407,6 @@ struct dp_panel *dp_panel_get(struct dp_panel_in *in)
dp_panel = &panel->dp_panel;
dp_panel->max_bw_code = DP_LINK_BW_8_1;
panel->aux_cfg_update_done = false;
return dp_panel;
}
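Most of the deleted code in this file collapses into three core helpers: drm_dp_read_dpcd_caps() transparently retries from the extended receiver capability field at 0x2200 when the sink sets the EXTENDED_RECEIVER_CAPABILITY_FIELD_PRESENT bit, while the link rate, lane count and downstream-port info come from accessors instead of raw DPCD indexing. A condensed sketch (function name illustrative):

#include <linux/printk.h>
#include <drm/display/drm_dp_helper.h>

static int example_read_sink_caps(struct drm_dp_aux *aux,
                                  u8 dpcd[DP_RECEIVER_CAP_SIZE],
                                  u8 ports[DP_MAX_DOWNSTREAM_PORTS])
{
        int ret;

        ret = drm_dp_read_dpcd_caps(aux, dpcd); /* handles 0x2200 fallback */
        if (ret)
                return ret;

        pr_debug("rate=%d lanes=%d branch=%d\n",
                 drm_dp_max_link_rate(dpcd),
                 drm_dp_max_lane_count(dpcd),
                 drm_dp_is_branch(dpcd));

        return drm_dp_read_downstream_info(aux, dpcd, ports);
}

The EDID checksum path above is simplified the same way: step to the last extension block and return its checksum field directly, instead of re-validating the block just to extract it.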

View File

@ -13,11 +13,6 @@
struct edid;
#define DPRX_EXTENDED_DPCD_FIELD 0x2200
#define DP_DOWNSTREAM_PORTS 4
#define DP_DOWNSTREAM_CAP_SIZE 4
struct dp_display_mode {
struct drm_display_mode drm_mode;
u32 capabilities;
@ -40,10 +35,8 @@ struct dp_panel_psr {
struct dp_panel {
/* dpcd raw data */
u8 dpcd[DP_RECEIVER_CAP_SIZE + 1];
u8 ds_cap_info[DP_DOWNSTREAM_PORTS * DP_DOWNSTREAM_CAP_SIZE];
u32 ds_port_cnt;
u32 dfp_present;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
struct dp_link_info link_info;
struct drm_dp_desc desc;

View File

@ -17,6 +17,11 @@ struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi)
return msm_dsi_host_get_dsc_config(msm_dsi->host);
}
bool msm_dsi_wide_bus_enabled(struct msm_dsi *msm_dsi)
{
return msm_dsi_host_is_wide_bus_enabled(msm_dsi->host);
}
static int dsi_get_phy(struct msm_dsi *msm_dsi)
{
struct platform_device *pdev = msm_dsi->pdev;
@ -126,6 +131,7 @@ static void dsi_unbind(struct device *dev, struct device *master,
struct msm_drm_private *priv = dev_get_drvdata(master);
struct msm_dsi *msm_dsi = dev_get_drvdata(dev);
msm_dsi_tx_buf_free(msm_dsi->host);
priv->dsi[msm_dsi->id] = NULL;
}
@ -210,20 +216,14 @@ void __exit msm_dsi_unregister(void)
int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
struct drm_encoder *encoder)
{
struct msm_drm_private *priv = dev->dev_private;
int ret;
if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) {
DRM_DEV_ERROR(dev->dev, "too many bridges\n");
return -ENOSPC;
}
msm_dsi->dev = dev;
ret = msm_dsi_host_modeset_init(msm_dsi->host, dev);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to modeset init host: %d\n", ret);
goto fail;
return ret;
}
if (msm_dsi_is_bonded_dsi(msm_dsi) &&
@ -237,32 +237,20 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
msm_dsi->encoder = encoder;
msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id);
if (IS_ERR(msm_dsi->bridge)) {
ret = PTR_ERR(msm_dsi->bridge);
ret = msm_dsi_manager_bridge_init(msm_dsi);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to create dsi bridge: %d\n", ret);
msm_dsi->bridge = NULL;
goto fail;
return ret;
}
ret = msm_dsi_manager_ext_bridge_init(msm_dsi->id);
if (ret) {
DRM_DEV_ERROR(dev->dev,
"failed to create dsi connector: %d\n", ret);
goto fail;
return ret;
}
priv->bridges[priv->num_bridges++] = msm_dsi->bridge;
return 0;
fail:
/* bridge/connector are normally destroyed by drm: */
if (msm_dsi->bridge) {
msm_dsi_manager_bridge_destroy(msm_dsi->bridge);
msm_dsi->bridge = NULL;
}
return ret;
}
void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi)

View File

@ -56,8 +56,7 @@ struct msm_dsi {
};
/* dsi manager */
struct drm_bridge *msm_dsi_manager_bridge_init(u8 id);
void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge);
int msm_dsi_manager_bridge_init(struct msm_dsi *msm_dsi);
int msm_dsi_manager_ext_bridge_init(u8 id);
int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
@ -125,6 +124,7 @@ int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size);
void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host);
void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host);
void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host);
void msm_dsi_tx_buf_free(struct mipi_dsi_host *mipi_host);
int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *iova);
int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *iova);
int dsi_clk_init_v2(struct msm_dsi_host *msm_host);
@ -134,6 +134,7 @@ int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi);
void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host);
void msm_dsi_host_test_pattern_en(struct mipi_dsi_host *host);
struct drm_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host);
bool msm_dsi_host_is_wide_bus_enabled(struct mipi_dsi_host *host);
/* dsi phy */
struct msm_dsi_phy;

View File

@ -664,6 +664,7 @@ static inline uint32_t DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP(enum dsi_rgb_swap v
return ((val) << DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP__SHIFT) & DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP__MASK;
}
#define DSI_CMD_MODE_MDP_CTRL2_BURST_MODE 0x00010000
#define DSI_CMD_MODE_MDP_CTRL2_DATABUS_WIDEN 0x00100000
#define REG_DSI_CMD_MODE_MDP_STREAM2_CTRL 0x000001b8
#define DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE__MASK 0x0000003f

View File

@ -147,6 +147,7 @@ struct msm_dsi_host {
/* DSI 6G TX buffer*/
struct drm_gem_object *tx_gem_obj;
struct msm_gem_address_space *aspace;
/* DSI v2 TX buffer */
void *tx_buf;
@ -710,6 +711,15 @@ static void dsi_ctrl_disable(struct msm_dsi_host *msm_host)
dsi_write(msm_host, REG_DSI_CTRL, 0);
}
bool msm_dsi_host_is_wide_bus_enabled(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
return msm_host->dsc &&
(msm_host->cfg_hnd->major == MSM_DSI_VER_MAJOR_6G &&
msm_host->cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V2_5_0);
}
static void dsi_ctrl_enable(struct msm_dsi_host *msm_host,
struct msm_dsi_phy_shared_timings *phy_shared_timings, struct msm_dsi_phy *phy)
{
@ -753,10 +763,16 @@ static void dsi_ctrl_enable(struct msm_dsi_host *msm_host,
data |= DSI_CMD_CFG1_INSERT_DCS_COMMAND;
dsi_write(msm_host, REG_DSI_CMD_CFG1, data);
if (msm_host->cfg_hnd->major == MSM_DSI_VER_MAJOR_6G &&
msm_host->cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_3) {
if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
data = dsi_read(msm_host, REG_DSI_CMD_MODE_MDP_CTRL2);
data |= DSI_CMD_MODE_MDP_CTRL2_BURST_MODE;
if (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_3)
data |= DSI_CMD_MODE_MDP_CTRL2_BURST_MODE;
/* TODO: Allow for video-mode support once tested/fixed */
if (msm_dsi_host_is_wide_bus_enabled(&msm_host->base))
data |= DSI_CMD_MODE_MDP_CTRL2_DATABUS_WIDEN;
dsi_write(msm_host, REG_DSI_CMD_MODE_MDP_CTRL2, data);
}
}
@ -894,6 +910,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
u32 hdisplay = mode->hdisplay;
u32 wc;
int ret;
bool wide_bus_enabled = msm_dsi_host_is_wide_bus_enabled(&msm_host->base);
DBG("");
@ -914,6 +931,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
if (msm_host->dsc) {
struct drm_dsc_config *dsc = msm_host->dsc;
u32 bytes_per_pclk;
/* update dsc params with timing params */
if (!dsc || !mode->hdisplay || !mode->vdisplay) {
@ -937,7 +955,13 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
* pulse width same
*/
h_total -= hdisplay;
hdisplay = DIV_ROUND_UP(msm_dsc_get_bytes_per_line(msm_host->dsc), 3);
if (wide_bus_enabled && !(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
bytes_per_pclk = 6;
else
bytes_per_pclk = 3;
hdisplay = DIV_ROUND_UP(msm_dsc_get_bytes_per_line(msm_host->dsc), bytes_per_pclk);
h_total += hdisplay;
ha_end = ha_start + hdisplay;
}
@ -1111,8 +1135,10 @@ int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
uint64_t iova;
u8 *data;
msm_host->aspace = msm_gem_address_space_get(priv->kms->aspace);
data = msm_gem_kernel_new(dev, size, MSM_BO_WC,
priv->kms->aspace,
msm_host->aspace,
&msm_host->tx_gem_obj, &iova);
if (IS_ERR(data)) {
@ -1141,10 +1167,10 @@ int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size)
return 0;
}
static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
void msm_dsi_tx_buf_free(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
struct drm_device *dev = msm_host->dev;
struct msm_drm_private *priv;
/*
* This is possible if we're tearing down before we've had a chance to
@ -1155,11 +1181,11 @@ static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
if (!dev)
return;
priv = dev->dev_private;
if (msm_host->tx_gem_obj) {
msm_gem_unpin_iova(msm_host->tx_gem_obj, priv->kms->aspace);
drm_gem_object_put(msm_host->tx_gem_obj);
msm_gem_kernel_put(msm_host->tx_gem_obj, msm_host->aspace);
msm_gem_address_space_put(msm_host->aspace);
msm_host->tx_gem_obj = NULL;
msm_host->aspace = NULL;
}
if (msm_host->tx_buf)
@ -1945,7 +1971,6 @@ void msm_dsi_host_destroy(struct mipi_dsi_host *host)
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
DBG("");
dsi_tx_buf_free(msm_host);
if (msm_host->workqueue) {
destroy_workqueue(msm_host->workqueue);
msm_host->workqueue = NULL;
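Two independent changes land in this file. First, the 6G TX buffer now takes its own reference on the KMS address space (msm_gem_address_space_get() at allocation, msm_gem_kernel_put() plus msm_gem_address_space_put() on free), so msm_dsi_tx_buf_free() can run from dsi_unbind() without reaching into priv->kms. Second, DSC timing becomes wide-bus aware: with DATABUS_WIDEN set (command mode only for now, per the TODO above), the bus consumes 6 compressed bytes per pixel clock instead of 3, halving the horizontal word count. A worked sketch of that arithmetic, with an assumed bytes-per-line value:

#include <linux/math.h>

#define EXAMPLE_DSC_BYTES_PER_LINE 1440 /* assumed compressed line size */

static unsigned int example_dsc_hdisplay(bool wide_bus_enabled)
{
        /* 6 bytes/pclk when widened, 3 bytes/pclk otherwise */
        unsigned int bytes_per_pclk = wide_bus_enabled ? 6 : 3;

        /* 1440 bytes -> 480 pclks narrow, 240 pclks wide */
        return DIV_ROUND_UP(EXAMPLE_DSC_BYTES_PER_LINE, bytes_per_pclk);
}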

View File

@ -466,9 +466,8 @@ static const struct drm_bridge_funcs dsi_mgr_bridge_funcs = {
};
/* initialize bridge */
struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
int msm_dsi_manager_bridge_init(struct msm_dsi *msm_dsi)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_bridge *bridge = NULL;
struct dsi_bridge *dsi_bridge;
struct drm_encoder *encoder;
@ -476,31 +475,27 @@ struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
dsi_bridge = devm_kzalloc(msm_dsi->dev->dev,
sizeof(*dsi_bridge), GFP_KERNEL);
if (!dsi_bridge) {
ret = -ENOMEM;
goto fail;
}
if (!dsi_bridge)
return -ENOMEM;
dsi_bridge->id = id;
dsi_bridge->id = msm_dsi->id;
encoder = msm_dsi->encoder;
bridge = &dsi_bridge->base;
bridge->funcs = &dsi_mgr_bridge_funcs;
drm_bridge_add(bridge);
ret = devm_drm_bridge_add(msm_dsi->dev->dev, bridge);
if (ret)
return ret;
ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret)
goto fail;
return ret;
return bridge;
msm_dsi->bridge = bridge;
fail:
if (bridge)
msm_dsi_manager_bridge_destroy(bridge);
return ERR_PTR(ret);
return 0;
}
int msm_dsi_manager_ext_bridge_init(u8 id)
@ -557,11 +552,6 @@ int msm_dsi_manager_ext_bridge_init(u8 id)
return 0;
}
void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge)
{
drm_bridge_remove(bridge);
}
int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);

View File

@ -160,24 +160,16 @@ fail:
int msm_hdmi_modeset_init(struct hdmi *hdmi,
struct drm_device *dev, struct drm_encoder *encoder)
{
struct msm_drm_private *priv = dev->dev_private;
int ret;
if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) {
DRM_DEV_ERROR(dev->dev, "too many bridges\n");
return -ENOSPC;
}
hdmi->dev = dev;
hdmi->encoder = encoder;
hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
hdmi->bridge = msm_hdmi_bridge_init(hdmi);
if (IS_ERR(hdmi->bridge)) {
ret = PTR_ERR(hdmi->bridge);
ret = msm_hdmi_bridge_init(hdmi);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to create HDMI bridge: %d\n", ret);
hdmi->bridge = NULL;
goto fail;
}
@ -215,16 +207,9 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
priv->bridges[priv->num_bridges++] = hdmi->bridge;
return 0;
fail:
/* bridge is normally destroyed by drm: */
if (hdmi->bridge) {
msm_hdmi_bridge_destroy(hdmi->bridge);
hdmi->bridge = NULL;
}
if (hdmi->connector) {
hdmi->connector->funcs->destroy(hdmi->connector);
hdmi->connector = NULL;
@ -395,6 +380,9 @@ static void msm_hdmi_unbind(struct device *dev, struct device *master,
if (priv->hdmi->audio_pdev)
platform_device_unregister(priv->hdmi->audio_pdev);
if (priv->hdmi->bridge)
msm_hdmi_hpd_disable(priv->hdmi);
msm_hdmi_destroy(priv->hdmi);
priv->hdmi = NULL;
}

View File

@ -224,14 +224,13 @@ void msm_hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate);
* hdmi bridge:
*/
struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi);
void msm_hdmi_bridge_destroy(struct drm_bridge *bridge);
int msm_hdmi_bridge_init(struct hdmi *hdmi);
void msm_hdmi_hpd_irq(struct drm_bridge *bridge);
enum drm_connector_status msm_hdmi_bridge_detect(
struct drm_bridge *bridge);
int msm_hdmi_hpd_enable(struct drm_bridge *bridge);
void msm_hdmi_hpd_disable(struct hdmi_bridge *hdmi_bridge);
void msm_hdmi_hpd_disable(struct hdmi *hdmi);
/*
* i2c adapter for ddc:

View File

@ -11,14 +11,6 @@
#include "msm_kms.h"
#include "hdmi.h"
void msm_hdmi_bridge_destroy(struct drm_bridge *bridge)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
msm_hdmi_hpd_disable(hdmi_bridge);
drm_bridge_remove(bridge);
}
static void msm_hdmi_power_on(struct drm_bridge *bridge)
{
struct drm_device *dev = bridge->dev;
@ -317,7 +309,7 @@ msm_hdmi_hotplug_work(struct work_struct *work)
}
/* initialize bridge */
struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi)
int msm_hdmi_bridge_init(struct hdmi *hdmi)
{
struct drm_bridge *bridge = NULL;
struct hdmi_bridge *hdmi_bridge;
@ -325,10 +317,8 @@ struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi)
hdmi_bridge = devm_kzalloc(hdmi->dev->dev,
sizeof(*hdmi_bridge), GFP_KERNEL);
if (!hdmi_bridge) {
ret = -ENOMEM;
goto fail;
}
if (!hdmi_bridge)
return -ENOMEM;
hdmi_bridge->hdmi = hdmi;
INIT_WORK(&hdmi_bridge->hpd_work, msm_hdmi_hotplug_work);
@ -341,17 +331,15 @@ struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi)
DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_EDID;
drm_bridge_add(bridge);
ret = devm_drm_bridge_add(hdmi->dev->dev, bridge);
if (ret)
return ret;
ret = drm_bridge_attach(hdmi->encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
goto fail;
return ret;
return bridge;
hdmi->bridge = bridge;
fail:
if (bridge)
msm_hdmi_bridge_destroy(bridge);
return ERR_PTR(ret);
return 0;
}

View File

@ -147,9 +147,8 @@ fail:
return ret;
}
void msm_hdmi_hpd_disable(struct hdmi_bridge *hdmi_bridge)
void msm_hdmi_hpd_disable(struct hdmi *hdmi)
{
struct hdmi *hdmi = hdmi_bridge->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
struct device *dev = &hdmi->pdev->dev;
int ret;

View File

@ -266,6 +266,9 @@ static int msm_fb_show(struct seq_file *m, void *arg)
static struct drm_info_list msm_debugfs_list[] = {
{"gem", msm_gem_show},
{ "mm", msm_mm_show },
};
static struct drm_info_list msm_kms_debugfs_list[] = {
{ "fb", msm_fb_show },
};
@ -314,8 +317,13 @@ void msm_debugfs_init(struct drm_minor *minor)
debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root,
dev, &msm_gpu_fops);
debugfs_create_file("kms", S_IRUSR, minor->debugfs_root,
dev, &msm_kms_fops);
if (priv->kms) {
drm_debugfs_create_files(msm_kms_debugfs_list,
ARRAY_SIZE(msm_kms_debugfs_list),
minor->debugfs_root, minor);
debugfs_create_file("kms", S_IRUSR, minor->debugfs_root,
dev, &msm_kms_fops);
}
debugfs_create_u32("hangcheck_period_ms", 0600, minor->debugfs_root,
&priv->hangcheck_period);

View File

@ -7,29 +7,17 @@
#include <linux/dma-mapping.h>
#include <linux/fault-inject.h>
#include <linux/kthread.h>
#include <linux/of_address.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <uapi/linux/sched/types.h>
#include <drm/drm_aperture.h>
#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>
#include <drm/drm_of.h>
#include <drm/drm_vblank.h>
#include "disp/msm_disp_snapshot.h"
#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_kms.h"
#include "msm_mmu.h"
#include "adreno/adreno_gpu.h"
/*
@ -56,16 +44,6 @@
static void msm_deinit_vram(struct drm_device *ddev);
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = msm_framebuffer_create,
.atomic_check = msm_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
};
static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
.atomic_commit_tail = msm_atomic_commit_tail,
};
static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);
@ -83,125 +61,11 @@ DECLARE_FAULT_ATTR(fail_gem_alloc);
DECLARE_FAULT_ATTR(fail_gem_iova);
#endif
static irqreturn_t msm_irq(int irq, void *arg)
{
struct drm_device *dev = arg;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
BUG_ON(!kms);
return kms->funcs->irq(kms);
}
static void msm_irq_preinstall(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
BUG_ON(!kms);
kms->funcs->irq_preinstall(kms);
}
static int msm_irq_postinstall(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
BUG_ON(!kms);
if (kms->funcs->irq_postinstall)
return kms->funcs->irq_postinstall(kms);
return 0;
}
static int msm_irq_install(struct drm_device *dev, unsigned int irq)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
int ret;
if (irq == IRQ_NOTCONNECTED)
return -ENOTCONN;
msm_irq_preinstall(dev);
ret = request_irq(irq, msm_irq, 0, dev->driver->name, dev);
if (ret)
return ret;
kms->irq_requested = true;
ret = msm_irq_postinstall(dev);
if (ret) {
free_irq(irq, dev);
return ret;
}
return 0;
}
static void msm_irq_uninstall(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
kms->funcs->irq_uninstall(kms);
if (kms->irq_requested)
free_irq(kms->irq, dev);
}
struct msm_vblank_work {
struct work_struct work;
struct drm_crtc *crtc;
bool enable;
struct msm_drm_private *priv;
};
static void vblank_ctrl_worker(struct work_struct *work)
{
struct msm_vblank_work *vbl_work = container_of(work,
struct msm_vblank_work, work);
struct msm_drm_private *priv = vbl_work->priv;
struct msm_kms *kms = priv->kms;
if (vbl_work->enable)
kms->funcs->enable_vblank(kms, vbl_work->crtc);
else
kms->funcs->disable_vblank(kms, vbl_work->crtc);
kfree(vbl_work);
}
static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
struct drm_crtc *crtc, bool enable)
{
struct msm_vblank_work *vbl_work;
vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
if (!vbl_work)
return -ENOMEM;
INIT_WORK(&vbl_work->work, vblank_ctrl_worker);
vbl_work->crtc = crtc;
vbl_work->enable = enable;
vbl_work->priv = priv;
queue_work(priv->wq, &vbl_work->work);
return 0;
}
static int msm_drm_uninit(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct msm_drm_private *priv = platform_get_drvdata(pdev);
struct drm_device *ddev = priv->dev;
struct msm_kms *kms = priv->kms;
int i;
/*
* Shutdown the hw if we're far enough along where things might be on.
@ -212,7 +76,8 @@ static int msm_drm_uninit(struct device *dev)
*/
if (ddev->registered) {
drm_dev_unregister(ddev);
drm_atomic_helper_shutdown(ddev);
if (priv->kms)
drm_atomic_helper_shutdown(ddev);
}
/* We must cancel and cleanup any pending vblank enable/disable
@ -222,36 +87,13 @@ static int msm_drm_uninit(struct device *dev)
flush_workqueue(priv->wq);
/* clean up event worker threads */
for (i = 0; i < priv->num_crtcs; i++) {
if (priv->event_thread[i].worker)
kthread_destroy_worker(priv->event_thread[i].worker);
}
msm_gem_shrinker_cleanup(ddev);
drm_kms_helper_poll_fini(ddev);
msm_perf_debugfs_cleanup(priv);
msm_rd_debugfs_cleanup(priv);
if (kms)
msm_disp_snapshot_destroy(ddev);
drm_mode_config_cleanup(ddev);
for (i = 0; i < priv->num_bridges; i++)
drm_bridge_remove(priv->bridges[i]);
priv->num_bridges = 0;
if (kms) {
pm_runtime_get_sync(dev);
msm_irq_uninstall(ddev);
pm_runtime_put_sync(dev);
}
if (kms && kms->funcs)
kms->funcs->destroy(kms);
if (priv->kms)
msm_drm_kms_uninit(dev);
msm_deinit_vram(ddev);
@ -265,42 +107,6 @@ static int msm_drm_uninit(struct device *dev)
return 0;
}
struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
{
struct msm_gem_address_space *aspace;
struct msm_mmu *mmu;
struct device *mdp_dev = dev->dev;
struct device *mdss_dev = mdp_dev->parent;
struct device *iommu_dev;
/*
* IOMMUs can be a part of MDSS device tree binding, or the
* MDP/DPU device.
*/
if (device_iommu_mapped(mdp_dev))
iommu_dev = mdp_dev;
else
iommu_dev = mdss_dev;
mmu = msm_iommu_new(iommu_dev, 0);
if (IS_ERR(mmu))
return ERR_CAST(mmu);
if (!mmu) {
drm_info(dev, "no IOMMU, fallback to phys contig buffers for scanout\n");
return NULL;
}
aspace = msm_gem_address_space_create(mmu, "mdp_kms",
0x1000, 0x100000000 - 0x1000);
if (IS_ERR(aspace)) {
dev_err(mdp_dev, "aspace create, error %pe\n", aspace);
mmu->funcs->destroy(mmu);
}
return aspace;
}
bool msm_use_mmu(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
@ -406,8 +212,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
{
struct msm_drm_private *priv = dev_get_drvdata(dev);
struct drm_device *ddev;
struct msm_kms *kms;
struct drm_crtc *crtc;
int ret;
if (drm_firmware_drivers_only())
@ -444,11 +248,15 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
might_lock(&priv->lru.lock);
fs_reclaim_release(GFP_KERNEL);
drm_mode_config_init(ddev);
if (priv->kms_init) {
ret = drmm_mode_config_init(ddev);
if (ret)
goto err_destroy_wq;
}
ret = msm_init_vram(ddev);
if (ret)
goto err_cleanup_mode_config;
goto err_destroy_wq;
dma_set_max_seg_size(dev, UINT_MAX);
@ -457,96 +265,33 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
if (ret)
goto err_deinit_vram;
/* the fw fb could be anywhere in memory */
ret = drm_aperture_remove_framebuffers(drv);
if (ret)
goto err_msm_uninit;
msm_gem_shrinker_init(ddev);
if (priv->kms_init) {
ret = priv->kms_init(ddev);
if (ret) {
DRM_DEV_ERROR(dev, "failed to load kms\n");
priv->kms = NULL;
ret = msm_drm_kms_init(dev, drv);
if (ret)
goto err_msm_uninit;
}
kms = priv->kms;
} else {
/* valid only for the dummy headless case, where of_node=NULL */
WARN_ON(dev->of_node);
kms = NULL;
}
/* Enable normalization of plane zpos */
ddev->mode_config.normalize_zpos = true;
if (kms) {
kms->dev = ddev;
ret = kms->funcs->hw_init(kms);
if (ret) {
DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
goto err_msm_uninit;
}
}
drm_helper_move_panel_connectors_to_head(ddev);
ddev->mode_config.funcs = &mode_config_funcs;
ddev->mode_config.helper_private = &mode_config_helper_funcs;
drm_for_each_crtc(crtc, ddev) {
struct msm_drm_thread *ev_thread;
/* initialize event thread */
ev_thread = &priv->event_thread[drm_crtc_index(crtc)];
ev_thread->dev = ddev;
ev_thread->worker = kthread_create_worker(0, "crtc_event:%d", crtc->base.id);
if (IS_ERR(ev_thread->worker)) {
ret = PTR_ERR(ev_thread->worker);
DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
ev_thread->worker = NULL;
goto err_msm_uninit;
}
sched_set_fifo(ev_thread->worker->task);
}
ret = drm_vblank_init(ddev, priv->num_crtcs);
if (ret < 0) {
DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
goto err_msm_uninit;
}
if (kms) {
pm_runtime_get_sync(dev);
ret = msm_irq_install(ddev, kms->irq);
pm_runtime_put_sync(dev);
if (ret < 0) {
DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
goto err_msm_uninit;
}
ddev->driver_features &= ~DRIVER_MODESET;
ddev->driver_features &= ~DRIVER_ATOMIC;
}
ret = drm_dev_register(ddev, 0);
if (ret)
goto err_msm_uninit;
if (kms) {
ret = msm_disp_snapshot_init(ddev);
if (ret)
DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
}
drm_mode_config_reset(ddev);
ret = msm_debugfs_late_init(ddev);
if (ret)
goto err_msm_uninit;
drm_kms_helper_poll_init(ddev);
if (kms)
if (priv->kms_init) {
drm_kms_helper_poll_init(ddev);
msm_fbdev_setup(ddev);
}
return 0;
@ -557,8 +302,7 @@ err_msm_uninit:
err_deinit_vram:
msm_deinit_vram(ddev);
err_cleanup_mode_config:
drm_mode_config_cleanup(ddev);
err_destroy_wq:
destroy_workqueue(priv->wq);
err_put_dev:
drm_dev_put(ddev);
@ -638,28 +382,6 @@ static void msm_postclose(struct drm_device *dev, struct drm_file *file)
context_close(ctx);
}
int msm_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
if (!kms)
return -ENXIO;
drm_dbg_vbl(dev, "crtc=%u", crtc->base.id);
return vblank_ctrl_queue_work(priv, crtc, true);
}
void msm_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
if (!kms)
return;
drm_dbg_vbl(dev, "crtc=%u", crtc->base.id);
vblank_ctrl_queue_work(priv, crtc, false);
}
/*
* DRM ioctls:
*/
@ -1102,33 +824,6 @@ static const struct drm_driver msm_driver = {
.patchlevel = MSM_VERSION_PATCHLEVEL,
};
int msm_pm_prepare(struct device *dev)
{
struct msm_drm_private *priv = dev_get_drvdata(dev);
struct drm_device *ddev = priv ? priv->dev : NULL;
if (!priv || !priv->kms)
return 0;
return drm_mode_config_helper_suspend(ddev);
}
void msm_pm_complete(struct device *dev)
{
struct msm_drm_private *priv = dev_get_drvdata(dev);
struct drm_device *ddev = priv ? priv->dev : NULL;
if (!priv || !priv->kms)
return;
drm_mode_config_helper_resume(ddev);
}
static const struct dev_pm_ops msm_pm_ops = {
.prepare = msm_pm_prepare,
.complete = msm_pm_complete,
};
/*
* Componentized driver support:
*/
@ -1230,7 +925,8 @@ const struct component_master_ops msm_drm_ops = {
};
int msm_drv_probe(struct device *master_dev,
int (*kms_init)(struct drm_device *dev))
int (*kms_init)(struct drm_device *dev),
struct msm_kms *kms)
{
struct msm_drm_private *priv;
struct component_match *match = NULL;
@ -1240,6 +936,7 @@ int msm_drv_probe(struct device *master_dev,
if (!priv)
return -ENOMEM;
priv->kms = kms;
priv->kms_init = kms_init;
dev_set_drvdata(master_dev, priv);
@ -1275,7 +972,7 @@ int msm_drv_probe(struct device *master_dev,
static int msm_pdev_probe(struct platform_device *pdev)
{
return msm_drv_probe(&pdev->dev, NULL);
return msm_drv_probe(&pdev->dev, NULL, NULL);
}
static void msm_pdev_remove(struct platform_device *pdev)
@ -1283,29 +980,11 @@ static void msm_pdev_remove(struct platform_device *pdev)
component_master_del(&pdev->dev, &msm_drm_ops);
}
void msm_drv_shutdown(struct platform_device *pdev)
{
struct msm_drm_private *priv = platform_get_drvdata(pdev);
struct drm_device *drm = priv ? priv->dev : NULL;
/*
* Shutdown the hw if we're far enough along where things might be on.
* If we run this too early, we'll end up panicking in any variety of
* places. Since we don't register the drm device until late in
* msm_drm_init, drm_dev->registered is used as an indicator that the
* shutdown will be successful.
*/
if (drm && drm->registered && priv->kms)
drm_atomic_helper_shutdown(drm);
}
static struct platform_driver msm_platform_driver = {
.probe = msm_pdev_probe,
.remove_new = msm_pdev_remove,
.shutdown = msm_drv_shutdown,
.driver = {
.name = "msm",
.pm = &msm_pm_ops,
},
};
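With the KMS pieces split out, msm_drv_probe() grows a third argument: sub-drivers hand in both their kms_init callback and the pre-allocated struct msm_kms, while headless (GPU-only) devices pass NULL for both and all modeset, vblank and KMS-IRQ setup is skipped. A sketch of the two call styles, with hypothetical driver names:

#include <linux/platform_device.h>

#include "msm_drv.h"
#include "msm_kms.h"

struct example_kms {
        struct msm_kms base;
        /* hardware-specific state would follow */
};

static int example_kms_init(struct drm_device *dev)
{
        /* create planes/crtcs/encoders here, as mdp5_kms_init does */
        return 0;
}

static int example_kms_probe(struct platform_device *pdev)
{
        struct example_kms *kms;

        kms = devm_kzalloc(&pdev->dev, sizeof(*kms), GFP_KERNEL);
        if (!kms)
                return -ENOMEM;

        return msm_drv_probe(&pdev->dev, example_kms_init, &kms->base);
}

static int example_headless_probe(struct platform_device *pdev)
{
        /* no KMS: msm_drm_init() skips all modeset/vblank/irq setup */
        return msm_drv_probe(&pdev->dev, NULL, NULL);
}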

View File

@ -206,9 +206,6 @@ struct msm_drm_private {
struct msm_drm_thread event_thread[MAX_CRTCS];
unsigned int num_bridges;
struct drm_bridge *bridges[MAX_BRIDGES];
/* VRAM carveout, used when no IOMMU: */
struct {
unsigned long size;
@ -343,6 +340,7 @@ void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi
bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi);
bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi);
bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi);
bool msm_dsi_wide_bus_enabled(struct msm_dsi *msm_dsi);
struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi);
#else
static inline void __init msm_dsi_register(void)
@ -372,6 +370,10 @@ static inline bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi)
{
return false;
}
static inline bool msm_dsi_wide_bus_enabled(struct msm_dsi *msm_dsi)
{
return false;
}
static inline struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi)
{
@ -561,12 +563,13 @@ static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
extern const struct component_master_ops msm_drm_ops;
int msm_pm_prepare(struct device *dev);
void msm_pm_complete(struct device *dev);
int msm_kms_pm_prepare(struct device *dev);
void msm_kms_pm_complete(struct device *dev);
int msm_drv_probe(struct device *dev,
int (*kms_init)(struct drm_device *dev));
void msm_drv_shutdown(struct platform_device *pdev);
int (*kms_init)(struct drm_device *dev),
struct msm_kms *kms);
void msm_kms_shutdown(struct platform_device *pdev);
#endif /* __MSM_DRV_H__ */

View File

@ -0,0 +1,345 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2018, 2020-2021 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*/
#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <uapi/linux/sched/types.h>
#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_vblank.h>
#include "disp/msm_disp_snapshot.h"
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"
#include "msm_mmu.h"
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = msm_framebuffer_create,
.atomic_check = msm_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
};
static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
.atomic_commit_tail = msm_atomic_commit_tail,
};
static irqreturn_t msm_irq(int irq, void *arg)
{
struct drm_device *dev = arg;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
BUG_ON(!kms);
return kms->funcs->irq(kms);
}
static void msm_irq_preinstall(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
BUG_ON(!kms);
kms->funcs->irq_preinstall(kms);
}
static int msm_irq_postinstall(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
BUG_ON(!kms);
if (kms->funcs->irq_postinstall)
return kms->funcs->irq_postinstall(kms);
return 0;
}
static int msm_irq_install(struct drm_device *dev, unsigned int irq)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
int ret;
if (irq == IRQ_NOTCONNECTED)
return -ENOTCONN;
msm_irq_preinstall(dev);
ret = request_irq(irq, msm_irq, 0, dev->driver->name, dev);
if (ret)
return ret;
kms->irq_requested = true;
ret = msm_irq_postinstall(dev);
if (ret) {
free_irq(irq, dev);
return ret;
}
return 0;
}
static void msm_irq_uninstall(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
kms->funcs->irq_uninstall(kms);
if (kms->irq_requested)
free_irq(kms->irq, dev);
}
struct msm_vblank_work {
struct work_struct work;
struct drm_crtc *crtc;
bool enable;
struct msm_drm_private *priv;
};
static void vblank_ctrl_worker(struct work_struct *work)
{
struct msm_vblank_work *vbl_work = container_of(work,
struct msm_vblank_work, work);
struct msm_drm_private *priv = vbl_work->priv;
struct msm_kms *kms = priv->kms;
if (vbl_work->enable)
kms->funcs->enable_vblank(kms, vbl_work->crtc);
else
kms->funcs->disable_vblank(kms, vbl_work->crtc);
kfree(vbl_work);
}
static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
struct drm_crtc *crtc, bool enable)
{
struct msm_vblank_work *vbl_work;
vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
if (!vbl_work)
return -ENOMEM;
INIT_WORK(&vbl_work->work, vblank_ctrl_worker);
vbl_work->crtc = crtc;
vbl_work->enable = enable;
vbl_work->priv = priv;
queue_work(priv->wq, &vbl_work->work);
return 0;
}
int msm_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
if (!kms)
return -ENXIO;
drm_dbg_vbl(dev, "crtc=%u", crtc->base.id);
return vblank_ctrl_queue_work(priv, crtc, true);
}
void msm_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
if (!kms)
return;
drm_dbg_vbl(dev, "crtc=%u", crtc->base.id);
vblank_ctrl_queue_work(priv, crtc, false);
}
struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
{
struct msm_gem_address_space *aspace;
struct msm_mmu *mmu;
struct device *mdp_dev = dev->dev;
struct device *mdss_dev = mdp_dev->parent;
struct device *iommu_dev;
/*
* IOMMUs can be a part of MDSS device tree binding, or the
* MDP/DPU device.
*/
if (device_iommu_mapped(mdp_dev))
iommu_dev = mdp_dev;
else
iommu_dev = mdss_dev;
mmu = msm_iommu_new(iommu_dev, 0);
if (IS_ERR(mmu))
return ERR_CAST(mmu);
if (!mmu) {
drm_info(dev, "no IOMMU, fallback to phys contig buffers for scanout\n");
return NULL;
}
aspace = msm_gem_address_space_create(mmu, "mdp_kms",
0x1000, 0x100000000 - 0x1000);
if (IS_ERR(aspace)) {
dev_err(mdp_dev, "aspace create, error %pe\n", aspace);
mmu->funcs->destroy(mmu);
}
return aspace;
}
void msm_drm_kms_uninit(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct msm_drm_private *priv = platform_get_drvdata(pdev);
struct drm_device *ddev = priv->dev;
struct msm_kms *kms = priv->kms;
int i;
BUG_ON(!kms);
/* clean up event worker threads */
for (i = 0; i < priv->num_crtcs; i++) {
if (priv->event_thread[i].worker)
kthread_destroy_worker(priv->event_thread[i].worker);
}
drm_kms_helper_poll_fini(ddev);
msm_disp_snapshot_destroy(ddev);
pm_runtime_get_sync(dev);
msm_irq_uninstall(ddev);
pm_runtime_put_sync(dev);
if (kms && kms->funcs)
kms->funcs->destroy(kms);
}
int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
{
struct msm_drm_private *priv = dev_get_drvdata(dev);
struct drm_device *ddev = priv->dev;
struct msm_kms *kms = priv->kms;
struct drm_crtc *crtc;
int ret;
/* the fw fb could be anywhere in memory */
ret = drm_aperture_remove_framebuffers(drv);
if (ret)
return ret;
ret = priv->kms_init(ddev);
if (ret) {
DRM_DEV_ERROR(dev, "failed to load kms\n");
priv->kms = NULL;
return ret;
}
/* Enable normalization of plane zpos */
ddev->mode_config.normalize_zpos = true;
ddev->mode_config.funcs = &mode_config_funcs;
ddev->mode_config.helper_private = &mode_config_helper_funcs;
kms->dev = ddev;
ret = kms->funcs->hw_init(kms);
if (ret) {
DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
goto err_msm_uninit;
}
drm_helper_move_panel_connectors_to_head(ddev);
drm_for_each_crtc(crtc, ddev) {
struct msm_drm_thread *ev_thread;
/* initialize event thread */
ev_thread = &priv->event_thread[drm_crtc_index(crtc)];
ev_thread->dev = ddev;
ev_thread->worker = kthread_create_worker(0, "crtc_event:%d", crtc->base.id);
if (IS_ERR(ev_thread->worker)) {
ret = PTR_ERR(ev_thread->worker);
DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
ev_thread->worker = NULL;
goto err_msm_uninit;
}
sched_set_fifo(ev_thread->worker->task);
}
ret = drm_vblank_init(ddev, priv->num_crtcs);
if (ret < 0) {
DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
goto err_msm_uninit;
}
pm_runtime_get_sync(dev);
ret = msm_irq_install(ddev, kms->irq);
pm_runtime_put_sync(dev);
if (ret < 0) {
DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
goto err_msm_uninit;
}
ret = msm_disp_snapshot_init(ddev);
if (ret)
DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
drm_mode_config_reset(ddev);
return 0;
err_msm_uninit:
return ret;
}
int msm_kms_pm_prepare(struct device *dev)
{
struct msm_drm_private *priv = dev_get_drvdata(dev);
struct drm_device *ddev = priv ? priv->dev : NULL;
if (!priv || !priv->kms)
return 0;
return drm_mode_config_helper_suspend(ddev);
}
void msm_kms_pm_complete(struct device *dev)
{
struct msm_drm_private *priv = dev_get_drvdata(dev);
struct drm_device *ddev = priv ? priv->dev : NULL;
if (!priv || !priv->kms)
return;
drm_mode_config_helper_resume(ddev);
}
void msm_kms_shutdown(struct platform_device *pdev)
{
struct msm_drm_private *priv = platform_get_drvdata(pdev);
struct drm_device *drm = priv ? priv->dev : NULL;
/*
* Shutdown the hw if we're far enough along where things might be on.
* If we run this too early, we'll end up panicking in any variety of
* places. Since we don't register the drm device until late in
* msm_drm_init, drm_dev->registered is used as an indicator that the
* shutdown will be successful.
*/
if (drm && drm->registered && priv->kms)
drm_atomic_helper_shutdown(drm);
}
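msm_kms_init_aspace() moves here unchanged, so it is worth noting its three-way contract, which callers must handle: ERR_PTR() when IOMMU setup fails, NULL when no IOMMU is present (scanout falls back to physically contiguous buffers), or a valid address space. A small sketch of a conforming caller (name illustrative):

#include <linux/err.h>

#include "msm_drv.h"

static int example_create_aspace(struct drm_device *dev,
                                 struct msm_gem_address_space **out)
{
        struct msm_gem_address_space *aspace = msm_kms_init_aspace(dev);

        if (IS_ERR(aspace))
                return PTR_ERR(aspace);

        *out = aspace;  /* may legitimately be NULL: no-IOMMU fallback */
        return 0;
}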

View File

@ -195,4 +195,7 @@ static inline void msm_kms_destroy(struct msm_kms *kms)
drm_for_each_crtc_reverse(crtc, dev) \
for_each_if (drm_crtc_mask(crtc) & (crtc_mask))
int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv);
void msm_drm_kms_uninit(struct device *dev);
#endif /* __MSM_KMS_H__ */

View File

@ -30,6 +30,8 @@ struct msm_gpu_submit_stats {
struct msm_rbmemptrs {
volatile uint32_t rptr;
volatile uint32_t fence;
/* Introduced on A7xx */
volatile uint32_t bv_fence;
volatile struct msm_gpu_submit_stats stats[MSM_GPU_SUBMIT_STATS_COUNT];
volatile u64 ttbr0;