From cd285535b8eeed314813783ba9b5a851bdf6bca8 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Sat, 2 Apr 2022 17:55:50 +0200 Subject: [PATCH 01/17] dt-bindings: mailbox: qcom-ipcc: simplify the example Consumer examples in the bindings of resource providers are trivial, useless and duplicating code. Additionally the incomplete qcom,smp2p example triggers DT schema warnings. Cleanup the example by removing the consumer part and fixing the indentation to DT schema convention. Reported-by: Rob Herring Signed-off-by: Krzysztof Kozlowski Reviewed-by: Rob Herring Signed-off-by: Jassi Brar --- .../bindings/mailbox/qcom-ipcc.yaml | 29 +++++++------------ 1 file changed, 10 insertions(+), 19 deletions(-) diff --git a/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml b/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml index 866efb278813..dfdc72345a2a 100644 --- a/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml +++ b/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml @@ -61,23 +61,14 @@ additionalProperties: false examples: - | - #include - #include + #include + #include - mailbox@408000 { - compatible = "qcom,sm8250-ipcc", "qcom,ipcc"; - reg = <0x408000 0x1000>; - interrupts = ; - interrupt-controller; - #interrupt-cells = <3>; - #mbox-cells = <2>; - }; - - smp2p-modem { - compatible = "qcom,smp2p"; - interrupts-extended = <&ipcc_mproc IPCC_CLIENT_MPSS - IPCC_MPROC_SIGNAL_SMP2P IRQ_TYPE_EDGE_RISING>; - mboxes = <&ipcc_mproc IPCC_CLIENT_MPSS IPCC_MPROC_SIGNAL_SMP2P>; - - /* Other SMP2P fields */ - }; + mailbox@408000 { + compatible = "qcom,sm8250-ipcc", "qcom,ipcc"; + reg = <0x408000 0x1000>; + interrupts = ; + interrupt-controller; + #interrupt-cells = <3>; + #mbox-cells = <2>; + }; From 262190a8ca2b1e1ec75b8a4f1c7f07e585facd6f Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Sun, 3 Apr 2022 10:53:04 -0400 Subject: [PATCH 02/17] mailbox: imx: remove redundant initializer Smatch reports this issue imx-mailbox.c:887:10: warning: Initializer entry defined twice imx-mailbox.c:889:10: also defined here .rxdb = imx_mu_generic_rxdb, Is listed twice, so remove one. Signed-off-by: Tom Rix Signed-off-by: Jassi Brar --- drivers/mailbox/imx-mailbox.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c index e88f544a1548..df8a785be324 100644 --- a/drivers/mailbox/imx-mailbox.c +++ b/drivers/mailbox/imx-mailbox.c @@ -886,7 +886,6 @@ static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp = { .rx = imx_mu_generic_rx, .rxdb = imx_mu_generic_rxdb, .init = imx_mu_init_generic, - .rxdb = imx_mu_generic_rxdb, .type = IMX_MU_V2, .xTR = 0x200, .xRR = 0x280, From 1b0070aca35ed70f9cc6f97b5d9f7f98cb4771fd Mon Sep 17 00:00:00 2001 From: Xiaomeng Tong Date: Tue, 5 Apr 2022 16:05:56 +0800 Subject: [PATCH 03/17] mailbox: remove an unneeded NULL check on list iterator The list iterator is always non-NULL so it doesn't need to be checked. Thus just remove the unnecessary NULL check. 
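For reference, the reasoning in one picture: list_for_each_entry() computes its cursor with container_of() from the embedded list_head, so the cursor can never be NULL inside the loop; termination works by comparing the cursor against the list head, not by hitting a NULL entry. A minimal sketch of the patched loop with that spelled out (same code as the hunk below, comments added for illustration only):

	struct tegra_hsp_doorbell *db;

	list_for_each_entry(db, &hsp->doorbells, list) {
		/*
		 * db is container_of(<cursor>, struct tegra_hsp_doorbell, list),
		 * i.e. a fixed offset from a valid embedded list_head, so it is
		 * never NULL here; the loop ends when the cursor wraps back to
		 * &hsp->doorbells.
		 */
		if (db->channel.chan)
			tegra_hsp_doorbell_startup(db->channel.chan);
	}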
Signed-off-by: Xiaomeng Tong Signed-off-by: Jassi Brar --- drivers/mailbox/tegra-hsp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c index 78f7265039c6..a55d6df99697 100644 --- a/drivers/mailbox/tegra-hsp.c +++ b/drivers/mailbox/tegra-hsp.c @@ -804,7 +804,7 @@ static int __maybe_unused tegra_hsp_resume(struct device *dev) struct tegra_hsp_doorbell *db; list_for_each_entry(db, &hsp->doorbells, list) { - if (db && db->channel.chan) + if (db->channel.chan) tegra_hsp_doorbell_startup(db->channel.chan); } From 1b3418ac6451b771b677e049b8a48d96e258e922 Mon Sep 17 00:00:00 2001 From: Yongqiang Niu Date: Wed, 6 Apr 2022 17:12:13 +0800 Subject: [PATCH 04/17] dt-bindings: gce: add the GCE header file for MT8186 Add the GCE header file to define GCE subsys ids, hardware event ids and constants for MT8186. Signed-off-by: Yongqiang Niu Acked-by: Rob Herring Reviewed-by: AngeloGioacchino Del Regno Signed-off-by: Jassi Brar --- include/dt-bindings/gce/mt8186-gce.h | 421 +++++++++++++++++++++++++++ 1 file changed, 421 insertions(+) create mode 100644 include/dt-bindings/gce/mt8186-gce.h diff --git a/include/dt-bindings/gce/mt8186-gce.h b/include/dt-bindings/gce/mt8186-gce.h new file mode 100644 index 000000000000..f12e3cb586ce --- /dev/null +++ b/include/dt-bindings/gce/mt8186-gce.h @@ -0,0 +1,421 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (C) 2022 MediaTek Inc. + * Author: Yongqiang Niu + */ + +#ifndef _DT_BINDINGS_GCE_MT8186_H +#define _DT_BINDINGS_GCE_MT8186_H + +/* assign timeout 0 also means default */ +#define CMDQ_NO_TIMEOUT 0xffffffff +#define CMDQ_TIMEOUT_DEFAULT 1000 + +/* GCE thread priority */ +#define CMDQ_THR_PRIO_LOWEST 0 +#define CMDQ_THR_PRIO_1 1 +#define CMDQ_THR_PRIO_2 2 +#define CMDQ_THR_PRIO_3 3 +#define CMDQ_THR_PRIO_4 4 +#define CMDQ_THR_PRIO_5 5 +#define CMDQ_THR_PRIO_6 6 +#define CMDQ_THR_PRIO_HIGHEST 7 + +/* CPR count in 32bit register */ +#define GCE_CPR_COUNT 1312 + +/* GCE subsys table */ +#define SUBSYS_1300XXXX 0 +#define SUBSYS_1400XXXX 1 +#define SUBSYS_1401XXXX 2 +#define SUBSYS_1402XXXX 3 +#define SUBSYS_1502XXXX 4 +#define SUBSYS_1582XXXX 5 +#define SUBSYS_1B00XXXX 6 +#define SUBSYS_1C00XXXX 7 +#define SUBSYS_1C10XXXX 8 +#define SUBSYS_1000XXXX 9 +#define SUBSYS_1001XXXX 10 +#define SUBSYS_1020XXXX 11 +#define SUBSYS_1021XXXX 12 +#define SUBSYS_1022XXXX 13 +#define SUBSYS_1023XXXX 14 +#define SUBSYS_1060XXXX 15 +#define SUBSYS_1602XXXX 16 +#define SUBSYS_1608XXXX 17 +#define SUBSYS_1700XXXX 18 +#define SUBSYS_1701XXXX 19 +#define SUBSYS_1702XXXX 20 +#define SUBSYS_1703XXXX 21 +#define SUBSYS_1706XXXX 22 +#define SUBSYS_1A00XXXX 23 +#define SUBSYS_1A01XXXX 24 +#define SUBSYS_1A02XXXX 25 +#define SUBSYS_1A03XXXX 26 +#define SUBSYS_1A04XXXX 27 +#define SUBSYS_1A05XXXX 28 +#define SUBSYS_1A06XXXX 29 +#define SUBSYS_NO_SUPPORT 99 + +/* GCE General Purpose Register (GPR) support + * Leave note for scenario usage here + */ +/* GCE: write mask */ +#define GCE_GPR_R00 0x00 +#define GCE_GPR_R01 0x01 +/* MDP: P1: JPEG dest */ +#define GCE_GPR_R02 0x02 +#define GCE_GPR_R03 0x03 +/* MDP: PQ color */ +#define GCE_GPR_R04 0x04 +/* MDP: 2D sharpness */ +#define GCE_GPR_R05 0x05 +/* DISP: poll esd */ +#define GCE_GPR_R06 0x06 +#define GCE_GPR_R07 0x07 +/* MDP: P4: 2D sharpness dst */ +#define GCE_GPR_R08 0x08 +#define GCE_GPR_R09 0x09 +/* VCU: poll with timeout for GPR timer */ +#define GCE_GPR_R10 0x0A +#define GCE_GPR_R11 0x0B +/* CMDQ: debug */ +#define GCE_GPR_R12 
0x0C +#define GCE_GPR_R13 0x0D +/* CMDQ: P7: debug */ +#define GCE_GPR_R14 0x0E +#define GCE_GPR_R15 0x0F + +/* GCE hardware events */ +/* VDEC */ +#define CMDQ_EVENT_LINE_COUNT_THRESHOLD_INTERRUPT 0 +#define CMDQ_EVENT_VDEC_INT 1 +#define CMDQ_EVENT_VDEC_PAUSE 2 +#define CMDQ_EVENT_VDEC_DEC_ERROR 3 +#define CMDQ_EVENT_MDEC_TIMEOUT 4 +#define CMDQ_EVENT_DRAM_ACCESS_DONE 5 +#define CMDQ_EVENT_INI_FETCH_RDY 6 +#define CMDQ_EVENT_PROCESS_FLAG 7 +#define CMDQ_EVENT_SEARCH_START_CODE_DONE 8 +#define CMDQ_EVENT_REF_REORDER_DONE 9 +#define CMDQ_EVENT_WP_TBLE_DONE 10 +#define CMDQ_EVENT_COUNT_SRAM_CLR_DONE 11 +#define CMDQ_EVENT_GCE_CNT_OP_THRESHOLD 15 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_0 16 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_1 17 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_2 18 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_3 19 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_4 20 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_5 21 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_6 22 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_7 23 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_8 24 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_9 25 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_10 26 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_11 27 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_12 28 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_13 29 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_14 30 +#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_15 31 +#define CMDQ_EVENT_WPE_GCE_FRAME_DONE 32 + +/* CAM */ +#define CMDQ_EVENT_ISP_FRAME_DONE_A 65 +#define CMDQ_EVENT_ISP_FRAME_DONE_B 66 +#define CMDQ_EVENT_CAMSV1_PASS1_DONE 70 +#define CMDQ_EVENT_CAMSV2_PASS1_DONE 71 +#define CMDQ_EVENT_CAMSV3_PASS1_DONE 72 +#define CMDQ_EVENT_MRAW_0_PASS1_DONE 73 +#define CMDQ_EVENT_SENINF_CAM0_FIFO_FULL 75 +#define CMDQ_EVENT_SENINF_CAM1_FIFO_FULL 76 +#define CMDQ_EVENT_SENINF_CAM2_FIFO_FULL 77 +#define CMDQ_EVENT_SENINF_CAM3_FIFO_FULL 78 +#define CMDQ_EVENT_SENINF_CAM4_FIFO_FULL 79 +#define CMDQ_EVENT_SENINF_CAM5_FIFO_FULL 80 +#define CMDQ_EVENT_SENINF_CAM6_FIFO_FULL 81 +#define CMDQ_EVENT_SENINF_CAM7_FIFO_FULL 82 +#define CMDQ_EVENT_SENINF_CAM8_FIFO_FULL 83 +#define CMDQ_EVENT_SENINF_CAM9_FIFO_FULL 84 +#define CMDQ_EVENT_SENINF_CAM10_FIFO_FULL 85 +#define CMDQ_EVENT_SENINF_CAM11_FIFO_FULL 86 +#define CMDQ_EVENT_SENINF_CAM12_FIFO_FULL 87 +#define CMDQ_EVENT_TG_OVRUN_A_INT 88 +#define CMDQ_EVENT_DMA_R1_ERROR_A_INT 89 +#define CMDQ_EVENT_TG_OVRUN_B_INT 90 +#define CMDQ_EVENT_DMA_R1_ERROR_B_INT 91 +#define CMDQ_EVENT_TG_OVRUN_M0_INT 94 +#define CMDQ_EVENT_R1_ERROR_M0_INT 95 +#define CMDQ_EVENT_TG_GRABERR_M0_INT 96 +#define CMDQ_EVENT_TG_GRABERR_A_INT 98 +#define CMDQ_EVENT_CQ_VR_SNAP_A_INT 99 +#define CMDQ_EVENT_TG_GRABERR_B_INT 100 +#define CMDQ_EVENT_CQ_VR_SNAP_B_INT 101 +/* VENC */ +#define CMDQ_EVENT_VENC_CMDQ_FRAME_DONE 129 +#define CMDQ_EVENT_VENC_CMDQ_PAUSE_DONE 130 +#define CMDQ_EVENT_JPGENC_CMDQ_DONE 131 +#define CMDQ_EVENT_VENC_CMDQ_MB_DONE 132 +#define CMDQ_EVENT_VENC_CMDQ_128BYTE_CNT_DONE 133 +#define CMDQ_EVENT_VENC_CMDQ_PPS_DONE 136 +#define CMDQ_EVENT_VENC_CMDQ_SPS_DONE 137 +#define CMDQ_EVENT_VENC_CMDQ_VPS_DONE 138 +/* IPE */ +#define CMDQ_EVENT_FDVT_DONE 161 +#define CMDQ_EVENT_FE_DONE 162 +#define CMDQ_EVENT_RSC_DONE 163 +#define CMDQ_EVENT_DVS_DONE_ASYNC_SHOT 164 +#define CMDQ_EVENT_DVP_DONE_ASYNC_SHOT 165 +/* IMG2 */ +#define CMDQ_EVENT_GCE_IMG2_EVENT0 193 +#define CMDQ_EVENT_GCE_IMG2_EVENT1 194 +#define CMDQ_EVENT_GCE_IMG2_EVENT2 195 +#define CMDQ_EVENT_GCE_IMG2_EVENT3 196 +#define CMDQ_EVENT_GCE_IMG2_EVENT4 197 +#define CMDQ_EVENT_GCE_IMG2_EVENT5 198 +#define 
CMDQ_EVENT_GCE_IMG2_EVENT6 199 +#define CMDQ_EVENT_GCE_IMG2_EVENT7 200 +#define CMDQ_EVENT_GCE_IMG2_EVENT8 201 +#define CMDQ_EVENT_GCE_IMG2_EVENT9 202 +#define CMDQ_EVENT_GCE_IMG2_EVENT10 203 +#define CMDQ_EVENT_GCE_IMG2_EVENT11 204 +#define CMDQ_EVENT_GCE_IMG2_EVENT12 205 +#define CMDQ_EVENT_GCE_IMG2_EVENT13 206 +#define CMDQ_EVENT_GCE_IMG2_EVENT14 207 +#define CMDQ_EVENT_GCE_IMG2_EVENT15 208 +#define CMDQ_EVENT_GCE_IMG2_EVENT16 209 +#define CMDQ_EVENT_GCE_IMG2_EVENT17 210 +#define CMDQ_EVENT_GCE_IMG2_EVENT18 211 +#define CMDQ_EVENT_GCE_IMG2_EVENT19 212 +#define CMDQ_EVENT_GCE_IMG2_EVENT20 213 +#define CMDQ_EVENT_GCE_IMG2_EVENT21 214 +#define CMDQ_EVENT_GCE_IMG2_EVENT22 215 +#define CMDQ_EVENT_GCE_IMG2_EVENT23 216 +/* IMG1 */ +#define CMDQ_EVENT_GCE_IMG1_EVENT0 225 +#define CMDQ_EVENT_GCE_IMG1_EVENT1 226 +#define CMDQ_EVENT_GCE_IMG1_EVENT2 227 +#define CMDQ_EVENT_GCE_IMG1_EVENT3 228 +#define CMDQ_EVENT_GCE_IMG1_EVENT4 229 +#define CMDQ_EVENT_GCE_IMG1_EVENT5 230 +#define CMDQ_EVENT_GCE_IMG1_EVENT6 231 +#define CMDQ_EVENT_GCE_IMG1_EVENT7 232 +#define CMDQ_EVENT_GCE_IMG1_EVENT8 233 +#define CMDQ_EVENT_GCE_IMG1_EVENT9 234 +#define CMDQ_EVENT_GCE_IMG1_EVENT10 235 +#define CMDQ_EVENT_GCE_IMG1_EVENT11 236 +#define CMDQ_EVENT_GCE_IMG1_EVENT12 237 +#define CMDQ_EVENT_GCE_IMG1_EVENT13 238 +#define CMDQ_EVENT_GCE_IMG1_EVENT14 239 +#define CMDQ_EVENT_GCE_IMG1_EVENT15 240 +#define CMDQ_EVENT_GCE_IMG1_EVENT16 241 +#define CMDQ_EVENT_GCE_IMG1_EVENT17 242 +#define CMDQ_EVENT_GCE_IMG1_EVENT18 243 +#define CMDQ_EVENT_GCE_IMG1_EVENT19 244 +#define CMDQ_EVENT_GCE_IMG1_EVENT20 245 +#define CMDQ_EVENT_GCE_IMG1_EVENT21 246 +#define CMDQ_EVENT_GCE_IMG1_EVENT22 247 +#define CMDQ_EVENT_GCE_IMG1_EVENT23 248 +/* MDP */ +#define CMDQ_EVENT_MDP_RDMA0_SOF 256 +#define CMDQ_EVENT_MDP_RDMA1_SOF 257 +#define CMDQ_EVENT_MDP_AAL0_SOF 258 +#define CMDQ_EVENT_MDP_AAL1_SOF 259 +#define CMDQ_EVENT_MDP_HDR0_SOF 260 +#define CMDQ_EVENT_MDP_RSZ0_SOF 261 +#define CMDQ_EVENT_MDP_RSZ1_SOF 262 +#define CMDQ_EVENT_MDP_WROT0_SOF 263 +#define CMDQ_EVENT_MDP_WROT1_SOF 264 +#define CMDQ_EVENT_MDP_TDSHP0_SOF 265 +#define CMDQ_EVENT_MDP_TDSHP1_SOF 266 +#define CMDQ_EVENT_IMG_DL_RELAY0_SOF 267 +#define CMDQ_EVENT_IMG_DL_RELAY1_SOF 268 +#define CMDQ_EVENT_MDP_COLOR0_SOF 269 +#define CMDQ_EVENT_MDP_WROT3_FRAME_DONE 288 +#define CMDQ_EVENT_MDP_WROT2_FRAME_DONE 289 +#define CMDQ_EVENT_MDP_WROT1_FRAME_DONE 290 +#define CMDQ_EVENT_MDP_WROT0_FRAME_DONE 291 +#define CMDQ_EVENT_MDP_TDSHP3_FRAME_DONE 292 +#define CMDQ_EVENT_MDP_TDSHP2_FRAME_DONE 293 +#define CMDQ_EVENT_MDP_TDSHP1_FRAME_DONE 294 +#define CMDQ_EVENT_MDP_TDSHP0_FRAME_DONE 295 +#define CMDQ_EVENT_MDP_RSZ3_FRAME_DONE 296 +#define CMDQ_EVENT_MDP_RSZ2_FRAME_DONE 297 +#define CMDQ_EVENT_MDP_RSZ1_FRAME_DONE 298 +#define CMDQ_EVENT_MDP_RSZ0_FRAME_DONE 299 +#define CMDQ_EVENT_MDP_RDMA3_FRAME_DONE 300 +#define CMDQ_EVENT_MDP_RDMA2_FRAME_DONE 301 +#define CMDQ_EVENT_MDP_RDMA1_FRAME_DONE 302 +#define CMDQ_EVENT_MDP_RDMA0_FRAME_DONE 303 +#define CMDQ_EVENT_MDP_HDR1_FRAME_DONE 304 +#define CMDQ_EVENT_MDP_HDR0_FRAME_DONE 305 +#define CMDQ_EVENT_MDP_COLOR0_FRAME_DONE 306 +#define CMDQ_EVENT_MDP_AAL3_FRAME_DONE 307 +#define CMDQ_EVENT_MDP_AAL2_FRAME_DONE 308 +#define CMDQ_EVENT_MDP_AAL1_FRAME_DONE 309 +#define CMDQ_EVENT_MDP_AAL0_FRAME_DONE 310 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_0 320 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_1 321 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_2 322 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_3 323 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_4 324 
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_5 325 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_6 326 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_7 327 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_8 328 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_9 329 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_10 330 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_11 331 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_12 332 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_13 333 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_14 334 +#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_15 335 +#define CMDQ_EVENT_MDP_WROT3_SW_RST_DONE_ENG_EVENT 336 +#define CMDQ_EVENT_MDP_WROT2_SW_RST_DONE_ENG_EVENT 337 +#define CMDQ_EVENT_MDP_WROT1_SW_RST_DONE_ENG_EVENT 338 +#define CMDQ_EVENT_MDP_WROT0_SW_RST_DONE_ENG_EVENT 339 +#define CMDQ_EVENT_MDP_RDMA3_SW_RST_DONE_ENG_EVENT 340 +#define CMDQ_EVENT_MDP_RDMA2_SW_RST_DONE_ENG_EVENT 341 +#define CMDQ_EVENT_MDP_RDMA1_SW_RST_DONE_ENG_EVENT 342 +#define CMDQ_EVENT_MDP_RDMA0_SW_RST_DONE_ENG_EVENT 343 +/* DISP */ +#define CMDQ_EVENT_DISP_OVL0_SOF 384 +#define CMDQ_EVENT_DISP_OVL0_2L_SOF 385 +#define CMDQ_EVENT_DISP_RDMA0_SOF 386 +#define CMDQ_EVENT_DISP_RSZ0_SOF 387 +#define CMDQ_EVENT_DISP_COLOR0_SOF 388 +#define CMDQ_EVENT_DISP_CCORR0_SOF 389 +#define CMDQ_EVENT_DISP_CCORR1_SOF 390 +#define CMDQ_EVENT_DISP_AAL0_SOF 391 +#define CMDQ_EVENT_DISP_GAMMA0_SOF 392 +#define CMDQ_EVENT_DISP_POSTMASK0_SOF 393 +#define CMDQ_EVENT_DISP_DITHER0_SOF 394 +#define CMDQ_EVENT_DISP_CM0_SOF 395 +#define CMDQ_EVENT_DISP_SPR0_SOF 396 +#define CMDQ_EVENT_DISP_DSC_WRAP0_SOF 397 +#define CMDQ_EVENT_DSI0_SOF 398 +#define CMDQ_EVENT_DISP_WDMA0_SOF 399 +#define CMDQ_EVENT_DISP_PWM0_SOF 400 +#define CMDQ_EVENT_DSI0_FRAME_DONE 410 +#define CMDQ_EVENT_DISP_WDMA0_FRAME_DONE 411 +#define CMDQ_EVENT_DISP_SPR0_FRAME_DONE 412 +#define CMDQ_EVENT_DISP_RSZ0_FRAME_DONE 413 +#define CMDQ_EVENT_DISP_RDMA0_FRAME_DONE 414 +#define CMDQ_EVENT_DISP_POSTMASK0_FRAME_DONE 415 +#define CMDQ_EVENT_DISP_OVL0_FRAME_DONE 416 +#define CMDQ_EVENT_DISP_OVL0_2L_FRAME_DONE 417 +#define CMDQ_EVENT_DISP_GAMMA0_FRAME_DONE 418 +#define CMDQ_EVENT_DISP_DSC_WRAP0_CORE0_FRAME_DONE 420 +#define CMDQ_EVENT_DISP_DITHER0_FRAME_DONE 421 +#define CMDQ_EVENT_DISP_COLOR0_FRAME_DONE 422 +#define CMDQ_EVENT_DISP_CM0_FRAME_DONE 423 +#define CMDQ_EVENT_DISP_CCORR1_FRAME_DONE 424 +#define CMDQ_EVENT_DISP_CCORR0_FRAME_DONE 425 +#define CMDQ_EVENT_DISP_AAL0_FRAME_DONE 426 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_0 434 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_1 435 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_2 436 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_3 437 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_4 438 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_5 439 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_6 440 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_7 441 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_8 442 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_9 443 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_10 444 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_11 445 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_12 446 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_13 447 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_14 448 +#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_15 449 +#define CMDQ_EVENT_DSI0_TE_ENG_EVENT 450 +#define CMDQ_EVENT_DSI0_IRQ_ENG_EVENT 451 +#define CMDQ_EVENT_DSI0_DONE_ENG_EVENT 452 +#define CMDQ_EVENT_DISP_WDMA0_SW_RST_DONE_ENG_EVENT 453 +#define CMDQ_EVENT_DISP_SMIASSERT_ENG_EVENT 454 +#define 
CMDQ_EVENT_DISP_POSTMASK0_RST_DONE_ENG_EVENT 455 +#define CMDQ_EVENT_DISP_OVL0_RST_DONE_ENG_EVENT 456 +#define CMDQ_EVENT_DISP_OVL0_2L_RST_DONE_ENG_EVENT 457 +#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_0 458 +#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_1 459 +#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_2 460 +#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_3 461 +#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_4 462 +#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_5 463 +#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_6 464 +#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_7 465 +#define CMDQ_EVENT_OUT_EVENT_0 898 + +/* CMDQ sw tokens + * Following definitions are gce sw token which may use by clients + * event operation API. + * Note that token 512 to 639 may set secure + */ + +/* end of hw event and begin of sw token */ +#define CMDQ_MAX_HW_EVENT 512 + +/* Config thread notify trigger thread */ +#define CMDQ_SYNC_TOKEN_CONFIG_DIRTY 640 +/* Trigger thread notify config thread */ +#define CMDQ_SYNC_TOKEN_STREAM_EOF 641 +/* Block Trigger thread until the ESD check finishes. */ +#define CMDQ_SYNC_TOKEN_ESD_EOF 642 +#define CMDQ_SYNC_TOKEN_STREAM_BLOCK 643 +/* check CABC setup finish */ +#define CMDQ_SYNC_TOKEN_CABC_EOF 644 + +/* Notify normal CMDQ there are some secure task done + * MUST NOT CHANGE, this token sync with secure world + */ +#define CMDQ_SYNC_SECURE_THR_EOF 647 + +/* CMDQ use sw token */ +#define CMDQ_SYNC_TOKEN_USER_0 649 +#define CMDQ_SYNC_TOKEN_USER_1 650 +#define CMDQ_SYNC_TOKEN_POLL_MONITOR 651 +#define CMDQ_SYNC_TOKEN_TPR_LOCK 652 + +/* ISP sw token */ +#define CMDQ_SYNC_TOKEN_MSS 665 +#define CMDQ_SYNC_TOKEN_MSF 666 + +/* DISP sw token */ +#define CMDQ_SYNC_TOKEN_SODI 671 + +/* GPR access tokens (for register backup) + * There are 15 32-bit GPR, 3 GPR form a set + * (64-bit for address, 32-bit for value) + * MUST NOT CHANGE, these tokens sync with MDP + */ +#define CMDQ_SYNC_TOKEN_GPR_SET_0 700 +#define CMDQ_SYNC_TOKEN_GPR_SET_1 701 +#define CMDQ_SYNC_TOKEN_GPR_SET_2 702 +#define CMDQ_SYNC_TOKEN_GPR_SET_3 703 +#define CMDQ_SYNC_TOKEN_GPR_SET_4 704 + +/* Resource lock event to control resource in GCE thread */ +#define CMDQ_SYNC_RESOURCE_WROT0 710 +#define CMDQ_SYNC_RESOURCE_WROT1 711 + +/* event for gpr timer, used in sleep and poll with timeout */ +#define CMDQ_TOKEN_GPR_TIMER_R0 994 +#define CMDQ_TOKEN_GPR_TIMER_R1 995 +#define CMDQ_TOKEN_GPR_TIMER_R2 996 +#define CMDQ_TOKEN_GPR_TIMER_R3 997 +#define CMDQ_TOKEN_GPR_TIMER_R4 998 +#define CMDQ_TOKEN_GPR_TIMER_R5 999 +#define CMDQ_TOKEN_GPR_TIMER_R6 1000 +#define CMDQ_TOKEN_GPR_TIMER_R7 1001 +#define CMDQ_TOKEN_GPR_TIMER_R8 1002 +#define CMDQ_TOKEN_GPR_TIMER_R9 1003 +#define CMDQ_TOKEN_GPR_TIMER_R10 1004 +#define CMDQ_TOKEN_GPR_TIMER_R11 1005 +#define CMDQ_TOKEN_GPR_TIMER_R12 1006 +#define CMDQ_TOKEN_GPR_TIMER_R13 1007 +#define CMDQ_TOKEN_GPR_TIMER_R14 1008 +#define CMDQ_TOKEN_GPR_TIMER_R15 1009 + +#define CMDQ_EVENT_MAX 0x3FF +/* CMDQ sw tokens END */ + +#endif From 8f585d14030dcf8fbec2f864d189515e8be37a80 Mon Sep 17 00:00:00 2001 From: Kartik Date: Thu, 14 Apr 2022 13:05:55 +0530 Subject: [PATCH 05/17] mailbox: tegra-hsp: Add tegra_hsp_sm_ops This patch introduces tegra_hsp_sm_ops to abstract send & receive API's for shared mailboxes. 
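The point of the indirection, as a rough sketch with hypothetical names (tegra_hsp_sm_send_foo/tegra_hsp_sm_recv_foo are placeholders, not part of this patch): a new mailbox word format only has to provide two callbacks and be selected per channel; the shared IRQ handler and the mbox_chan_ops plumbing stay untouched.

	/* hypothetical future format, placeholder callbacks only */
	static const struct tegra_hsp_sm_ops tegra_hsp_sm_foo_ops = {
		.send = tegra_hsp_sm_send_foo,	/* write payload, mark mailbox full */
		.recv = tegra_hsp_sm_recv_foo,	/* deliver payload, clear mailbox */
	};

	/* chosen per channel, e.g. from tegra_hsp_sm_xlate() */
	mb->ops = &tegra_hsp_sm_foo_ops;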
Signed-off-by: Kartik Signed-off-by: Jassi Brar --- drivers/mailbox/tegra-hsp.c | 78 +++++++++++++++++++++++-------------- 1 file changed, 49 insertions(+), 29 deletions(-) diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c index a55d6df99697..1a4c801878b0 100644 --- a/drivers/mailbox/tegra-hsp.c +++ b/drivers/mailbox/tegra-hsp.c @@ -67,8 +67,14 @@ struct tegra_hsp_doorbell { unsigned int index; }; +struct tegra_hsp_sm_ops { + void (*send)(struct tegra_hsp_channel *channel, void *data); + void (*recv)(struct tegra_hsp_channel *channel); +}; + struct tegra_hsp_mailbox { struct tegra_hsp_channel channel; + const struct tegra_hsp_sm_ops *ops; unsigned int index; bool producer; }; @@ -208,8 +214,7 @@ static irqreturn_t tegra_hsp_shared_irq(int irq, void *data) { struct tegra_hsp *hsp = data; unsigned long bit, mask; - u32 status, value; - void *msg; + u32 status; status = tegra_hsp_readl(hsp, HSP_INT_IR) & hsp->mask; @@ -245,25 +250,8 @@ static irqreturn_t tegra_hsp_shared_irq(int irq, void *data) for_each_set_bit(bit, &mask, hsp->num_sm) { struct tegra_hsp_mailbox *mb = &hsp->mailboxes[bit]; - if (!mb->producer) { - value = tegra_hsp_channel_readl(&mb->channel, - HSP_SM_SHRD_MBOX); - value &= ~HSP_SM_SHRD_MBOX_FULL; - msg = (void *)(unsigned long)value; - mbox_chan_received_data(mb->channel.chan, msg); - - /* - * Need to clear all bits here since some producers, - * such as TCU, depend on fields in the register - * getting cleared by the consumer. - * - * The mailbox API doesn't give the consumers a way - * of doing that explicitly, so we have to make sure - * we cover all possible cases. - */ - tegra_hsp_channel_writel(&mb->channel, 0x0, - HSP_SM_SHRD_MBOX); - } + if (!mb->producer) + mb->ops->recv(&mb->channel); } return IRQ_HANDLED; @@ -372,21 +360,52 @@ static const struct mbox_chan_ops tegra_hsp_db_ops = { .shutdown = tegra_hsp_doorbell_shutdown, }; -static int tegra_hsp_mailbox_send_data(struct mbox_chan *chan, void *data) +static void tegra_hsp_sm_send32(struct tegra_hsp_channel *channel, void *data) { - struct tegra_hsp_mailbox *mb = chan->con_priv; - struct tegra_hsp *hsp = mb->channel.hsp; - unsigned long flags; u32 value; - if (WARN_ON(!mb->producer)) - return -EPERM; - /* copy data and mark mailbox full */ value = (u32)(unsigned long)data; value |= HSP_SM_SHRD_MBOX_FULL; - tegra_hsp_channel_writel(&mb->channel, value, HSP_SM_SHRD_MBOX); + tegra_hsp_channel_writel(channel, value, HSP_SM_SHRD_MBOX); +} + +static void tegra_hsp_sm_recv32(struct tegra_hsp_channel *channel) +{ + u32 value; + void *msg; + + value = tegra_hsp_channel_readl(channel, HSP_SM_SHRD_MBOX); + value &= ~HSP_SM_SHRD_MBOX_FULL; + msg = (void *)(unsigned long)value; + mbox_chan_received_data(channel->chan, msg); + + /* + * Need to clear all bits here since some producers, such as TCU, depend + * on fields in the register getting cleared by the consumer. + * + * The mailbox API doesn't give the consumers a way of doing that + * explicitly, so we have to make sure we cover all possible cases. 
+ */ + tegra_hsp_channel_writel(channel, 0x0, HSP_SM_SHRD_MBOX); +} + +static const struct tegra_hsp_sm_ops tegra_hsp_sm_32bit_ops = { + .send = tegra_hsp_sm_send32, + .recv = tegra_hsp_sm_recv32, +}; + +static int tegra_hsp_mailbox_send_data(struct mbox_chan *chan, void *data) +{ + struct tegra_hsp_mailbox *mb = chan->con_priv; + struct tegra_hsp *hsp = mb->channel.hsp; + unsigned long flags; + + if (WARN_ON(!mb->producer)) + return -EPERM; + + mb->ops->send(&mb->channel, data); /* enable EMPTY interrupt for the shared mailbox */ spin_lock_irqsave(&hsp->lock, flags); @@ -557,6 +576,7 @@ static struct mbox_chan *tegra_hsp_sm_xlate(struct mbox_controller *mbox, return ERR_PTR(-ENODEV); mb = &hsp->mailboxes[index]; + mb->ops = &tegra_hsp_sm_32bit_ops; if ((args->args[1] & TEGRA_HSP_SM_FLAG_TX) == 0) mb->producer = false; From 58919326e72f63c380dc3271dd1cc8bdf1bbe3e4 Mon Sep 17 00:00:00 2001 From: Kartik Date: Thu, 14 Apr 2022 13:05:56 +0530 Subject: [PATCH 06/17] dt-bindings: tegra186-hsp: add type for shared mailboxes Tegra234 supports sending/receiving 32-bit and 128-bit data over a shared mailbox. Based on the data size to be used, clients need to specify the type of shared mailbox in the device tree. Add a macro for 128-bit shared mailbox. Mailbox clients can use this macro as a flag in device tree to enable 128-bit data support for a shared mailbox. Signed-off-by: Kartik Acked-by: Rob Herring Signed-off-by: Jassi Brar --- .../devicetree/bindings/mailbox/nvidia,tegra186-hsp.yaml | 9 +++++++++ include/dt-bindings/mailbox/tegra186-hsp.h | 5 +++++ 2 files changed, 14 insertions(+) diff --git a/Documentation/devicetree/bindings/mailbox/nvidia,tegra186-hsp.yaml b/Documentation/devicetree/bindings/mailbox/nvidia,tegra186-hsp.yaml index 9f7a7296b57f..a3e87516d637 100644 --- a/Documentation/devicetree/bindings/mailbox/nvidia,tegra186-hsp.yaml +++ b/Documentation/devicetree/bindings/mailbox/nvidia,tegra186-hsp.yaml @@ -26,6 +26,15 @@ description: | second cell is used to identify the mailbox that the client is going to use. + For shared mailboxes, the first cell composed of two fields: + - bits 15..8: + A bit mask of flags that further specifies the type of shared + mailbox to be used (based on the data size). If no flag is + specified then, 32-bit shared mailbox is used. + - bits 7..0: + Defines the type of the mailbox to be used. This field should be + TEGRA_HSP_MBOX_TYPE_SM for shared mailboxes. + For doorbells, the second cell specifies the index of the doorbell to use. diff --git a/include/dt-bindings/mailbox/tegra186-hsp.h b/include/dt-bindings/mailbox/tegra186-hsp.h index 3bdec7a84d35..b9ccae2aa9e2 100644 --- a/include/dt-bindings/mailbox/tegra186-hsp.h +++ b/include/dt-bindings/mailbox/tegra186-hsp.h @@ -15,6 +15,11 @@ #define TEGRA_HSP_MBOX_TYPE_SS 0x2 #define TEGRA_HSP_MBOX_TYPE_AS 0x3 +/* + * These define the types of shared mailbox supported based on data size. + */ +#define TEGRA_HSP_MBOX_TYPE_SM_128BIT (1 << 8) + /* * These defines represent the bit associated with the given master ID in the * doorbell registers. From 74c20dd0f89238068de5bb6ecd4e968eddab339d Mon Sep 17 00:00:00 2001 From: Kartik Date: Thu, 14 Apr 2022 13:05:57 +0530 Subject: [PATCH 07/17] mailbox: tegra-hsp: Add 128-bit shared mailbox support Add support for 128-bit shared mailboxes found on Tegra234 chips. 
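For context, a hedged device-tree usage sketch (the consumer node name and mailbox index are made up, and TEGRA_HSP_SM_RX() is assumed to be the existing helper from tegra186-hsp.h): a client opts into the 128-bit layout by OR-ing the new flag into the type cell; otherwise the 32-bit ops are used as before.

	/* illustrative consumer node, not part of this patch */
	client {
		mboxes = <&hsp_top0
			  (TEGRA_HSP_MBOX_TYPE_SM | TEGRA_HSP_MBOX_TYPE_SM_128BIT)
			  TEGRA_HSP_SM_RX(0)>;
	};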
Signed-off-by: Kartik Signed-off-by: Jassi Brar --- drivers/mailbox/tegra-hsp.c | 77 +++++++++++++++++++++++++++++++++++-- 1 file changed, 74 insertions(+), 3 deletions(-) diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c index 1a4c801878b0..573481e436f5 100644 --- a/drivers/mailbox/tegra-hsp.c +++ b/drivers/mailbox/tegra-hsp.c @@ -46,10 +46,18 @@ #define HSP_SM_SHRD_MBOX_FULL_INT_IE 0x04 #define HSP_SM_SHRD_MBOX_EMPTY_INT_IE 0x08 +#define HSP_SHRD_MBOX_TYPE1_TAG 0x40 +#define HSP_SHRD_MBOX_TYPE1_DATA0 0x48 +#define HSP_SHRD_MBOX_TYPE1_DATA1 0x4c +#define HSP_SHRD_MBOX_TYPE1_DATA2 0x50 +#define HSP_SHRD_MBOX_TYPE1_DATA3 0x54 + #define HSP_DB_CCPLEX 1 #define HSP_DB_BPMP 3 #define HSP_DB_MAX 7 +#define HSP_MBOX_TYPE_MASK 0xff + struct tegra_hsp_channel; struct tegra_hsp; @@ -88,6 +96,7 @@ struct tegra_hsp_db_map { struct tegra_hsp_soc { const struct tegra_hsp_db_map *map; bool has_per_mb_ie; + bool has_128_bit_mb; }; struct tegra_hsp { @@ -396,6 +405,51 @@ static const struct tegra_hsp_sm_ops tegra_hsp_sm_32bit_ops = { .recv = tegra_hsp_sm_recv32, }; +static void tegra_hsp_sm_send128(struct tegra_hsp_channel *channel, void *data) +{ + u32 value[4]; + + memcpy(value, data, sizeof(value)); + + /* Copy data */ + tegra_hsp_channel_writel(channel, value[0], HSP_SHRD_MBOX_TYPE1_DATA0); + tegra_hsp_channel_writel(channel, value[1], HSP_SHRD_MBOX_TYPE1_DATA1); + tegra_hsp_channel_writel(channel, value[2], HSP_SHRD_MBOX_TYPE1_DATA2); + tegra_hsp_channel_writel(channel, value[3], HSP_SHRD_MBOX_TYPE1_DATA3); + + /* Update tag to mark mailbox full */ + tegra_hsp_channel_writel(channel, HSP_SM_SHRD_MBOX_FULL, + HSP_SHRD_MBOX_TYPE1_TAG); +} + +static void tegra_hsp_sm_recv128(struct tegra_hsp_channel *channel) +{ + u32 value[4]; + void *msg; + + value[0] = tegra_hsp_channel_readl(channel, HSP_SHRD_MBOX_TYPE1_DATA0); + value[1] = tegra_hsp_channel_readl(channel, HSP_SHRD_MBOX_TYPE1_DATA1); + value[2] = tegra_hsp_channel_readl(channel, HSP_SHRD_MBOX_TYPE1_DATA2); + value[3] = tegra_hsp_channel_readl(channel, HSP_SHRD_MBOX_TYPE1_DATA3); + + msg = (void *)(unsigned long)value; + mbox_chan_received_data(channel->chan, msg); + + /* + * Clear data registers and tag. 
+ */ + tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_DATA0); + tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_DATA1); + tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_DATA2); + tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_DATA3); + tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_TAG); +} + +static const struct tegra_hsp_sm_ops tegra_hsp_sm_128bit_ops = { + .send = tegra_hsp_sm_send128, + .recv = tegra_hsp_sm_recv128, +}; + static int tegra_hsp_mailbox_send_data(struct mbox_chan *chan, void *data) { struct tegra_hsp_mailbox *mb = chan->con_priv; @@ -571,12 +625,20 @@ static struct mbox_chan *tegra_hsp_sm_xlate(struct mbox_controller *mbox, index = args->args[1] & TEGRA_HSP_SM_MASK; - if (type != TEGRA_HSP_MBOX_TYPE_SM || !hsp->shared_irqs || - index >= hsp->num_sm) + if ((type & HSP_MBOX_TYPE_MASK) != TEGRA_HSP_MBOX_TYPE_SM || + !hsp->shared_irqs || index >= hsp->num_sm) return ERR_PTR(-ENODEV); mb = &hsp->mailboxes[index]; - mb->ops = &tegra_hsp_sm_32bit_ops; + + if (type & TEGRA_HSP_MBOX_TYPE_SM_128BIT) { + if (!hsp->soc->has_128_bit_mb) + return ERR_PTR(-ENODEV); + + mb->ops = &tegra_hsp_sm_128bit_ops; + } else { + mb->ops = &tegra_hsp_sm_32bit_ops; + } if ((args->args[1] & TEGRA_HSP_SM_FLAG_TX) == 0) mb->producer = false; @@ -853,16 +915,25 @@ static const struct tegra_hsp_db_map tegra186_hsp_db_map[] = { static const struct tegra_hsp_soc tegra186_hsp_soc = { .map = tegra186_hsp_db_map, .has_per_mb_ie = false, + .has_128_bit_mb = false, }; static const struct tegra_hsp_soc tegra194_hsp_soc = { .map = tegra186_hsp_db_map, .has_per_mb_ie = true, + .has_128_bit_mb = false, +}; + +static const struct tegra_hsp_soc tegra234_hsp_soc = { + .map = tegra186_hsp_db_map, + .has_per_mb_ie = false, + .has_128_bit_mb = true, }; static const struct of_device_id tegra_hsp_match[] = { { .compatible = "nvidia,tegra186-hsp", .data = &tegra186_hsp_soc }, { .compatible = "nvidia,tegra194-hsp", .data = &tegra194_hsp_soc }, + { .compatible = "nvidia,tegra234-hsp", .data = &tegra234_hsp_soc }, { } }; From dea27cda46116afea45fdaade52adedb732d77ab Mon Sep 17 00:00:00 2001 From: Tinghan Shen Date: Fri, 22 Apr 2022 10:39:08 +0800 Subject: [PATCH 08/17] dt-bindings: mailbox: mtk,adsp-mbox: add mt8186 compatible name Add compatible name for MediaTek MT8186 SoC ADSP mailbox. Signed-off-by: Tinghan Shen Acked-by: Rob Herring Signed-off-by: Jassi Brar --- .../devicetree/bindings/mailbox/mtk,adsp-mbox.yaml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/Documentation/devicetree/bindings/mailbox/mtk,adsp-mbox.yaml b/Documentation/devicetree/bindings/mailbox/mtk,adsp-mbox.yaml index fe454a1fba17..72c1d9e82c89 100644 --- a/Documentation/devicetree/bindings/mailbox/mtk,adsp-mbox.yaml +++ b/Documentation/devicetree/bindings/mailbox/mtk,adsp-mbox.yaml @@ -11,14 +11,15 @@ maintainers: description: | The MTK ADSP mailbox Inter-Processor Communication (IPC) enables the SoC - to ommunicate with ADSP by passing messages through two mailbox channels. + to communicate with ADSP by passing messages through two mailbox channels. The MTK ADSP mailbox IPC also provides the ability for one processor to signal the other processor using interrupts. 
properties: compatible: - items: - - const: mediatek,mt8195-adsp-mbox + enum: + - mediatek,mt8195-adsp-mbox + - mediatek,mt8186-adsp-mbox "#mbox-cells": const: 0 From 02b5c35a617137ccad8d6988805ab54c3a9efc81 Mon Sep 17 00:00:00 2001 From: Tinghan Shen Date: Fri, 22 Apr 2022 10:39:09 +0800 Subject: [PATCH 09/17] mailbox: mediatek: support mt8186 adsp mailbox Add support of mt8186 adsp mailbox. Signed-off-by: Tinghan Shen Signed-off-by: Jassi Brar --- drivers/mailbox/mtk-adsp-mailbox.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/mailbox/mtk-adsp-mailbox.c b/drivers/mailbox/mtk-adsp-mailbox.c index 5e7378090c7b..14bc0057de81 100644 --- a/drivers/mailbox/mtk-adsp-mailbox.c +++ b/drivers/mailbox/mtk-adsp-mailbox.c @@ -149,6 +149,13 @@ static int mtk_adsp_mbox_probe(struct platform_device *pdev) return devm_mbox_controller_register(dev, &priv->mbox); } +static const struct mtk_adsp_mbox_cfg mt8186_adsp_mbox_cfg = { + .set_in = 0x00, + .set_out = 0x04, + .clr_in = 0x08, + .clr_out = 0x0C, +}; + static const struct mtk_adsp_mbox_cfg mt8195_adsp_mbox_cfg = { .set_in = 0x00, .set_out = 0x1c, @@ -157,6 +164,7 @@ static const struct mtk_adsp_mbox_cfg mt8195_adsp_mbox_cfg = { }; static const struct of_device_id mtk_adsp_mbox_of_match[] = { + { .compatible = "mediatek,mt8186-adsp-mbox", .data = &mt8186_adsp_mbox_cfg }, { .compatible = "mediatek,mt8195-adsp-mbox", .data = &mt8195_adsp_mbox_cfg }, {}, }; From 504ff5b00853b05133c9cddfc351548b48cc5bdc Mon Sep 17 00:00:00 2001 From: ran jianping Date: Wed, 27 Apr 2022 05:41:00 +0000 Subject: [PATCH 10/17] mailbox:imx: using pm_runtime_resume_and_get Using pm_runtime_resume_and_get() to replace pm_runtime_get_sync and pm_runtime_put_noidle. This change is just to simplify the code, no actual functional changes. Reported-by: Zeal Robot Signed-off-by: ran jianping Signed-off-by: Jassi Brar --- drivers/mailbox/imx-mailbox.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c index df8a785be324..b10239d6ef93 100644 --- a/drivers/mailbox/imx-mailbox.c +++ b/drivers/mailbox/imx-mailbox.c @@ -830,11 +830,9 @@ static int imx_mu_probe(struct platform_device *pdev) pm_runtime_enable(dev); - ret = pm_runtime_get_sync(dev); - if (ret < 0) { - pm_runtime_put_noidle(dev); + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) goto disable_runtime_pm; - } ret = pm_runtime_put_sync(dev); if (ret < 0) From d9512696082bcfd068ce65476c85c5b17511d4df Mon Sep 17 00:00:00 2001 From: ran jianping Date: Thu, 28 Apr 2022 06:42:09 +0000 Subject: [PATCH 11/17] mailbox: omap: using pm_runtime_resume_and_get to simplify the code Using pm_runtime_resume_and_get() to replace pm_runtime_get_sync and pm_runtime_put_noidle. This change is just to simplify the code, no actual functional changes. 
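For clarity, pm_runtime_resume_and_get() folds the error-path bookkeeping into one helper; it is roughly equivalent to the open-coded pattern being removed (simplified from include/linux/pm_runtime.h, not a verbatim copy):

	static inline int pm_runtime_resume_and_get(struct device *dev)
	{
		int ret;

		ret = __pm_runtime_resume(dev, RPM_GET_PUT); /* what pm_runtime_get_sync() does */
		if (ret < 0) {
			pm_runtime_put_noidle(dev);	/* drop the usage count on failure */
			return ret;
		}

		return 0;
	}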
Reported-by: Zeal Robot Signed-off-by: ran jianping Signed-off-by: Jassi Brar --- drivers/mailbox/omap-mailbox.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c index 58f3d569f095..098c82d87137 100644 --- a/drivers/mailbox/omap-mailbox.c +++ b/drivers/mailbox/omap-mailbox.c @@ -856,11 +856,9 @@ static int omap_mbox_probe(struct platform_device *pdev) platform_set_drvdata(pdev, mdev); pm_runtime_enable(mdev->dev); - ret = pm_runtime_get_sync(mdev->dev); - if (ret < 0) { - pm_runtime_put_noidle(mdev->dev); + ret = pm_runtime_resume_and_get(mdev->dev); + if (ret < 0) goto unregister; - } /* * just print the raw revision register, the format is not From a022c7c96ca1c7d8b4d2702973bf4b1a7cff5958 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Sun, 1 May 2022 12:34:27 +0200 Subject: [PATCH 12/17] mailbox: correct kerneldoc Correct kerneldoc warnings like: drivers/mailbox/arm_mhu_db.c:47: warning: This comment starts with '/**', but isn't a kernel-doc comment. Refer Documentation/doc-guide/kernel-doc.rst drivers/mailbox/qcom-ipcc.c:58: warning: Function parameter or member 'num_chans' not described in 'qcom_ipcc' Signed-off-by: Krzysztof Kozlowski Acked-by: Viresh Kumar Acked-by: Sudeep Holla Signed-off-by: Jassi Brar --- drivers/mailbox/arm_mhu_db.c | 2 +- drivers/mailbox/arm_mhuv2.c | 3 ++- drivers/mailbox/qcom-ipcc.c | 3 ++- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/mailbox/arm_mhu_db.c b/drivers/mailbox/arm_mhu_db.c index 8674153cc893..aa0a4d83880f 100644 --- a/drivers/mailbox/arm_mhu_db.c +++ b/drivers/mailbox/arm_mhu_db.c @@ -44,7 +44,7 @@ struct arm_mhu { }; /** - * ARM MHU Mailbox allocated channel information + * struct mhu_db_channel - ARM MHU Mailbox allocated channel information * * @mhu: Pointer to parent mailbox device * @pchan: Physical channel within which this doorbell resides in diff --git a/drivers/mailbox/arm_mhuv2.c b/drivers/mailbox/arm_mhuv2.c index d997f8ebfa98..a47aef8df52f 100644 --- a/drivers/mailbox/arm_mhuv2.c +++ b/drivers/mailbox/arm_mhuv2.c @@ -160,7 +160,8 @@ enum mhuv2_frame { * struct mhuv2 - MHUv2 mailbox controller data * * @mbox: Mailbox controller belonging to the MHU frame. - * @send/recv: Base address of the register mapping region. + * @send: Base address of the register mapping region. + * @recv: Base address of the register mapping region. * @frame: Frame type: RECEIVER_FRAME or SENDER_FRAME. * @irq: Interrupt. * @windows: Channel windows implemented by the platform. diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c index c5d963222014..881706da59c0 100644 --- a/drivers/mailbox/qcom-ipcc.c +++ b/drivers/mailbox/qcom-ipcc.c @@ -41,9 +41,10 @@ struct qcom_ipcc_chan_info { * @dev: Device associated with this instance * @base: Base address of the IPCC frame associated to APSS * @irq_domain: The irq_domain associated with this instance - * @chan: The mailbox channels array + * @chans: The mailbox channels array * @mchan: The per-mailbox channel info array * @mbox: The mailbox controller + * @num_chans: Number of @chans elements * @irq: Summary irq */ struct qcom_ipcc { From 9accf46b7fb893a46eff3a57407b02ac227a7cd7 Mon Sep 17 00:00:00 2001 From: Fabien Dessenne Date: Tue, 3 May 2022 16:55:59 +0200 Subject: [PATCH 13/17] dt-bindings: mailbox: remove the IPCC "wakeup" IRQ The stm32 ipcc mailbox driver supports only two interrupts (rx and tx), so remove the unsupported "wakeup" one. 
Signed-off-by: Fabien Dessenne Reviewed-by: Rob Herring Signed-off-by: Jassi Brar --- .../devicetree/bindings/mailbox/st,stm32-ipcc.yaml | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/Documentation/devicetree/bindings/mailbox/st,stm32-ipcc.yaml b/Documentation/devicetree/bindings/mailbox/st,stm32-ipcc.yaml index 8eb4bf52ea27..2c8b47285aa3 100644 --- a/Documentation/devicetree/bindings/mailbox/st,stm32-ipcc.yaml +++ b/Documentation/devicetree/bindings/mailbox/st,stm32-ipcc.yaml @@ -30,15 +30,11 @@ properties: items: - description: rx channel occupied - description: tx channel free - - description: wakeup source - minItems: 2 interrupt-names: items: - const: rx - const: tx - - const: wakeup - minItems: 2 wakeup-source: true @@ -70,10 +66,9 @@ examples: #mbox-cells = <1>; reg = <0x4c001000 0x400>; st,proc-id = <0>; - interrupts-extended = <&intc GIC_SPI 100 IRQ_TYPE_NONE>, - <&intc GIC_SPI 101 IRQ_TYPE_NONE>, - <&aiec 62 1>; - interrupt-names = "rx", "tx", "wakeup"; + interrupts-extended = <&exti 61 1>, + <&intc GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "rx", "tx"; clocks = <&rcc_clk IPCC>; wakeup-source; }; From 369e4ef87a8f5da7c348ec2c61ec5cd726e8337a Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Mon, 9 May 2022 09:17:16 -0500 Subject: [PATCH 14/17] mailbox: pcc: Fix an invalid-load caught by the address sanitizer `pcc_mailbox_probe` doesn't initialize all memory that has been allocated before the first time that one of it's members `txdone_irq` may be accessed. This leads to a an invalid load any time that this member is accessed: [ 2.429769] UBSAN: invalid-load in drivers/mailbox/pcc.c:684:22 [ 2.430324] UBSAN: invalid-load in drivers/mailbox/mailbox.c:486:12 [ 4.276782] UBSAN: invalid-load in drivers/acpi/cppc_acpi.c:314:45 Link: https://bugzilla.kernel.org/show_bug.cgi?id=215587 Fixes: ce028702ddbc ("mailbox: pcc: Move bulk of PCCT parsing into pcc_mbox_probe") Signed-off-by: Mario Limonciello Reviewed-by: Sudeep Holla Signed-off-by: Jassi Brar --- drivers/mailbox/pcc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c index ed18936b8ce6..ebfa33a40fce 100644 --- a/drivers/mailbox/pcc.c +++ b/drivers/mailbox/pcc.c @@ -654,7 +654,7 @@ static int pcc_mbox_probe(struct platform_device *pdev) goto err; } - pcc_mbox_ctrl = devm_kmalloc(dev, sizeof(*pcc_mbox_ctrl), GFP_KERNEL); + pcc_mbox_ctrl = devm_kzalloc(dev, sizeof(*pcc_mbox_ctrl), GFP_KERNEL); if (!pcc_mbox_ctrl) { rc = -ENOMEM; goto err; From c25f77899753fdd6f43fa6999abd53fd0059496e Mon Sep 17 00:00:00 2001 From: Prasad Sodagudi Date: Tue, 17 May 2022 16:13:00 +0530 Subject: [PATCH 15/17] mailbox: qcom-ipcc: Log the pending interrupt during resume Enable logging of the pending interrupt that triggered device wakeup. This logging information helps to debug IRQs that cause periodic device wakeups by printing the detailed information of pending IPCC interrupts. Scenario: Device wakeup caused by Modem crash Logs: qcom-ipcc mailbox: virq: 182 triggered client-id: 2; signal-id: 2 From the IPCC bindings it can further be understood that the client here is IPCC_CLIENT_MPSS and the signal was IPCC_MPROC_SIGNAL_SMP2P. 
Reviewed-by: Manivannan Sadhasivam Signed-off-by: Prasad Sodagudi Signed-off-by: Sibi Sankar Signed-off-by: Jassi Brar --- drivers/mailbox/qcom-ipcc.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c index 881706da59c0..2583b20cdeb7 100644 --- a/drivers/mailbox/qcom-ipcc.c +++ b/drivers/mailbox/qcom-ipcc.c @@ -255,6 +255,24 @@ static int qcom_ipcc_setup_mbox(struct qcom_ipcc *ipcc, return devm_mbox_controller_register(dev, mbox); } +static int qcom_ipcc_pm_resume(struct device *dev) +{ + struct qcom_ipcc *ipcc = dev_get_drvdata(dev); + u32 hwirq; + int virq; + + hwirq = readl(ipcc->base + IPCC_REG_RECV_ID); + if (hwirq == IPCC_NO_PENDING_IRQ) + return 0; + + virq = irq_find_mapping(ipcc->irq_domain, hwirq); + + dev_dbg(dev, "virq: %d triggered client-id: %ld; signal-id: %ld\n", virq, + FIELD_GET(IPCC_CLIENT_ID_MASK, hwirq), FIELD_GET(IPCC_SIGNAL_ID_MASK, hwirq)); + + return 0; +} + static int qcom_ipcc_probe(struct platform_device *pdev) { struct qcom_ipcc *ipcc; @@ -325,6 +343,10 @@ static const struct of_device_id qcom_ipcc_of_match[] = { }; MODULE_DEVICE_TABLE(of, qcom_ipcc_of_match); +static const struct dev_pm_ops qcom_ipcc_dev_pm_ops = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, qcom_ipcc_pm_resume) +}; + static struct platform_driver qcom_ipcc_driver = { .probe = qcom_ipcc_probe, .remove = qcom_ipcc_remove, @@ -332,6 +354,7 @@ static struct platform_driver qcom_ipcc_driver = { .name = "qcom-ipcc", .of_match_table = qcom_ipcc_of_match, .suppress_bind_attrs = true, + .pm = pm_sleep_ptr(&qcom_ipcc_dev_pm_ops), }, }; From bca1a1004615efe141fd78f360ecc48c60bc4ad5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Ard=C3=B6?= Date: Thu, 31 Mar 2022 09:01:15 +0200 Subject: [PATCH 16/17] mailbox: forward the hrtimer if not queued and under a lock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit c7dacf5b0f32957b24ef29df1207dc2cd8307743, "mailbox: avoid timer start from callback" The previous commit was reverted since it lead to a race that caused the hrtimer to not be started at all. The check for hrtimer_active() in msg_submit() will return true if the callback function txdone_hrtimer() is currently running. This function could return HRTIMER_NORESTART and then the timer will not be restarted, and also msg_submit() will not start the timer. This will lead to a message actually being submitted but no timer will start to check for its compleation. The original fix that added checking hrtimer_active() was added to avoid a warning with hrtimer_forward. Looking in the kernel another solution to avoid this warning is to check hrtimer_is_queued() before calling hrtimer_forward_now() instead. This however requires a lock so the timer is not started by msg_submit() inbetween this check and the hrtimer_forward() call. 
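An illustrative interleaving of the race described above (timing chosen for illustration only):

	CPU0: txdone_hrtimer()                  CPU1: msg_submit()
	----------------------                  ------------------
	scans channels, nothing left
	to poll -> resched = false
	                                        queues a new request
	                                        hrtimer_active() -> true
	                                          (callback still running on CPU0)
	                                        skips hrtimer_start()
	returns HRTIMER_NORESTART
	-> the timer never runs again and the new request is never polled.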
Fixes: c7dacf5b0f32 ("mailbox: avoid timer start from callback") Signed-off-by: Björn Ardö Signed-off-by: Jassi Brar --- drivers/mailbox/mailbox.c | 19 +++++++++++++------ include/linux/mailbox_controller.h | 1 + 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c index 3e7d4b20ab34..4229b9b5da98 100644 --- a/drivers/mailbox/mailbox.c +++ b/drivers/mailbox/mailbox.c @@ -82,11 +82,11 @@ static void msg_submit(struct mbox_chan *chan) exit: spin_unlock_irqrestore(&chan->lock, flags); - /* kick start the timer immediately to avoid delays */ if (!err && (chan->txdone_method & TXDONE_BY_POLL)) { - /* but only if not already active */ - if (!hrtimer_active(&chan->mbox->poll_hrt)) - hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL); + /* kick start the timer immediately to avoid delays */ + spin_lock_irqsave(&chan->mbox->poll_hrt_lock, flags); + hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL); + spin_unlock_irqrestore(&chan->mbox->poll_hrt_lock, flags); } } @@ -120,20 +120,26 @@ static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer) container_of(hrtimer, struct mbox_controller, poll_hrt); bool txdone, resched = false; int i; + unsigned long flags; for (i = 0; i < mbox->num_chans; i++) { struct mbox_chan *chan = &mbox->chans[i]; if (chan->active_req && chan->cl) { - resched = true; txdone = chan->mbox->ops->last_tx_done(chan); if (txdone) tx_tick(chan, 0); + else + resched = true; } } if (resched) { - hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period)); + spin_lock_irqsave(&mbox->poll_hrt_lock, flags); + if (!hrtimer_is_queued(hrtimer)) + hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period)); + spin_unlock_irqrestore(&mbox->poll_hrt_lock, flags); + return HRTIMER_RESTART; } return HRTIMER_NORESTART; @@ -500,6 +506,7 @@ int mbox_controller_register(struct mbox_controller *mbox) hrtimer_init(&mbox->poll_hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL); mbox->poll_hrt.function = txdone_hrtimer; + spin_lock_init(&mbox->poll_hrt_lock); } for (i = 0; i < mbox->num_chans; i++) { diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h index 36d6ce673503..6fee33cb52f5 100644 --- a/include/linux/mailbox_controller.h +++ b/include/linux/mailbox_controller.h @@ -83,6 +83,7 @@ struct mbox_controller { const struct of_phandle_args *sp); /* Internal to API */ struct hrtimer poll_hrt; + spinlock_t poll_hrt_lock; struct list_head node; }; From 79f9fbe303520d2c32b70f04f2bb02cc2baaa4c3 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Mon, 23 May 2022 15:47:02 -0700 Subject: [PATCH 17/17] mailbox: qcom-ipcc: Fix -Wunused-function with CONFIG_PM_SLEEP=n When CONFIG_PM_SLEEP is not set, there is a warning that qcom_ipcc_pm_resume() is unused: drivers/mailbox/qcom-ipcc.c:258:12: error: 'qcom_ipcc_pm_resume' defined but not used [-Werror=unused-function] 258 | static int qcom_ipcc_pm_resume(struct device *dev) | ^~~~~~~~~~~~~~~~~~~ cc1: all warnings being treated as errors Commit 1a3c7bb08826 ("PM: core: Add new *_PM_OPS macros, deprecate old ones") reworked the PM_OPS macros to avoid this problem. Use NOIRQ_SYSTEM_SLEEP_PM_OPS directly so that qcom_ipcc_pm_resume() always appears to be used to the compiler, even though it will be dead code eliminated in the !CONFIG_PM_SLEEP case. 
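Roughly why the reference is enough (simplified sketch of the pm.h helpers, not a verbatim copy):

	/* simplified -- see include/linux/pm.h and <linux/kernel.h> */
	#define PTR_IF(cond, ptr)   ((cond) ? (ptr) : NULL)
	#define pm_sleep_ptr(_ptr)  PTR_IF(IS_ENABLED(CONFIG_PM_SLEEP), (_ptr))

	/*
	 * NOIRQ_SYSTEM_SLEEP_PM_OPS() wraps its callbacks in pm_sleep_ptr(),
	 * so qcom_ipcc_pm_resume stays referenced in the expression (no
	 * -Wunused-function) while the pointer evaluates to NULL and the
	 * function body is dead-code eliminated when CONFIG_PM_SLEEP=n.
	 */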
Fixes: c25f77899753 ("mailbox: qcom-ipcc: Log the pending interrupt during resume") Signed-off-by: Nathan Chancellor Reviewed-by: Sibi Sankar Signed-off-by: Jassi Brar --- drivers/mailbox/qcom-ipcc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c index 2583b20cdeb7..31d58b7d55fe 100644 --- a/drivers/mailbox/qcom-ipcc.c +++ b/drivers/mailbox/qcom-ipcc.c @@ -344,7 +344,7 @@ static const struct of_device_id qcom_ipcc_of_match[] = { MODULE_DEVICE_TABLE(of, qcom_ipcc_of_match); static const struct dev_pm_ops qcom_ipcc_dev_pm_ops = { - SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, qcom_ipcc_pm_resume) + NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, qcom_ipcc_pm_resume) }; static struct platform_driver qcom_ipcc_driver = {