diff --git a/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml b/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml index 4219936eda5a..1fa28e976559 100644 --- a/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml +++ b/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml @@ -19,6 +19,7 @@ properties: - qcom,msm8916-dsi-ctrl - qcom,msm8953-dsi-ctrl - qcom,msm8974-dsi-ctrl + - qcom,msm8976-dsi-ctrl - qcom,msm8996-dsi-ctrl - qcom,msm8998-dsi-ctrl - qcom,qcm2290-dsi-ctrl @@ -248,6 +249,7 @@ allOf: contains: enum: - qcom,msm8953-dsi-ctrl + - qcom,msm8976-dsi-ctrl then: properties: clocks: diff --git a/Documentation/devicetree/bindings/display/msm/gmu.yaml b/Documentation/devicetree/bindings/display/msm/gmu.yaml index 4e1c25b42908..b3837368a260 100644 --- a/Documentation/devicetree/bindings/display/msm/gmu.yaml +++ b/Documentation/devicetree/bindings/display/msm/gmu.yaml @@ -224,6 +224,7 @@ allOf: enum: - qcom,adreno-gmu-730.1 - qcom,adreno-gmu-740.1 + - qcom,adreno-gmu-750.1 then: properties: reg: diff --git a/Documentation/devicetree/bindings/display/msm/gpu.yaml b/Documentation/devicetree/bindings/display/msm/gpu.yaml index b019db954793..40b5c6bd11f8 100644 --- a/Documentation/devicetree/bindings/display/msm/gpu.yaml +++ b/Documentation/devicetree/bindings/display/msm/gpu.yaml @@ -23,7 +23,7 @@ properties: The driver is parsing the compat string for Adreno to figure out the gpu-id and patch level. items: - - pattern: '^qcom,adreno-[3-7][0-9][0-9]\.[0-9]$' + - pattern: '^qcom,adreno-[3-7][0-9][0-9]\.[0-9]+$' - const: qcom,adreno - description: | The driver is parsing the compat string for Imageon to @@ -127,7 +127,7 @@ allOf: properties: compatible: contains: - pattern: '^qcom,adreno-[3-5][0-9][0-9]\.[0-9]$' + pattern: '^qcom,adreno-[3-5][0-9][0-9]\.[0-9]+$' then: properties: @@ -203,7 +203,7 @@ allOf: properties: compatible: contains: - pattern: '^qcom,adreno-[67][0-9][0-9]\.[0-9]$' + pattern: '^qcom,adreno-[67][0-9][0-9]\.[0-9]+$' then: # Starting with A6xx, the clocks are usually defined in the GMU node properties: diff --git a/Documentation/devicetree/bindings/display/msm/qcom,mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,mdss.yaml index 0999ea07f47b..e4576546bf0d 100644 --- a/Documentation/devicetree/bindings/display/msm/qcom,mdss.yaml +++ b/Documentation/devicetree/bindings/display/msm/qcom,mdss.yaml @@ -127,6 +127,7 @@ patternProperties: - qcom,dsi-phy-20nm - qcom,dsi-phy-28nm-8226 - qcom,dsi-phy-28nm-hpm + - qcom,dsi-phy-28nm-hpm-fam-b - qcom,dsi-phy-28nm-lp - qcom,hdmi-phy-8084 - qcom,hdmi-phy-8660 diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sm8650-dpu.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sm8650-dpu.yaml index a01d15a03317..c4087cc5abbd 100644 --- a/Documentation/devicetree/bindings/display/msm/qcom,sm8650-dpu.yaml +++ b/Documentation/devicetree/bindings/display/msm/qcom,sm8650-dpu.yaml @@ -13,7 +13,9 @@ $ref: /schemas/display/msm/dpu-common.yaml# properties: compatible: - const: qcom,sm8650-dpu + enum: + - qcom,sm8650-dpu + - qcom,x1e80100-dpu reg: items: diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sm8650-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sm8650-mdss.yaml index bd11119dc93d..24cece1e888b 100644 --- a/Documentation/devicetree/bindings/display/msm/qcom,sm8650-mdss.yaml +++ b/Documentation/devicetree/bindings/display/msm/qcom,sm8650-mdss.yaml @@ -37,18 +37,21 @@ properties: patternProperties: 
"^display-controller@[0-9a-f]+$": type: object + additionalProperties: true properties: compatible: const: qcom,sm8650-dpu "^displayport-controller@[0-9a-f]+$": type: object + additionalProperties: true properties: compatible: const: qcom,sm8650-dp "^dsi@[0-9a-f]+$": type: object + additionalProperties: true properties: compatible: items: @@ -57,6 +60,7 @@ patternProperties: "^phy@[0-9a-f]+$": type: object + additionalProperties: true properties: compatible: const: qcom,sm8650-dsi-phy-4nm diff --git a/Documentation/devicetree/bindings/display/msm/qcom,x1e80100-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,x1e80100-mdss.yaml new file mode 100644 index 000000000000..3b01a0e47333 --- /dev/null +++ b/Documentation/devicetree/bindings/display/msm/qcom,x1e80100-mdss.yaml @@ -0,0 +1,251 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/msm/qcom,x1e80100-mdss.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm X1E80100 Display MDSS + +maintainers: + - Abel Vesa + +description: + X1E80100 MSM Mobile Display Subsystem(MDSS), which encapsulates sub-blocks like + DPU display controller, DP interfaces, etc. + +$ref: /schemas/display/msm/mdss-common.yaml# + +properties: + compatible: + const: qcom,x1e80100-mdss + + clocks: + items: + - description: Display AHB + - description: Display hf AXI + - description: Display core + + iommus: + maxItems: 1 + + interconnects: + maxItems: 3 + + interconnect-names: + maxItems: 3 + +patternProperties: + "^display-controller@[0-9a-f]+$": + type: object + additionalProperties: true + properties: + compatible: + const: qcom,x1e80100-dpu + + "^displayport-controller@[0-9a-f]+$": + type: object + additionalProperties: true + properties: + compatible: + const: qcom,x1e80100-dp + + "^phy@[0-9a-f]+$": + type: object + additionalProperties: true + properties: + compatible: + const: qcom,x1e80100-dp-phy + +required: + - compatible + +unevaluatedProperties: false + +examples: + - | + #include + #include + #include + #include + #include + + display-subsystem@ae00000 { + compatible = "qcom,x1e80100-mdss"; + reg = <0x0ae00000 0x1000>; + reg-names = "mdss"; + + interconnects = <&mmss_noc MASTER_MDP 0 &gem_noc SLAVE_LLCC 0>, + <&mc_virt MASTER_LLCC 0 &mc_virt SLAVE_EBI1 0>, + <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_DISPLAY_CFG 0>; + interconnect-names = "mdp0-mem", "mdp1-mem", "cpu-cfg"; + + resets = <&dispcc_core_bcr>; + + power-domains = <&dispcc_gdsc>; + + clocks = <&dispcc_ahb_clk>, + <&gcc_disp_hf_axi_clk>, + <&dispcc_mdp_clk>; + clock-names = "bus", "nrt_bus", "core"; + + interrupts = ; + interrupt-controller; + #interrupt-cells = <1>; + + iommus = <&apps_smmu 0x1c00 0x2>; + + #address-cells = <1>; + #size-cells = <1>; + ranges; + + display-controller@ae01000 { + compatible = "qcom,x1e80100-dpu"; + reg = <0x0ae01000 0x8f000>, + <0x0aeb0000 0x2008>; + reg-names = "mdp", "vbif"; + + clocks = <&gcc_axi_clk>, + <&dispcc_ahb_clk>, + <&dispcc_mdp_lut_clk>, + <&dispcc_mdp_clk>, + <&dispcc_mdp_vsync_clk>; + clock-names = "nrt_bus", + "iface", + "lut", + "core", + "vsync"; + + assigned-clocks = <&dispcc_mdp_vsync_clk>; + assigned-clock-rates = <19200000>; + + operating-points-v2 = <&mdp_opp_table>; + power-domains = <&rpmhpd RPMHPD_MMCX>; + + interrupt-parent = <&mdss>; + interrupts = <0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + dpu_intf1_out: endpoint { + remote-endpoint = <&dsi0_in>; + }; + }; + + port@1 { + reg = 
<1>; + dpu_intf2_out: endpoint { + remote-endpoint = <&dsi1_in>; + }; + }; + }; + + mdp_opp_table: opp-table { + compatible = "operating-points-v2"; + + opp-200000000 { + opp-hz = /bits/ 64 <200000000>; + required-opps = <&rpmhpd_opp_low_svs>; + }; + + opp-325000000 { + opp-hz = /bits/ 64 <325000000>; + required-opps = <&rpmhpd_opp_svs>; + }; + + opp-375000000 { + opp-hz = /bits/ 64 <375000000>; + required-opps = <&rpmhpd_opp_svs_l1>; + }; + + opp-514000000 { + opp-hz = /bits/ 64 <514000000>; + required-opps = <&rpmhpd_opp_nom>; + }; + }; + }; + + displayport-controller@ae90000 { + compatible = "qcom,x1e80100-dp"; + reg = <0 0xae90000 0 0x200>, + <0 0xae90200 0 0x200>, + <0 0xae90400 0 0x600>, + <0 0xae91000 0 0x400>, + <0 0xae91400 0 0x400>; + + interrupt-parent = <&mdss>; + interrupts = <12>; + + clocks = <&dispcc_mdss_ahb_clk>, + <&dispcc_dptx0_aux_clk>, + <&dispcc_dptx0_link_clk>, + <&dispcc_dptx0_link_intf_clk>, + <&dispcc_dptx0_pixel0_clk>; + clock-names = "core_iface", "core_aux", + "ctrl_link", + "ctrl_link_iface", + "stream_pixel"; + + assigned-clocks = <&dispcc_mdss_dptx0_link_clk_src>, + <&dispcc_mdss_dptx0_pixel0_clk_src>; + assigned-clock-parents = <&usb_1_ss0_qmpphy QMP_USB43DP_DP_LINK_CLK>, + <&usb_1_ss0_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>; + + operating-points-v2 = <&mdss_dp0_opp_table>; + + power-domains = <&rpmhpd RPMHPD_MMCX>; + + phys = <&usb_1_ss0_qmpphy QMP_USB43DP_DP_PHY>; + phy-names = "dp"; + + #sound-dai-cells = <0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + + mdss_dp0_in: endpoint { + remote-endpoint = <&mdss_intf0_out>; + }; + }; + + port@1 { + reg = <1>; + + mdss_dp0_out: endpoint { + }; + }; + }; + + mdss_dp0_opp_table: opp-table { + compatible = "operating-points-v2"; + + opp-160000000 { + opp-hz = /bits/ 64 <160000000>; + required-opps = <&rpmhpd_opp_low_svs>; + }; + + opp-270000000 { + opp-hz = /bits/ 64 <270000000>; + required-opps = <&rpmhpd_opp_svs>; + }; + + opp-540000000 { + opp-hz = /bits/ 64 <540000000>; + required-opps = <&rpmhpd_opp_svs_l1>; + }; + + opp-810000000 { + opp-hz = /bits/ 64 <810000000>; + required-opps = <&rpmhpd_opp_nom>; + }; + }; + }; + }; +... 
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml index a4042ae24770..740631782540 100644 --- a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml +++ b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml @@ -93,6 +93,7 @@ properties: - qcom,sm8350-smmu-500 - qcom,sm8450-smmu-500 - qcom,sm8550-smmu-500 + - qcom,sm8650-smmu-500 - const: qcom,adreno-smmu - const: qcom,smmu-500 - const: arm,mmu-500 @@ -484,7 +485,12 @@ allOf: - if: properties: compatible: - const: qcom,sm8450-smmu-500 + items: + - const: qcom,sm8450-smmu-500 + - const: qcom,adreno-smmu + - const: qcom,smmu-500 + - const: arm,mmu-500 + then: properties: clock-names: @@ -508,7 +514,13 @@ allOf: - if: properties: compatible: - const: qcom,sm8550-smmu-500 + items: + - enum: + - qcom,sm8550-smmu-500 + - qcom,sm8650-smmu-500 + - const: qcom,adreno-smmu + - const: qcom,smmu-500 + - const: arm,mmu-500 then: properties: clock-names: @@ -544,7 +556,6 @@ allOf: - qcom,sdx65-smmu-500 - qcom,sm6350-smmu-500 - qcom,sm6375-smmu-500 - - qcom,sm8650-smmu-500 - qcom,x1e80100-smmu-500 then: properties: diff --git a/MAINTAINERS b/MAINTAINERS index 2a95919fbc3d..7cccc6084c2b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -614,7 +614,7 @@ AGPGART DRIVER M: David Airlie L: dri-devel@lists.freedesktop.org S: Maintained -T: git git://anongit.freedesktop.org/drm/drm +T: git https://gitlab.freedesktop.org/drm/kernel.git F: drivers/char/agp/ F: include/linux/agp* F: include/uapi/linux/agp* @@ -7007,7 +7007,7 @@ L: dri-devel@lists.freedesktop.org S: Maintained B: https://gitlab.freedesktop.org/drm C: irc://irc.oftc.net/dri-devel -T: git git://anongit.freedesktop.org/drm/drm +T: git https://gitlab.freedesktop.org/drm/kernel.git F: Documentation/devicetree/bindings/display/ F: Documentation/devicetree/bindings/gpu/ F: Documentation/gpu/ diff --git a/arch/powerpc/include/asm/backlight.h b/arch/powerpc/include/asm/backlight.h index 1b5eab62ed04..061a910d7492 100644 --- a/arch/powerpc/include/asm/backlight.h +++ b/arch/powerpc/include/asm/backlight.h @@ -10,15 +10,14 @@ #define __ASM_POWERPC_BACKLIGHT_H #ifdef __KERNEL__ -#include #include +struct backlight_device; + /* For locking instructions, see the implementation file */ extern struct backlight_device *pmac_backlight; extern struct mutex pmac_backlight_mutex; -extern int pmac_backlight_curve_lookup(struct fb_info *info, int value); - extern int pmac_has_backlight_type(const char *type); extern void pmac_backlight_key(int direction); diff --git a/arch/powerpc/platforms/powermac/backlight.c b/arch/powerpc/platforms/powermac/backlight.c index aeb79a8b3e10..12bc01353bd3 100644 --- a/arch/powerpc/platforms/powermac/backlight.c +++ b/arch/powerpc/platforms/powermac/backlight.c @@ -9,7 +9,6 @@ */ #include -#include #include #include #include @@ -72,31 +71,6 @@ int pmac_has_backlight_type(const char *type) return 0; } -int pmac_backlight_curve_lookup(struct fb_info *info, int value) -{ - int level = (FB_BACKLIGHT_LEVELS - 1); - - if (info && info->bl_dev) { - int i, max = 0; - - /* Look for biggest value */ - for (i = 0; i < FB_BACKLIGHT_LEVELS; i++) - max = max((int)info->bl_curve[i], max); - - /* Look for nearest value */ - for (i = 0; i < FB_BACKLIGHT_LEVELS; i++) { - int diff = abs(info->bl_curve[i] - value); - if (diff < max) { - max = diff; - level = i; - } - } - - } - - return level; -} - static void pmac_backlight_key_worker(struct work_struct *work) { if (atomic_read(&kernel_backlight_disabled)) diff --git 
a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c index a6b48703dc9e..880ffcb50088 100644 --- a/drivers/firmware/sysfb.c +++ b/drivers/firmware/sysfb.c @@ -127,8 +127,10 @@ static __init int sysfb_init(void) sysfb_apply_efi_quirks(); parent = sysfb_parent_dev(si); - if (IS_ERR(parent)) + if (IS_ERR(parent)) { + ret = PTR_ERR(parent); goto unlock_mutex; + } /* try to create a simple-framebuffer device */ compatible = sysfb_parse_mode(si, &mode); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 9246bca0a008..9c62552bec34 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -194,6 +194,7 @@ extern int amdgpu_emu_mode; extern uint amdgpu_smu_memory_pool_size; extern int amdgpu_smu_pptable_id; extern uint amdgpu_dc_feature_mask; +extern uint amdgpu_freesync_vid_mode; extern uint amdgpu_dc_debug_mask; extern uint amdgpu_dc_visual_confirm; extern int amdgpu_dm_abm_level; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 190039f14c30..f5f2945711be 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -742,6 +742,11 @@ void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev) amdgpu_device_flush_hdp(adev, NULL); } +bool amdgpu_amdkfd_is_fed(struct amdgpu_device *adev) +{ + return amdgpu_ras_get_fed_status(adev); +} + void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, enum amdgpu_ras_block block, bool reset) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index e60f63ccf79a..0ef223c2affb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -320,7 +320,7 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem, void **kptr, uint64_t *size); void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem); -int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_bo *bo); +int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo); int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info, struct dma_fence __rcu **ef); @@ -337,6 +337,7 @@ int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev, struct tile_config *config); void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, enum amdgpu_ras_block block, bool reset); +bool amdgpu_amdkfd_is_fed(struct amdgpu_device *adev); bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem); void amdgpu_amdkfd_block_mmu_notifications(void *p); int amdgpu_amdkfd_criu_resume(void *p); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 5cd84f72bf26..14dc9d2d8d53 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -2189,13 +2189,12 @@ int amdgpu_amdkfd_gpuvm_sync_memory( /** * amdgpu_amdkfd_map_gtt_bo_to_gart - Map BO to GART and increment reference count - * @adev: Device to which allocated BO belongs * @bo: Buffer object to be mapped * * Before return, bo reference count is incremented. To release the reference and unpin/ * unmap the BO, call amdgpu_amdkfd_free_gtt_mem. 
*/ -int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_bo *bo) +int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo) { int ret; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index b0ea4ddc8e72..e68bd6f8a6a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4040,8 +4040,10 @@ int amdgpu_device_init(struct amdgpu_device *adev, * early on during init and before calling to RREG32. */ adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev"); - if (!adev->reset_domain) - return -ENOMEM; + if (!adev->reset_domain) { + r = -ENOMEM; + goto unmap_memory; + } /* detect hw virtualization here */ amdgpu_detect_virtualization(adev); @@ -4051,20 +4053,20 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_device_get_job_timeout_settings(adev); if (r) { dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n"); - return r; + goto unmap_memory; } /* early init functions */ r = amdgpu_device_ip_early_init(adev); if (r) - return r; + goto unmap_memory; amdgpu_device_set_mcbp(adev); /* Get rid of things like offb */ r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver); if (r) - return r; + goto unmap_memory; /* Enable TMZ based on IP_VERSION */ amdgpu_gmc_tmz_set(adev); @@ -4074,7 +4076,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (adev->gmc.xgmi.supported) { r = adev->gfxhub.funcs->get_xgmi_info(adev); if (r) - return r; + goto unmap_memory; } /* enable PCIE atomic ops */ @@ -4343,6 +4345,8 @@ release_ras_con: failed: amdgpu_vf_error_trans_all(adev); +unmap_memory: + iounmap(adev->rmmio); return r; } @@ -5321,6 +5325,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, list_for_each_entry(tmp_adev, device_list_handle, reset_list) { if (need_full_reset) { /* post card */ + amdgpu_ras_set_fed(tmp_adev, false); r = amdgpu_device_asic_init(tmp_adev); if (r) { dev_warn(tmp_adev->dev, "asic atom init failed!"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 78588334577a..502333725b49 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -1683,6 +1683,7 @@ static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(11, 0, 3): case IP_VERSION(11, 0, 4): case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 5, 1): amdgpu_device_ip_block_add(adev, &soc21_common_ip_block); break; default: @@ -1730,6 +1731,7 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(11, 0, 3): case IP_VERSION(11, 0, 4): case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 5, 1): amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block); break; default: @@ -2003,6 +2005,7 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(11, 0, 3): case IP_VERSION(11, 0, 4): case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 5, 1): amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block); break; default: @@ -2137,6 +2140,7 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block); break; case IP_VERSION(4, 0, 5): + case IP_VERSION(4, 0, 6): amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block); amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block); break; @@ -2182,6 +2186,7 @@ static int 
amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(11, 0, 3): case IP_VERSION(11, 0, 4): case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 5, 1): amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block); adev->enable_mes = true; adev->enable_mes_kiq = true; @@ -2460,6 +2465,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) adev->family = AMDGPU_FAMILY_GC_11_0_1; break; case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 5, 1): adev->family = AMDGPU_FAMILY_GC_11_5_0; break; default: @@ -2479,6 +2485,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 4): case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 5, 1): adev->flags |= AMD_IS_APU; break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index b8fbe97efe1d..3ecc7ef95172 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -1350,14 +1350,6 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev) "dither", amdgpu_dither_enum_list, sz); - if (adev->dc_enabled) { - adev->mode_info.abm_level_property = - drm_property_create_range(adev_to_drm(adev), 0, - "abm level", 0, 4); - if (!adev->mode_info.abm_level_property) - return -ENOMEM; - } - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index af7fae7907d7..6acffedf648c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -199,6 +199,7 @@ int amdgpu_mes_kiq; int amdgpu_noretry = -1; int amdgpu_force_asic_type = -1; int amdgpu_tmz = -1; /* auto */ +uint amdgpu_freesync_vid_mode; int amdgpu_reset_method = -1; /* auto */ int amdgpu_num_kcq = -1; int amdgpu_smartshift_bias; @@ -883,6 +884,32 @@ module_param_named(damageclips, amdgpu_damage_clips, int, 0444); MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto (default), 0 = off, 1 = on)"); module_param_named(tmz, amdgpu_tmz, int, 0444); +/** + * DOC: freesync_video (uint) + * Enable the optimization to adjust front porch timing to achieve seamless + * mode change experience when setting a freesync supported mode for which full + * modeset is not needed. + * + * The Display Core will add a set of modes derived from the base FreeSync + * video mode into the corresponding connector's mode list based on commonly + * used refresh rates and VRR range of the connected display, when users enable + * this feature. From the userspace perspective, they can see a seamless mode + * change experience when the change between different refresh rates under the + * same resolution. Additionally, userspace applications such as Video playback + * can read this modeset list and change the refresh rate based on the video + * frame rate. Finally, the userspace can also derive an appropriate mode for a + * particular refresh rate based on the FreeSync Mode and add it to the + * connector's mode list. + * + * Note: This is an experimental feature. + * + * The default value: 0 (off). 
+ */ +MODULE_PARM_DESC( + freesync_video, + "Enable freesync modesetting optimization feature (0 = off (default), 1 = on)"); +module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444); + /** * DOC: reset_method (int) * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco) @@ -2764,8 +2791,7 @@ static int amdgpu_pmops_runtime_idle(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(drm_dev); - /* we don't want the main rpm_idle to call suspend - we want to autosuspend */ - int ret = 1; + int ret; if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE) { pm_runtime_forbid(dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 70bff8cecfda..10832b470448 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -61,9 +61,7 @@ static struct kmem_cache *amdgpu_fence_slab; int amdgpu_fence_slab_init(void) { - amdgpu_fence_slab = kmem_cache_create( - "amdgpu_fence", sizeof(struct amdgpu_fence), 0, - SLAB_HWCACHE_ALIGN, NULL); + amdgpu_fence_slab = KMEM_CACHE(amdgpu_fence, SLAB_HWCACHE_ALIGN); if (!amdgpu_fence_slab) return -ENOMEM; return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index f04803a44b44..f8b48fd93108 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -304,11 +304,11 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev, return -EINVAL; } -int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - struct amdgpu_irq_src *irq, int xcc_id) +int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, int xcc_id) { struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; + struct amdgpu_irq_src *irq = &kiq->irq; + struct amdgpu_ring *ring = &kiq->ring; int r = 0; spin_lock_init(&kiq->ring_lock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index f23bafec71c5..8fcf889ddce9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -471,9 +471,7 @@ static inline u32 amdgpu_gfx_create_bitmask(u32 bit_width) void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh); -int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - struct amdgpu_irq_src *irq, int xcc_id); +int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, int xcc_id); void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index d4a848c51a83..be4629cdac04 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -843,6 +843,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev) case IP_VERSION(10, 3, 3): case IP_VERSION(11, 0, 4): case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 5, 1): /* Don't enable it by default yet. 
*/ if (amdgpu_tmz < 1) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 2e4911050cc5..1fe21a70ddd0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -324,8 +324,6 @@ struct amdgpu_mode_info { struct drm_property *audio_property; /* FMT dithering */ struct drm_property *dither_property; - /* Adaptive Backlight Modulation (power feature) */ - struct drm_property *abm_level_property; /* hardcoded DFP edid from BIOS */ struct edid *bios_hardcoded_edid; int bios_hardcoded_edid_size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c index 51ca544a7094..d085687a47ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c @@ -53,14 +53,6 @@ u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev) return 0; } -void amdgpu_nbio_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0, - uint64_t *count1) -{ - if (adev->nbio.funcs->get_pcie_usage) - adev->nbio.funcs->get_pcie_usage(adev, count0, count1); - -} - int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) { int r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h index 65e35059de40..7b8c03be1d9e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h @@ -102,8 +102,6 @@ struct amdgpu_nbio_funcs { u32 (*get_memory_partition_mode)(struct amdgpu_device *adev, u32 *supp_modes); u64 (*get_pcie_replay_count)(struct amdgpu_device *adev); - void (*get_pcie_usage)(struct amdgpu_device *adev, uint64_t *count0, - uint64_t *count1); }; struct amdgpu_nbio { @@ -116,7 +114,6 @@ struct amdgpu_nbio { }; int amdgpu_nbio_ras_sw_init(struct amdgpu_device *adev); -void amdgpu_nbio_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0, uint64_t *count1); int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block); u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 46f3d1013e8c..8ebab6f22e5a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -2439,6 +2439,18 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET; set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); + /* For any RAS error that needs a full reset to + * recover, set the fatal error status + */ + if (hive) { + list_for_each_entry(remote_adev, + &hive->device_list, + gmc.xgmi.head) + amdgpu_ras_set_fed(remote_adev, + true); + } else { + amdgpu_ras_set_fed(adev, true); + } psp_fatal_error_recovery_quirk(&adev->psp); } } @@ -3440,6 +3452,26 @@ int amdgpu_ras_fini(struct amdgpu_device *adev) return 0; } +bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev) +{ + struct amdgpu_ras *ras; + + ras = amdgpu_ras_get_context(adev); + if (!ras) + return false; + + return atomic_read(&ras->fed); +} + +void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status) +{ + struct amdgpu_ras *ras; + + ras = amdgpu_ras_get_context(adev); + if (ras) + atomic_set(&ras->fed, !!status); +} + void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) { if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) { @@ -3620,6 +3652,7 @@ int amdgpu_ras_is_supported(struct amdgpu_device *adev, block == 
AMDGPU_RAS_BLOCK__SDMA || block == AMDGPU_RAS_BLOCK__VCN || block == AMDGPU_RAS_BLOCK__JPEG) && + (amdgpu_ras_mask & (1 << block)) && amdgpu_ras_is_poison_mode_supported(adev) && amdgpu_ras_get_ras_block(adev, block, 0)) ret = 1; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index d10e5bb0e52f..e0f8ce9d8440 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -477,6 +477,8 @@ struct amdgpu_ras { wait_queue_head_t page_retirement_wq; struct mutex page_retirement_lock; atomic_t page_retirement_req_cnt; + /* Fatal error detected flag */ + atomic_t fed; }; struct ras_fs_data { @@ -873,4 +875,8 @@ void amdgpu_ras_add_mca_err_addr(struct ras_err_info *err_info, void amdgpu_ras_del_mca_err_addr(struct ras_err_info *err_info, struct ras_err_addr *mca_err_addr); + +void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status); +bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c index e1ee1c7117fb..d234b7ccfaaf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c @@ -159,9 +159,7 @@ int amdgpu_ring_mux_init(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring, mux->ring_entry_size = entry_size; mux->s_resubmit = false; - amdgpu_mux_chunk_slab = kmem_cache_create("amdgpu_mux_chunk", - sizeof(struct amdgpu_mux_chunk), 0, - SLAB_HWCACHE_ALIGN, NULL); + amdgpu_mux_chunk_slab = KMEM_CACHE(amdgpu_mux_chunk, SLAB_HWCACHE_ALIGN); if (!amdgpu_mux_chunk_slab) { DRM_ERROR("create amdgpu_mux_chunk cache failed\n"); return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 1b013a44ca99..bdf1ef825d89 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -441,9 +441,7 @@ void amdgpu_sync_free(struct amdgpu_sync *sync) */ int amdgpu_sync_init(void) { - amdgpu_sync_slab = kmem_cache_create( - "amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0, - SLAB_HWCACHE_ALIGN, NULL); + amdgpu_sync_slab = KMEM_CACHE(amdgpu_sync_entry, SLAB_HWCACHE_ALIGN); if (!amdgpu_sync_slab) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index eb2a88991206..b2535023764f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -59,6 +59,7 @@ #define FIRMWARE_VCN4_0_3 "amdgpu/vcn_4_0_3.bin" #define FIRMWARE_VCN4_0_4 "amdgpu/vcn_4_0_4.bin" #define FIRMWARE_VCN4_0_5 "amdgpu/vcn_4_0_5.bin" +#define FIRMWARE_VCN4_0_6 "amdgpu/vcn_4_0_6.bin" #define FIRMWARE_VCN5_0_0 "amdgpu/vcn_5_0_0.bin" MODULE_FIRMWARE(FIRMWARE_RAVEN); @@ -83,6 +84,7 @@ MODULE_FIRMWARE(FIRMWARE_VCN4_0_2); MODULE_FIRMWARE(FIRMWARE_VCN4_0_3); MODULE_FIRMWARE(FIRMWARE_VCN4_0_4); MODULE_FIRMWARE(FIRMWARE_VCN4_0_5); +MODULE_FIRMWARE(FIRMWARE_VCN4_0_6); MODULE_FIRMWARE(FIRMWARE_VCN5_0_0); static void amdgpu_vcn_idle_work_handler(struct work_struct *work); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 6ff7d3fb2008..7a4eae36778a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -979,7 +979,7 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f * SCRATCH_REG0 = read/write value * SCRATCH_REG1[30:28] = command * SCRATCH_REG1[19:0] = address in dword - * 
SCRATCH_REG1[26:24] = Error reporting + * SCRATCH_REG1[27:24] = Error reporting */ writel(v, scratch_reg0); writel((offset | flag), scratch_reg1); @@ -993,7 +993,8 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f udelay(10); } - if (i >= timeout) { + tmp = readl(scratch_reg1); + if (i >= timeout || (tmp & AMDGPU_RLCG_SCRATCH1_ERROR_MASK) != 0) { if (amdgpu_sriov_rlcg_error_report_enabled(adev)) { if (tmp & AMDGPU_RLCG_VFGATE_DISABLED) { dev_err(adev->dev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index fa7be5f277b9..3f59b7b5523f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -45,6 +45,7 @@ #define AMDGPU_RLCG_REG_NOT_IN_RANGE 0x1000000 #define AMDGPU_RLCG_SCRATCH1_ADDRESS_MASK 0xFFFFF +#define AMDGPU_RLCG_SCRATCH1_ERROR_MASK 0xF000000 /* all asic after AI use this offset */ #define mmRCC_IOV_FUNC_IDENTIFIER 0xDE5 diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c index b888613f653f..72362df352f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.c +++ b/drivers/gpu/drm/amd/amdgpu/atom.c @@ -320,7 +320,7 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr, DEBUG("IMM 0x%02X\n", val); return val; } - return 0; + break; case ATOM_ARG_PLL: idx = U8(*ptr); (*ptr)++; diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 4dfaa017cf7f..a3a643254d7a 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1638,28 +1638,18 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev) PCI_EXP_LNKCTL_HAWD); /* linkctl2 */ - pcie_capability_read_word(root, PCI_EXP_LNKCTL2, - &tmp16); - tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP | - PCI_EXP_LNKCTL2_TX_MARGIN); - tmp16 |= (bridge_cfg2 & - (PCI_EXP_LNKCTL2_ENTER_COMP | - PCI_EXP_LNKCTL2_TX_MARGIN)); - pcie_capability_write_word(root, - PCI_EXP_LNKCTL2, - tmp16); - - pcie_capability_read_word(adev->pdev, - PCI_EXP_LNKCTL2, - &tmp16); - tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP | - PCI_EXP_LNKCTL2_TX_MARGIN); - tmp16 |= (gpu_cfg2 & - (PCI_EXP_LNKCTL2_ENTER_COMP | - PCI_EXP_LNKCTL2_TX_MARGIN)); - pcie_capability_write_word(adev->pdev, - PCI_EXP_LNKCTL2, - tmp16); + pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL2, + PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN, + bridge_cfg2 & + (PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN)); + pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL2, + PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN, + gpu_cfg2 & + (PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN)); tmp = RREG32_PCIE(ixPCIE_LC_CNTL4); tmp &= ~PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK; @@ -1674,16 +1664,15 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev) speed_cntl &= ~PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE_MASK; WREG32_PCIE(ixPCIE_LC_SPEED_CNTL, speed_cntl); - pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL2, &tmp16); - tmp16 &= ~PCI_EXP_LNKCTL2_TLS; - + tmp16 = 0; if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */ else tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */ - pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL2, tmp16); + pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL2, + PCI_EXP_LNKCTL2_TLS, tmp16); speed_cntl = 
RREG32_PCIE(ixPCIE_LC_SPEED_CNTL); speed_cntl |= PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index b02d63328f1c..691fa40e4e01 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -4490,7 +4490,7 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, static int gfx_v10_0_sw_init(void *handle) { int i, j, k, r, ring_id = 0; - struct amdgpu_kiq *kiq; + int xcc_id = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { @@ -4619,8 +4619,7 @@ static int gfx_v10_0_sw_init(void *handle) return r; } - kiq = &adev->gfx.kiq[0]; - r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0); + r = amdgpu_gfx_kiq_init_ring(adev, xcc_id); if (r) return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 2fb1342d5bd9..0d90d60a21d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -89,6 +89,10 @@ MODULE_FIRMWARE("amdgpu/gc_11_5_0_pfp.bin"); MODULE_FIRMWARE("amdgpu/gc_11_5_0_me.bin"); MODULE_FIRMWARE("amdgpu/gc_11_5_0_mec.bin"); MODULE_FIRMWARE("amdgpu/gc_11_5_0_rlc.bin"); +MODULE_FIRMWARE("amdgpu/gc_11_5_1_pfp.bin"); +MODULE_FIRMWARE("amdgpu/gc_11_5_1_me.bin"); +MODULE_FIRMWARE("amdgpu/gc_11_5_1_mec.bin"); +MODULE_FIRMWARE("amdgpu/gc_11_5_1_rlc.bin"); static const struct soc15_reg_golden golden_settings_gc_11_0[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL, 0x20000000, 0x20000000) @@ -907,6 +911,7 @@ static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev) case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 4): case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 5, 1): adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; adev->gfx.config.sc_prim_fifo_size_backend = 0x100; @@ -1329,7 +1334,7 @@ static int gfx_v11_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev) static int gfx_v11_0_sw_init(void *handle) { int i, j, k, r, ring_id = 0; - struct amdgpu_kiq *kiq; + int xcc_id = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { @@ -1346,6 +1351,7 @@ static int gfx_v11_0_sw_init(void *handle) case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 4): case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 5, 1): adev->gfx.me.num_me = 1; adev->gfx.me.num_pipe_per_me = 1; adev->gfx.me.num_queue_per_pipe = 1; @@ -1454,8 +1460,7 @@ static int gfx_v11_0_sw_init(void *handle) return r; } - kiq = &adev->gfx.kiq[0]; - r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0); + r = amdgpu_gfx_kiq_init_ring(adev, xcc_id); if (r) return r; } @@ -2588,7 +2593,8 @@ static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev) IP_VERSION(11, 0, 1) || amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 4) || - amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 0)) + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 0) || + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 1)) bootload_status = RREG32_SOC15(GC, 0, regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1); else @@ -5082,6 +5088,7 @@ static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable) case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 4): case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 5, 1): WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1); break; 
default: @@ -5117,6 +5124,7 @@ static int gfx_v11_0_set_powergating_state(void *handle, case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 4): case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 5, 1): if (!enable) amdgpu_gfx_off_ctrl(adev, false); @@ -5148,6 +5156,7 @@ static int gfx_v11_0_set_clockgating_state(void *handle, case IP_VERSION(11, 0, 3): case IP_VERSION(11, 0, 4): case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 5, 1): gfx_v11_0_update_gfx_clock_gating(adev, state == AMD_CG_STATE_GATE); break; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index ea174b76ee70..b97ea62212b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1900,8 +1900,8 @@ static void gfx_v8_0_sq_irq_work_func(struct work_struct *work); static int gfx_v8_0_sw_init(void *handle) { int i, j, k, r, ring_id; + int xcc_id = 0; struct amdgpu_ring *ring; - struct amdgpu_kiq *kiq; struct amdgpu_device *adev = (struct amdgpu_device *)handle; switch (adev->asic_type) { @@ -2022,8 +2022,7 @@ static int gfx_v8_0_sw_init(void *handle) return r; } - kiq = &adev->gfx.kiq[0]; - r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0); + r = amdgpu_gfx_kiq_init_ring(adev, xcc_id); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 169d45268ef6..7669f82aa1da 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1997,8 +1997,8 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, static int gfx_v9_0_sw_init(void *handle) { int i, j, k, r, ring_id; + int xcc_id = 0; struct amdgpu_ring *ring; - struct amdgpu_kiq *kiq; struct amdgpu_device *adev = (struct amdgpu_device *)handle; unsigned int hw_prio; @@ -2151,8 +2151,7 @@ static int gfx_v9_0_sw_init(void *handle) return r; } - kiq = &adev->gfx.kiq[0]; - r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0); + r = amdgpu_gfx_kiq_init_ring(adev, xcc_id); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index aace4594a603..b53c8fd4e8cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -843,7 +843,6 @@ static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id, static int gfx_v9_4_3_sw_init(void *handle) { int i, j, k, r, ring_id, xcc_id, num_xcc; - struct amdgpu_kiq *kiq; struct amdgpu_device *adev = (struct amdgpu_device *)handle; adev->gfx.mec.num_mec = 2; @@ -912,8 +911,7 @@ static int gfx_v9_4_3_sw_init(void *handle) return r; } - kiq = &adev->gfx.kiq[xcc_id]; - r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, xcc_id); + r = amdgpu_gfx_kiq_init_ring(adev, xcc_id); if (r) return r; @@ -3953,6 +3951,9 @@ static void gfx_v9_4_3_inst_enable_watchdog_timer(struct amdgpu_device *adev, uint32_t i; uint32_t data; + if (amdgpu_sriov_vf(adev)) + return; + data = RREG32_SOC15(GC, GET_INST(GC, 0), regSQ_TIMEOUT_CONFIG); data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE, amdgpu_watchdog_timer.timeout_fatal_disable ? 
1 : 0); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index 998daa702b44..a3812f0036a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -587,6 +587,7 @@ static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev) adev->gfxhub.funcs = &gfxhub_v3_0_3_funcs; break; case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 5, 1): adev->gfxhub.funcs = &gfxhub_v11_5_0_funcs; break; default: @@ -748,6 +749,7 @@ static int gmc_v11_0_sw_init(void *handle) case IP_VERSION(11, 0, 3): case IP_VERSION(11, 0, 4): case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 5, 1): set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask); /* diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index d442ae85162d..1439e62e9378 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -496,14 +496,14 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, if (j >= AMDGPU_MMHUB0(0)) tmp = RREG32_SOC15_IP(MMHUB, reg); else - tmp = RREG32_SOC15_IP(GC, reg); + tmp = RREG32_XCC(reg, j); tmp &= ~bits; if (j >= AMDGPU_MMHUB0(0)) WREG32_SOC15_IP(MMHUB, reg, tmp); else - WREG32_SOC15_IP(GC, reg, tmp); + WREG32_XCC(reg, tmp, j); } } break; @@ -524,14 +524,14 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, if (j >= AMDGPU_MMHUB0(0)) tmp = RREG32_SOC15_IP(MMHUB, reg); else - tmp = RREG32_SOC15_IP(GC, reg); + tmp = RREG32_XCC(reg, j); tmp |= bits; if (j >= AMDGPU_MMHUB0(0)) WREG32_SOC15_IP(MMHUB, reg, tmp); else - WREG32_SOC15_IP(GC, reg, tmp); + WREG32_XCC(reg, tmp, j); } } break; diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c index c0bdab3bf0e4..3e91a8e42c21 100644 --- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c @@ -37,6 +37,7 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_2_imu.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_3_imu.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_4_imu.bin"); MODULE_FIRMWARE("amdgpu/gc_11_5_0_imu.bin"); +MODULE_FIRMWARE("amdgpu/gc_11_5_1_imu.bin"); static int imu_v11_0_init_microcode(struct amdgpu_device *adev) { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c index 8d1754e35605..edf5bcdd2bc9 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c @@ -53,6 +53,11 @@ static int jpeg_v4_0_5_set_powergating_state(void *handle, static void jpeg_v4_0_5_dec_ring_set_wptr(struct amdgpu_ring *ring); +static int amdgpu_ih_clientid_jpeg[] = { + SOC15_IH_CLIENTID_VCN, + SOC15_IH_CLIENTID_VCN1 +}; + /** * jpeg_v4_0_5_early_init - set function pointers * @@ -64,8 +69,20 @@ static int jpeg_v4_0_5_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) { + case IP_VERSION(4, 0, 5): + adev->jpeg.num_jpeg_inst = 1; + break; + case IP_VERSION(4, 0, 6): + adev->jpeg.num_jpeg_inst = 2; + break; + default: + DRM_DEV_ERROR(adev->dev, + "Failed to init vcn ip block(UVD_HWIP:0x%x)\n", + amdgpu_ip_version(adev, UVD_HWIP, 0)); + return -EINVAL; + } - adev->jpeg.num_jpeg_inst = 1; adev->jpeg.num_jpeg_rings = 1; jpeg_v4_0_5_set_dec_ring_funcs(adev); @@ -85,25 +102,30 @@ static int jpeg_v4_0_5_sw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring; - int r; + int r, i; - /* JPEG TRAP */ - r = 
amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, - VCN_4_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq); - if (r) - return r; + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; - /* JPEG DJPEG POISON EVENT */ - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, - VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->irq); - if (r) - return r; + /* JPEG TRAP */ + r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i], + VCN_4_0__SRCID__JPEG_DECODE, &adev->jpeg.inst[i].irq); + if (r) + return r; - /* JPEG EJPEG POISON EVENT */ - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, - VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->irq); - if (r) - return r; + /* JPEG DJPEG POISON EVENT */ + r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i], + VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].irq); + if (r) + return r; + + /* JPEG EJPEG POISON EVENT */ + r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i], + VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].irq); + if (r) + return r; + } r = amdgpu_jpeg_sw_init(adev); if (r) @@ -113,21 +135,23 @@ static int jpeg_v4_0_5_sw_init(void *handle) if (r) return r; - ring = adev->jpeg.inst->ring_dec; - ring->use_doorbell = true; - ring->doorbell_index = amdgpu_sriov_vf(adev) ? - (((adev->doorbell_index.vcn.vcn_ring0_1) << 1) + 4) : - ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1); - ring->vm_hub = AMDGPU_MMHUB0(0); + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; - sprintf(ring->name, "jpeg_dec"); - r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0, - AMDGPU_RING_PRIO_DEFAULT, NULL); - if (r) - return r; + ring = adev->jpeg.inst[i].ring_dec; + ring->use_doorbell = true; + ring->vm_hub = AMDGPU_MMHUB0(0); + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i; + sprintf(ring->name, "jpeg_dec_%d", i); + r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq, + 0, AMDGPU_RING_PRIO_DEFAULT, NULL); + if (r) + return r; - adev->jpeg.internal.jpeg_pitch[0] = regUVD_JPEG_PITCH_INTERNAL_OFFSET; - adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH); + adev->jpeg.internal.jpeg_pitch[0] = regUVD_JPEG_PITCH_INTERNAL_OFFSET; + adev->jpeg.inst[i].external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, i, regUVD_JPEG_PITCH); + } return 0; } @@ -162,8 +186,8 @@ static int jpeg_v4_0_5_sw_fini(void *handle) static int jpeg_v4_0_5_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; - int r; + struct amdgpu_ring *ring; + int r, i; // TODO: Enable ring test with DPG support if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG) { @@ -171,9 +195,15 @@ static int jpeg_v4_0_5_hw_init(void *handle) return 0; } - r = amdgpu_ring_test_helper(ring); - if (r) - return r; + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + ring = adev->jpeg.inst[i].ring_dec; + r = amdgpu_ring_test_helper(ring); + if (r) + return r; + } if (!r) DRM_INFO("JPEG decode initialized successfully under SPG Mode\n"); @@ -191,14 +221,20 @@ static int jpeg_v4_0_5_hw_init(void *handle) static int jpeg_v4_0_5_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i; cancel_delayed_work_sync(&adev->vcn.idle_work); - if (!amdgpu_sriov_vf(adev)) { - if (adev->jpeg.cur_state != AMD_PG_STATE_GATE && - RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS)) - 
jpeg_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE); - } + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + if (!amdgpu_sriov_vf(adev)) { + if (adev->jpeg.cur_state != AMD_PG_STATE_GATE && + RREG32_SOC15(JPEG, i, regUVD_JRBC_STATUS)) + jpeg_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE); + } + } return 0; } @@ -440,13 +476,17 @@ static void jpeg_v4_0_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx) */ static int jpeg_v4_0_5_start(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; + struct amdgpu_ring *ring; int r, i; if (adev->pm.dpm_enabled) amdgpu_dpm_enable_jpeg(adev, true); for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + ring = adev->jpeg.inst[i].ring_dec; /* doorbell programming is done for every playback */ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i, i); @@ -509,11 +549,14 @@ static int jpeg_v4_0_5_stop(struct amdgpu_device *adev) int r, i; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG) { jpeg_v4_0_5_stop_dpg_mode(adev, i); continue; } + /* reset JMI */ WREG32_P(SOC15_REG_OFFSET(JPEG, i, regUVD_JMI_CNTL), UVD_JMI_CNTL__SOFT_RESET_MASK, @@ -526,7 +569,6 @@ static int jpeg_v4_0_5_stop(struct amdgpu_device *adev) if (r) return r; } - if (adev->pm.dpm_enabled) amdgpu_dpm_enable_jpeg(adev, false); @@ -544,7 +586,7 @@ static uint64_t jpeg_v4_0_5_dec_ring_get_rptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - return RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_RPTR); + return RREG32_SOC15(JPEG, ring->me, regUVD_JRBC_RB_RPTR); } /** @@ -561,7 +603,7 @@ static uint64_t jpeg_v4_0_5_dec_ring_get_wptr(struct amdgpu_ring *ring) if (ring->use_doorbell) return *ring->wptr_cpu_addr; else - return RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR); + return RREG32_SOC15(JPEG, ring->me, regUVD_JRBC_RB_WPTR); } /** @@ -579,29 +621,41 @@ static void jpeg_v4_0_5_dec_ring_set_wptr(struct amdgpu_ring *ring) *ring->wptr_cpu_addr = lower_32_bits(ring->wptr); WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); } else { - WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(JPEG, ring->me, regUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr)); } } static bool jpeg_v4_0_5_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int ret = 1; + int i, ret = 1; - ret &= (((RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS) & - UVD_JRBC_STATUS__RB_JOB_DONE_MASK) == - UVD_JRBC_STATUS__RB_JOB_DONE_MASK)); + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + ret &= (((RREG32_SOC15(JPEG, i, regUVD_JRBC_STATUS) & + UVD_JRBC_STATUS__RB_JOB_DONE_MASK) == + UVD_JRBC_STATUS__RB_JOB_DONE_MASK)); + } return ret; } static int jpeg_v4_0_5_wait_for_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i; - return SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_JRBC_STATUS, - UVD_JRBC_STATUS__RB_JOB_DONE_MASK, - UVD_JRBC_STATUS__RB_JOB_DONE_MASK); + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + return SOC15_WAIT_ON_RREG(JPEG, i, regUVD_JRBC_STATUS, + UVD_JRBC_STATUS__RB_JOB_DONE_MASK, + UVD_JRBC_STATUS__RB_JOB_DONE_MASK); + } + 
+ return 0; } static int jpeg_v4_0_5_set_clockgating_state(void *handle, @@ -657,11 +711,25 @@ static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { + uint32_t ip_instance; + DRM_DEBUG("IH: JPEG TRAP\n"); + switch (entry->client_id) { + case SOC15_IH_CLIENTID_VCN: + ip_instance = 0; + break; + case SOC15_IH_CLIENTID_VCN1: + ip_instance = 1; + break; + default: + DRM_ERROR("Unhandled client id: %d\n", entry->client_id); + return 0; + } + switch (entry->src_id) { case VCN_4_0__SRCID__JPEG_DECODE: - amdgpu_fence_process(adev->jpeg.inst->ring_dec); + amdgpu_fence_process(adev->jpeg.inst[ip_instance].ring_dec); break; case VCN_4_0__SRCID_DJPEG0_POISON: case VCN_4_0__SRCID_EJPEG0_POISON: @@ -734,6 +802,7 @@ static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev) continue; adev->jpeg.inst[i].ring_dec->funcs = &jpeg_v4_0_5_dec_ring_vm_funcs; + adev->jpeg.inst[i].ring_dec->me = i; DRM_DEV_INFO(adev->dev, "JPEG%d decode is enabled in VM mode\n", i); } } diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 26d71a22395d..072c478665ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -49,6 +49,8 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes_2.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes1.bin"); MODULE_FIRMWARE("amdgpu/gc_11_5_0_mes_2.bin"); MODULE_FIRMWARE("amdgpu/gc_11_5_0_mes1.bin"); +MODULE_FIRMWARE("amdgpu/gc_11_5_1_mes_2.bin"); +MODULE_FIRMWARE("amdgpu/gc_11_5_1_mes1.bin"); static int mes_v11_0_hw_fini(void *handle); @@ -56,6 +58,7 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev); static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev); #define MES_EOP_SIZE 2048 +#define GFX_MES_DRAM_SIZE 0x80000 static void mes_v11_0_ring_set_wptr(struct amdgpu_ring *ring) { @@ -475,7 +478,13 @@ static int mes_v11_0_allocate_ucode_data_buffer(struct amdgpu_device *adev, le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes)); fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes); - r = amdgpu_bo_create_reserved(adev, fw_size, + if (fw_size > GFX_MES_DRAM_SIZE) { + dev_err(adev->dev, "PIPE%d ucode data fw size (%d) is greater than dram size (%d)\n", + pipe, fw_size, GFX_MES_DRAM_SIZE); + return -EINVAL; + } + + r = amdgpu_bo_create_reserved(adev, GFX_MES_DRAM_SIZE, 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT, @@ -611,8 +620,8 @@ static int mes_v11_0_load_microcode(struct amdgpu_device *adev, WREG32_SOC15(GC, 0, regCP_MES_MDBASE_HI, upper_32_bits(adev->mes.data_fw_gpu_addr[pipe])); - /* Set 0x3FFFF (256K-1) to CP_MES_MDBOUND_LO */ - WREG32_SOC15(GC, 0, regCP_MES_MDBOUND_LO, 0x3FFFF); + /* Set 0x7FFFF (512K-1) to CP_MES_MDBOUND_LO */ + WREG32_SOC15(GC, 0, regCP_MES_MDBOUND_LO, 0x7FFFF); if (prime_icache) { /* invalidate ICACHE */ diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index b4723d68eab0..40d1e209eab7 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -35,15 +35,6 @@ /* Core 0 Port 0 counter */ #define smnPCIEP_NAK_COUNTER 0x1A340218 -#define smnPCIE_PERF_CNTL_TXCLK3 0x1A38021c -#define smnPCIE_PERF_CNTL_TXCLK7 0x1A380888 -#define smnPCIE_PERF_COUNT_CNTL 0x1A380200 -#define smnPCIE_PERF_COUNT0_TXCLK3 0x1A380220 -#define smnPCIE_PERF_COUNT0_TXCLK7 0x1A38088C -#define smnPCIE_PERF_COUNT0_UPVAL_TXCLK3 0x1A3808F8 -#define smnPCIE_PERF_COUNT0_UPVAL_TXCLK7 0x1A380918 - - static void 
nbio_v7_9_remap_hdp_registers(struct amdgpu_device *adev) { WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL, @@ -484,59 +475,6 @@ static u64 nbio_v7_9_get_pcie_replay_count(struct amdgpu_device *adev) return (nak_r + nak_g); } -static void nbio_v7_9_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0, - uint64_t *count1) -{ - uint32_t perfctrrx = 0; - uint32_t perfctrtx = 0; - - /* This reports 0 on APUs, so return to avoid writing/reading registers - * that may or may not be different from their GPU counterparts - */ - if (adev->flags & AMD_IS_APU) - return; - - /* Use TXCLK3 counter group for rx event */ - /* Use TXCLK7 counter group for tx event */ - /* Set the 2 events that we wish to watch, defined above */ - /* 40 is event# for received msgs */ - /* 2 is event# of posted requests sent */ - perfctrrx = REG_SET_FIELD(perfctrrx, PCIE_PERF_CNTL_TXCLK3, EVENT0_SEL, 40); - perfctrtx = REG_SET_FIELD(perfctrtx, PCIE_PERF_CNTL_TXCLK7, EVENT0_SEL, 2); - - /* Write to enable desired perf counters */ - WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctrrx); - WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK7, perfctrtx); - - /* Zero out and enable SHADOW_WR - * Write 0x6: - * Bit 1 = Global Shadow wr(1) - * Bit 2 = Global counter reset enable(1) - */ - WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000006); - - /* Enable Gloabl Counter - * Write 0x1: - * Bit 0 = Global Counter Enable(1) - */ - WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000001); - - msleep(1000); - - /* Disable Global Counter, Reset and enable SHADOW_WR - * Write 0x6: - * Bit 1 = Global Shadow wr(1) - * Bit 2 = Global counter reset enable(1) - */ - WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000006); - - /* Get the upper and lower count */ - *count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) | - ((uint64_t)RREG32_PCIE(smnPCIE_PERF_COUNT0_UPVAL_TXCLK3) << 32); - *count1 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK7) | - ((uint64_t)RREG32_PCIE(smnPCIE_PERF_COUNT0_UPVAL_TXCLK7) << 32); -} - const struct amdgpu_nbio_funcs nbio_v7_9_funcs = { .get_hdp_flush_req_offset = nbio_v7_9_get_hdp_flush_req_offset, .get_hdp_flush_done_offset = nbio_v7_9_get_hdp_flush_done_offset, @@ -561,7 +499,6 @@ const struct amdgpu_nbio_funcs nbio_v7_9_funcs = { .get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode, .init_registers = nbio_v7_9_init_registers, .get_pcie_replay_count = nbio_v7_9_get_pcie_replay_count, - .get_pcie_usage = nbio_v7_9_get_pcie_usage, }; static void nbio_v7_9_query_ras_error_count(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index a757526153e5..23e4ef4fff7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -2331,28 +2331,18 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev) gpu_cfg & PCI_EXP_LNKCTL_HAWD); - pcie_capability_read_word(root, PCI_EXP_LNKCTL2, - &tmp16); - tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP | - PCI_EXP_LNKCTL2_TX_MARGIN); - tmp16 |= (bridge_cfg2 & - (PCI_EXP_LNKCTL2_ENTER_COMP | - PCI_EXP_LNKCTL2_TX_MARGIN)); - pcie_capability_write_word(root, - PCI_EXP_LNKCTL2, - tmp16); - - pcie_capability_read_word(adev->pdev, - PCI_EXP_LNKCTL2, - &tmp16); - tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP | - PCI_EXP_LNKCTL2_TX_MARGIN); - tmp16 |= (gpu_cfg2 & - (PCI_EXP_LNKCTL2_ENTER_COMP | - PCI_EXP_LNKCTL2_TX_MARGIN)); - pcie_capability_write_word(adev->pdev, - PCI_EXP_LNKCTL2, - tmp16); + pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL2, + PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN, + bridge_cfg2 & + 
(PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN)); + pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL2, + PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN, + gpu_cfg2 & + (PCI_EXP_LNKCTL2_ENTER_COMP | + PCI_EXP_LNKCTL2_TX_MARGIN)); tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4); tmp &= ~LC_SET_QUIESCE; @@ -2365,16 +2355,15 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev) speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE; WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl); - pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL2, &tmp16); - tmp16 &= ~PCI_EXP_LNKCTL2_TLS; - + tmp16 = 0; if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */ else tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */ - pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL2, tmp16); + pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL2, + PCI_EXP_LNKCTL2_TLS, tmp16); speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE; diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index c64c01e2944a..dec81ccf6240 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -574,11 +574,34 @@ soc15_asic_reset_method(struct amdgpu_device *adev) return AMD_RESET_METHOD_MODE1; } +static bool soc15_need_reset_on_resume(struct amdgpu_device *adev) +{ + u32 sol_reg; + + sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); + + /* Will reset for the following suspend abort cases. + * 1) Only reset limit on APU side, dGPU hasn't checked yet. + * 2) S3 suspend abort and TOS already launched. + */ + if (adev->flags & AMD_IS_APU && adev->in_s3 && + !adev->suspend_complete && + sol_reg) + return true; + + return false; +} + static int soc15_asic_reset(struct amdgpu_device *adev) { /* original raven doesn't have full asic reset */ - if ((adev->apu_flags & AMD_APU_IS_RAVEN) || - (adev->apu_flags & AMD_APU_IS_RAVEN2)) + /* On the latest Raven, the GPU reset can be performed + * successfully. So now, temporarily enable it for the + * S3 suspend abort case. + */ + if (((adev->apu_flags & AMD_APU_IS_RAVEN) || + (adev->apu_flags & AMD_APU_IS_RAVEN2)) && + !soc15_need_reset_on_resume(adev)) return 0; switch (soc15_asic_reset_method(adev)) { @@ -895,7 +918,6 @@ static const struct amdgpu_asic_funcs aqua_vanjaram_asic_funcs = .get_config_memsize = &soc15_get_config_memsize, .need_full_reset = &soc15_need_full_reset, .init_doorbell_index = &aqua_vanjaram_doorbell_index_init, - .get_pcie_usage = &amdgpu_nbio_get_pcie_usage, .need_reset_on_init = &soc15_need_reset_on_init, .get_pcie_replay_count = &amdgpu_nbio_get_pcie_replay_count, .supports_baco = &soc15_supports_baco, @@ -1278,7 +1300,8 @@ static int soc15_common_hw_fini(void *handle) if (amdgpu_sriov_vf(adev)) xgpu_ai_mailbox_put_irq(adev); - if (adev->nbio.ras_if && + if ((!amdgpu_sriov_vf(adev)) && + adev->nbio.ras_if && amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) { if (adev->nbio.ras && adev->nbio.ras->init_ras_controller_interrupt) @@ -1298,24 +1321,6 @@ static int soc15_common_suspend(void *handle) return soc15_common_hw_fini(adev); } -static bool soc15_need_reset_on_resume(struct amdgpu_device *adev) -{ - u32 sol_reg; - - sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); - - /* Will reset for the following suspend abort cases. 
- * 1) Only reset limit on APU side, dGPU hasn't checked yet. - * 2) S3 suspend abort and TOS already launched. - */ - if (adev->flags & AMD_IS_APU && adev->in_s3 && - !adev->suspend_complete && - sol_reg) - return true; - - return false; -} - static int soc15_common_resume(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 5f81c264e310..b92fd6760fa9 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -185,6 +185,12 @@ static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode, } } return 0; + case IP_VERSION(4, 0, 6): + if (encode) + *codecs = &vcn_4_0_0_video_codecs_encode_vcn0; + else + *codecs = &vcn_4_0_0_video_codecs_decode_vcn0; + return 0; default: return -EINVAL; } @@ -717,6 +723,35 @@ static int soc21_common_early_init(void *handle) AMD_PG_SUPPORT_GFX_PG; adev->external_rev_id = adev->rev_id + 0x1; break; + case IP_VERSION(11, 5, 1): + adev->cg_flags = + AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_FGCG | + AMD_CG_SUPPORT_REPEATER_FGCG | + AMD_CG_SUPPORT_GFX_PERF_CLK | + AMD_CG_SUPPORT_GFX_3D_CGCG | + AMD_CG_SUPPORT_GFX_3D_CGLS | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_HDP_DS | + AMD_CG_SUPPORT_HDP_SD | + AMD_CG_SUPPORT_ATHUB_MGCG | + AMD_CG_SUPPORT_ATHUB_LS | + AMD_CG_SUPPORT_IH_CG | + AMD_CG_SUPPORT_BIF_MGCG | + AMD_CG_SUPPORT_BIF_LS | + AMD_CG_SUPPORT_VCN_MGCG | + AMD_CG_SUPPORT_JPEG_MGCG; + adev->pg_flags = + AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_VCN | + AMD_PG_SUPPORT_VCN_DPG | + AMD_PG_SUPPORT_JPEG; + adev->external_rev_id = adev->rev_id + 0xc1; + break; default: /* FIXME: not supported yet */ return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c index 14ef7a24be7b..77af4e25ff46 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c @@ -348,7 +348,8 @@ static int umc_v12_0_query_error_address(struct amdgpu_device *adev, } /* calculate error address if ue error is detected */ - if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status)) { + if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status) || + umc_v12_0_is_deferred_error(adev, mc_umc_status)) { mc_umc_addrt0 = SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0); diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c index 49e4c3c09aca..0468955338b7 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c @@ -1684,6 +1684,9 @@ static int vcn_v4_0_5_process_interrupt(struct amdgpu_device *adev, struct amdgp case SOC15_IH_CLIENTID_VCN: ip_instance = 0; break; + case SOC15_IH_CLIENTID_VCN1: + ip_instance = 1; + break; default: DRM_ERROR("Unhandled client id: %d\n", entry->client_id); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c index db66e6cccaf2..b9e785846637 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c @@ -291,27 +291,29 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev) adev->nbio.funcs->ih_control(adev); - if ((amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 2, 1)) && - adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { - ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN); - if (adev->irq.ih.use_bus_addr) { - ih_chicken = 
REG_SET_FIELD(ih_chicken, IH_CHICKEN, - MC_SPACE_GPA_ENABLE, 1); + if (!amdgpu_sriov_vf(adev)) { + if ((amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 2, 1)) && + adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { + ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN); + if (adev->irq.ih.use_bus_addr) { + ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN, + MC_SPACE_GPA_ENABLE, 1); + } + WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken); } - WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken); - } - /* psp firmware won't program IH_CHICKEN for aldebaran - * driver needs to program it properly according to - * MC_SPACE type in IH_RB_CNTL */ - if ((amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 0)) || - (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 2))) { - ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_ALDEBARAN); - if (adev->irq.ih.use_bus_addr) { - ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN, - MC_SPACE_GPA_ENABLE, 1); + /* psp firmware won't program IH_CHICKEN for aldebaran + * driver needs to program it properly according to + * MC_SPACE type in IH_RB_CNTL */ + if ((amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 0)) || + (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 2))) { + ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_ALDEBARAN); + if (adev->irq.ih.use_bus_addr) { + ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN, + MC_SPACE_GPA_ENABLE, 1); + } + WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_ALDEBARAN, ih_chicken); } - WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_ALDEBARAN, ih_chicken); } for (i = 0; i < ARRAY_SIZE(ih); i++) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 80e90fdef291..f030cafc5a0a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -371,7 +371,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, goto err_wptr_map_gart; } - err = amdgpu_amdkfd_map_gtt_bo_to_gart(dev->adev, wptr_bo); + err = amdgpu_amdkfd_map_gtt_bo_to_gart(wptr_bo); if (err) { pr_err("Failed to map wptr bo to GART\n"); goto err_wptr_map_gart; @@ -2935,6 +2935,7 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v if (IS_ERR_OR_NULL(target)) { pr_debug("Cannot find process PID %i to debug\n", args->pid); r = target ? 
PTR_ERR(target) : -ESRCH; + target = NULL; goto out; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 002b08fa632f..7f2ae0d15d4a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -1676,6 +1676,7 @@ int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pc case IP_VERSION(11, 0, 3): case IP_VERSION(11, 0, 4): case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 5, 1): num_of_cache_types = kfd_fill_gpu_cache_info_from_gfx_config(kdev->kfd, *pcache_info); break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 0a9cf9dfc224..4d399c0c8a57 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -96,6 +96,7 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd) case IP_VERSION(6, 0, 2): case IP_VERSION(6, 0, 3): case IP_VERSION(6, 1, 0): + case IP_VERSION(6, 1, 1): kfd->device_info.num_sdma_queues_per_engine = 8; break; default: @@ -113,6 +114,7 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd) case IP_VERSION(6, 0, 2): case IP_VERSION(6, 0, 3): case IP_VERSION(6, 1, 0): + case IP_VERSION(6, 1, 1): /* Reserve 1 for paging and 1 for gfx */ kfd->device_info.num_reserved_sdma_queues_per_engine = 2; /* BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ... */ @@ -165,6 +167,7 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd) case IP_VERSION(11, 0, 3): case IP_VERSION(11, 0, 4): case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 5, 1): kfd->device_info.event_interrupt_class = &event_interrupt_class_v11; break; default: @@ -420,6 +423,10 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) gfx_target_version = 110500; f2g = &gfx_v11_kfd2kgd; break; + case IP_VERSION(11, 5, 1): + gfx_target_version = 110501; + f2g = &gfx_v11_kfd2kgd; + break; default: break; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index c0e71543389a..f4d395e38683 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1903,6 +1903,10 @@ int amdkfd_fence_wait_timeout(struct device_queue_manager *dqm, uint64_t *fence_addr = dqm->fence_addr; while (*fence_addr != fence_value) { + /* Fatal err detected, this response won't come */ + if (amdgpu_amdkfd_is_fed(dqm->dev->adev)) + return -EIO; + if (time_after(jiffies, end_jiffies)) { dev_err(dev, "qcm fence wait loop timeout expired\n"); /* In HWS case, this is used to halt the driver thread diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index 1bea629c49ca..32c926986dbb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -286,7 +286,7 @@ err_no_space: return -ENOMEM; } -void kq_submit_packet(struct kernel_queue *kq) +int kq_submit_packet(struct kernel_queue *kq) { #ifdef DEBUG int i; @@ -298,6 +298,10 @@ void kq_submit_packet(struct kernel_queue *kq) } pr_debug("\n"); #endif + /* Fatal err detected, packet submission won't go through */ + if (amdgpu_amdkfd_is_fed(kq->dev->adev)) + return -EIO; + if (kq->dev->kfd->device_info.doorbell_size == 8) { *kq->wptr64_kernel = kq->pending_wptr64; write_kernel_doorbell64(kq->queue->properties.doorbell_ptr, @@ -307,6 +311,8 @@ void kq_submit_packet(struct kernel_queue *kq) 
write_kernel_doorbell(kq->queue->properties.doorbell_ptr, kq->pending_wptr); } + + return 0; } void kq_rollback_packet(struct kernel_queue *kq) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h index 9a6244430845..e24ee50acdf0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h @@ -47,7 +47,7 @@ int kq_acquire_packet_buffer(struct kernel_queue *kq, size_t packet_size_in_dwords, unsigned int **buffer_ptr); -void kq_submit_packet(struct kernel_queue *kq); +int kq_submit_packet(struct kernel_queue *kq); void kq_rollback_packet(struct kernel_queue *kq); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index 401096c103b2..d6f65f39072b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -288,7 +288,7 @@ int pm_send_set_resources(struct packet_manager *pm, retval = pm->pmf->set_resources(pm, buffer, res); if (!retval) - kq_submit_packet(pm->priv_queue); + retval = kq_submit_packet(pm->priv_queue); else kq_rollback_packet(pm->priv_queue); @@ -325,7 +325,7 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues) if (retval) goto fail_create_runlist; - kq_submit_packet(pm->priv_queue); + retval = kq_submit_packet(pm->priv_queue); mutex_unlock(&pm->lock); @@ -361,7 +361,7 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address, retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value); if (!retval) - kq_submit_packet(pm->priv_queue); + retval = kq_submit_packet(pm->priv_queue); else kq_rollback_packet(pm->priv_queue); @@ -392,7 +392,7 @@ int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period) retval = pm->pmf->set_grace_period(pm, buffer, grace_period); if (!retval) - kq_submit_packet(pm->priv_queue); + retval = kq_submit_packet(pm->priv_queue); else kq_rollback_packet(pm->priv_queue); } @@ -421,7 +421,7 @@ int pm_send_unmap_queue(struct packet_manager *pm, retval = pm->pmf->unmap_queues(pm, buffer, filter, filter_param, reset); if (!retval) - kq_submit_packet(pm->priv_queue); + retval = kq_submit_packet(pm->priv_queue); else kq_rollback_packet(pm->priv_queue); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index bc9eb847ecfe..c51f131eaa2f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1997,8 +1997,9 @@ int kfd_topology_add_device(struct kfd_node *gpu) HSA_CAP_ASIC_REVISION_MASK); dev->node_props.location_id = pci_dev_id(gpu->adev->pdev); - if (KFD_GC_VERSION(dev->gpu->kfd) == IP_VERSION(9, 4, 3)) - dev->node_props.location_id |= dev->gpu->node_id; + /* On multi-partition nodes, node id = location_id[31:28] */ + if (gpu->kfd->num_nodes > 1) + dev->node_props.location_id |= (dev->gpu->node_id << 28); dev->node_props.domain = pci_domain_nr(gpu->adev->pdev->bus); dev->node_props.max_engine_clk_fcompute = diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index bcdd4f28b64c..6701f1fde79c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -5235,6 +5235,10 @@ static inline void fill_dc_dirty_rect(struct drm_plane *plane, * @new_plane_state: New state of @plane * @crtc_state: New state of CRTC connected to the @plane * @flip_addrs: DC flip tracking 
struct, which also tracts dirty rects + * @is_psr_su: Flag indicating whether Panel Self Refresh Selective Update (PSR SU) is enabled. + * If PSR SU is enabled and damage clips are available, only the regions of the screen + * that have changed will be updated. If PSR SU is not enabled, + * or if damage clips are not available, the entire screen will be updated. * @dirty_regions_changed: dirty regions changed * * For PSR SU, DC informs the DMUB uController of dirty rectangle regions @@ -6229,7 +6233,8 @@ create_stream_for_sink(struct drm_connector *connector, */ DRM_DEBUG_DRIVER("No preferred mode found\n"); } else if (aconnector) { - recalculate_timing = is_freesync_video_mode(&mode, aconnector); + recalculate_timing = amdgpu_freesync_vid_mode && + is_freesync_video_mode(&mode, aconnector); if (recalculate_timing) { freesync_mode = get_highest_refresh_rate_mode(aconnector, false); drm_mode_copy(&saved_mode, &mode); @@ -6389,9 +6394,6 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector, } else if (property == adev->mode_info.underscan_property) { dm_new_state->underscan_enable = val; ret = 0; - } else if (property == adev->mode_info.abm_level_property) { - dm_new_state->abm_level = val ?: ABM_LEVEL_IMMEDIATE_DISABLE; - ret = 0; } return ret; @@ -6434,10 +6436,6 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector, } else if (property == adev->mode_info.underscan_property) { *val = dm_state->underscan_enable; ret = 0; - } else if (property == adev->mode_info.abm_level_property) { - *val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ? - dm_state->abm_level : 0; - ret = 0; } return ret; @@ -6656,7 +6654,7 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector) struct edid *edid; struct i2c_adapter *ddc; - if (dc_link->aux_mode) + if (dc_link && dc_link->aux_mode) ddc = &aconnector->dm_dp_aux.aux.ddc; else ddc = &aconnector->i2c->base; @@ -7547,7 +7545,7 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); - if (!edid) + if (!(amdgpu_freesync_vid_mode && edid)) return; if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) @@ -7664,13 +7662,6 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, aconnector->base.state->max_bpc = 16; aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc; - if (connector_type == DRM_MODE_CONNECTOR_eDP && - (dc_is_dmcu_initialized(adev->dm.dc) || - adev->dm.dc->ctx->dmub_srv) && amdgpu_dm_abm_level < 0) { - drm_object_attach_property(&aconnector->base.base, - adev->mode_info.abm_level_property, 0); - } - if (connector_type == DRM_MODE_CONNECTOR_HDMIA) { /* Content Type is currently only implemented for HDMI. */ drm_connector_attach_content_type_property(&aconnector->base); @@ -9847,7 +9838,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, * TODO: Refactor this function to allow this check to work * in all conditions. 
*/ - if (dm_new_crtc_state->stream && + if (amdgpu_freesync_vid_mode && + dm_new_crtc_state->stream && is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) goto skip_modeset; @@ -9887,7 +9879,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, } /* Now check if we should set freesync video mode */ - if (dm_new_crtc_state->stream && + if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream && dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) && is_timing_unchanged_for_freesync(new_crtc_state, @@ -9900,7 +9892,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, set_freesync_fixed_config(dm_new_crtc_state); goto skip_modeset; - } else if (aconnector && + } else if (amdgpu_freesync_vid_mode && aconnector && is_freesync_video_mode(&new_crtc_state->mode, aconnector)) { struct drm_display_mode *high_mode; diff --git a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c index 39530b2ea495..b30c2cdc1a61 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c @@ -23,8 +23,6 @@ * */ -#include - #include "resource.h" #include "dm_services.h" #include "dce_calcs.h" diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 86ee4fe4f5e3..9f0f25aee426 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -23,8 +23,6 @@ * */ -#include - #include "dal_asic_id.h" #include "dc_types.h" #include "dccg.h" diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c index 26feefbb8990..b77804cfde0f 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c @@ -132,7 +132,7 @@ int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base) int dprefclk_wdivider; int dprefclk_src_sel; int dp_ref_clk_khz; - int target_div; + int target_div = 600000; /* ASSERT DP Reference Clock source is from DFS*/ REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c index 60761ff3cbf1..2a74e2d74909 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c @@ -23,8 +23,6 @@ * */ -#include - #include "reg_helper.h" #include "core_types.h" #include "clk_mgr_internal.h" diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.c deleted file mode 100644 index 61dd12198a3c..000000000000 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.c +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright 2012-16 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "reg_helper.h" -#include "clk_mgr_internal.h" -#include "rv1_clk_mgr_clk.h" - -#include "ip/Discovery/hwid.h" -#include "ip/Discovery/v1/ip_offset_1.h" -#include "ip/CLK/clk_10_0_default.h" -#include "ip/CLK/clk_10_0_offset.h" -#include "ip/CLK/clk_10_0_reg.h" -#include "ip/CLK/clk_10_0_sh_mask.h" - -#include "dce100/dce_clk_mgr.h" - -#define CLK_BASE_INNER(inst) \ - CLK_BASE__INST ## inst ## _SEG0 - - -#define CLK_REG(reg_name, block, inst)\ - CLK_BASE(mm ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## _ ## inst ## _ ## reg_name - -#define REG(reg_name) \ - CLK_REG(reg_name, CLK0, 0) - - -/* Only used by testing framework*/ -void rv1_dump_clk_registers(struct clk_state_registers *regs, struct clk_bypass *bypass, struct clk_mgr *clk_mgr_base) -{ - struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - - regs->CLK0_CLK8_CURRENT_CNT = REG_READ(CLK0_CLK8_CURRENT_CNT) / 10; //dcf clk - - bypass->dcfclk_bypass = REG_READ(CLK0_CLK8_BYPASS_CNTL) & 0x0007; - if (bypass->dcfclk_bypass < 0 || bypass->dcfclk_bypass > 4) - bypass->dcfclk_bypass = 0; - - - regs->CLK0_CLK8_DS_CNTL = REG_READ(CLK0_CLK8_DS_CNTL) / 10; //dcf deep sleep divider - - regs->CLK0_CLK8_ALLOW_DS = REG_READ(CLK0_CLK8_ALLOW_DS); //dcf deep sleep allow - - regs->CLK0_CLK10_CURRENT_CNT = REG_READ(CLK0_CLK10_CURRENT_CNT) / 10; //dpref clk - - bypass->dispclk_pypass = REG_READ(CLK0_CLK10_BYPASS_CNTL) & 0x0007; - if (bypass->dispclk_pypass < 0 || bypass->dispclk_pypass > 4) - bypass->dispclk_pypass = 0; - - regs->CLK0_CLK11_CURRENT_CNT = REG_READ(CLK0_CLK11_CURRENT_CNT) / 10; //disp clk - - bypass->dprefclk_bypass = REG_READ(CLK0_CLK11_BYPASS_CNTL) & 0x0007; - if (bypass->dprefclk_bypass < 0 || bypass->dprefclk_bypass > 4) - bypass->dprefclk_bypass = 0; - -} diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c index d72acbb049b1..23b390245b5d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c @@ -26,6 +26,10 @@ #include "core_types.h" #include "clk_mgr_internal.h" #include "reg_helper.h" +#include "dm_helpers.h" + +#include "rn_clk_mgr_vbios_smu.h" + #include #include "renoir_ip_offset.h" @@ -33,8 +37,6 @@ #include "mp/mp_12_0_0_offset.h" #include "mp/mp_12_0_0_sh_mask.h" 
-#include "rn_clk_mgr_vbios_smu.h" - #define REG(reg_name) \ (MP0_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name) @@ -120,7 +122,10 @@ static int rn_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, result = rn_smu_wait_for_response(clk_mgr, 10, 200000); - ASSERT(result == VBIOSSMC_Result_OK || result == VBIOSSMC_Result_UnknownCmd); + if (IS_SMU_TIMEOUT(result)) { + ASSERT(0); + dm_helpers_smu_timeout(CTX, msg_id, param, 10 * 200000); + } /* Actual dispclk set is returned in the parameter register */ return REG_READ(MP1_SMN_C2PMSG_83); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c index 19e5b3be9275..b4fb17b7a096 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c @@ -29,6 +29,7 @@ #include #include "dcn301_smu.h" +#include "dm_helpers.h" #include "vangogh_ip_offset.h" @@ -120,7 +121,10 @@ static int dcn301_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, result = dcn301_smu_wait_for_response(clk_mgr, 10, 200000); - ASSERT(result == VBIOSSMC_Result_OK); + if (IS_SMU_TIMEOUT(result)) { + ASSERT(0); + dm_helpers_smu_timeout(CTX, msg_id, param, 10 * 200000); + } /* Actual dispclk set is returned in the parameter register */ return REG_READ(MP1_SMN_C2PMSG_83); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index e64e45e4c833..668f05c8654e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -243,10 +243,8 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base) /* Get UCLK, update bounding box */ clk_mgr_base->funcs->get_memclk_states_from_smu(clk_mgr_base); - DC_FP_START(); /* WM range table */ dcn32_build_wm_range_table(clk_mgr); - DC_FP_END(); } static void dcn32_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr, @@ -817,7 +815,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, dmcu->funcs->set_psr_wait_loop(dmcu, clk_mgr_base->clks.dispclk_khz / 1000 / 7); - if (dc->config.enable_auto_dpm_test_logs) { + if (dc->config.enable_auto_dpm_test_logs && safe_to_lower) { dcn32_auto_dpm_test_log(new_clocks, clk_mgr, context); } } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 4d5194293dbd..5211c1c0f3c0 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -2032,7 +2032,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c return result; } -static bool commit_minimal_transition_state(struct dc *dc, +static bool commit_minimal_transition_state_legacy(struct dc *dc, struct dc_state *transition_base_context); /** @@ -2098,7 +2098,7 @@ enum dc_status dc_commit_streams(struct dc *dc, } if (handle_exit_odm2to1) - res = commit_minimal_transition_state(dc, dc->current_state); + res = commit_minimal_transition_state_legacy(dc, dc->current_state); context = dc_state_create_current_copy(dc); if (!context) @@ -2952,8 +2952,8 @@ static void copy_stream_update_to_stream(struct dc *dc, } } -static void backup_plane_states_for_stream( - struct dc_plane_state plane_states[MAX_SURFACE_NUM], +static void backup_planes_and_stream_state( + struct dc_scratch_space *scratch, struct dc_stream_state *stream) { int i; @@ -2962,12 +2962,20 @@ static void 
backup_plane_states_for_stream( if (!status) return; - for (i = 0; i < status->plane_count; i++) - plane_states[i] = *status->plane_states[i]; + for (i = 0; i < status->plane_count; i++) { + scratch->plane_states[i] = *status->plane_states[i]; + scratch->gamma_correction[i] = *status->plane_states[i]->gamma_correction; + scratch->in_transfer_func[i] = *status->plane_states[i]->in_transfer_func; + scratch->lut3d_func[i] = *status->plane_states[i]->lut3d_func; + scratch->in_shaper_func[i] = *status->plane_states[i]->in_shaper_func; + scratch->blend_tf[i] = *status->plane_states[i]->blend_tf; + } + scratch->stream_state = *stream; + scratch->out_transfer_func = *stream->out_transfer_func; } -static void restore_plane_states_for_stream( - struct dc_plane_state plane_states[MAX_SURFACE_NUM], +static void restore_planes_and_stream_state( + struct dc_scratch_space *scratch, struct dc_stream_state *stream) { int i; @@ -2976,8 +2984,16 @@ static void restore_plane_states_for_stream( if (!status) return; - for (i = 0; i < status->plane_count; i++) - *status->plane_states[i] = plane_states[i]; + for (i = 0; i < status->plane_count; i++) { + *status->plane_states[i] = scratch->plane_states[i]; + *status->plane_states[i]->gamma_correction = scratch->gamma_correction[i]; + *status->plane_states[i]->in_transfer_func = scratch->in_transfer_func[i]; + *status->plane_states[i]->lut3d_func = scratch->lut3d_func[i]; + *status->plane_states[i]->in_shaper_func = scratch->in_shaper_func[i]; + *status->plane_states[i]->blend_tf = scratch->blend_tf[i]; + } + *stream = scratch->stream_state; + *stream->out_transfer_func = scratch->out_transfer_func; } static bool update_planes_and_stream_state(struct dc *dc, @@ -3003,7 +3019,7 @@ static bool update_planes_and_stream_state(struct dc *dc, } context = dc->current_state; - backup_plane_states_for_stream(dc->current_state->scratch.plane_states, stream); + backup_planes_and_stream_state(&dc->current_state->scratch, stream); update_type = dc_check_update_surfaces_for_stream( dc, srf_updates, surface_count, stream_update, stream_status); @@ -3103,7 +3119,7 @@ static bool update_planes_and_stream_state(struct dc *dc, *new_context = context; *new_update_type = update_type; - backup_plane_states_for_stream(context->scratch.plane_states, stream); + backup_planes_and_stream_state(&context->scratch, stream); return true; @@ -4047,7 +4063,23 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc, return minimal_transition_context; } -static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc, + +/** + * commit_minimal_transition_state - Commit a minimal state based on current or new context + * + * @dc: DC structure, used to get the current state + * @context: New context + * @stream: Stream getting the update for the flip + * + * The function takes in current state and new state and determine a minimal transition state + * as the intermediate step which could make the transition between current and new states + * seamless. If found, it will commit the minimal transition state and update current state to + * this minimal transition state and return true, if not, it will return false. 
+ * + * Return: + * Return True if the minimal transition succeeded, false otherwise + */ +static bool commit_minimal_transition_state(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream) { @@ -4056,12 +4088,6 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc, struct pipe_split_policy_backup policy; /* commit based on new context */ - /* Since all phantom pipes are removed in full validation, - * we have to save and restore the subvp/mall config when - * we do a minimal transition since the flags marking the - * pipe as subvp/phantom will be cleared (dc copy constructor - * creates a shallow copy). - */ minimal_transition_context = create_minimal_transition_state(dc, context, &policy); if (minimal_transition_context) { @@ -4078,7 +4104,7 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc, if (!success) { /* commit based on current context */ - restore_plane_states_for_stream(dc->current_state->scratch.plane_states, stream); + restore_planes_and_stream_state(&dc->current_state->scratch, stream); minimal_transition_context = create_minimal_transition_state(dc, dc->current_state, &policy); if (minimal_transition_context) { @@ -4091,7 +4117,7 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc, } release_minimal_transition_state(dc, minimal_transition_context, &policy); } - restore_plane_states_for_stream(context->scratch.plane_states, stream); + restore_planes_and_stream_state(&context->scratch, stream); } ASSERT(success); @@ -4099,7 +4125,7 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc, } /** - * commit_minimal_transition_state - Create a transition pipe split state + * commit_minimal_transition_state_legacy - Create a transition pipe split state * * @dc: Used to get the current state status * @transition_base_context: New transition state @@ -4116,7 +4142,7 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc, * Return: * Return false if something is wrong in the transition state. */ -static bool commit_minimal_transition_state(struct dc *dc, +static bool commit_minimal_transition_state_legacy(struct dc *dc, struct dc_state *transition_base_context) { struct dc_state *transition_context; @@ -4354,53 +4380,6 @@ static bool fast_update_only(struct dc *dc, && !full_update_required(dc, srf_updates, surface_count, stream_update, stream); } -static bool should_commit_minimal_transition_for_windowed_mpo_odm(struct dc *dc, - struct dc_stream_state *stream, - struct dc_state *context) -{ - struct pipe_ctx *cur_pipe, *new_pipe; - bool cur_is_odm_in_use, new_is_odm_in_use; - struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream); - struct dc_stream_status *new_stream_status = stream_get_status(context, stream); - - if (!dc->debug.enable_single_display_2to1_odm_policy || - !dc->config.enable_windowed_mpo_odm) - /* skip the check if windowed MPO ODM or dynamic ODM is turned - * off. 
- */ - return false; - - if (context == dc->current_state) - /* skip the check for fast update */ - return false; - - if (new_stream_status->plane_count != cur_stream_status->plane_count) - /* plane count changed, not a plane scaling update so not the - * case we are looking for - */ - return false; - - cur_pipe = resource_get_otg_master_for_stream(&dc->current_state->res_ctx, stream); - new_pipe = resource_get_otg_master_for_stream(&context->res_ctx, stream); - if (!cur_pipe || !new_pipe) - return false; - cur_is_odm_in_use = resource_get_odm_slice_count(cur_pipe) > 1; - new_is_odm_in_use = resource_get_odm_slice_count(new_pipe) > 1; - if (cur_is_odm_in_use == new_is_odm_in_use) - /* ODM state isn't changed, not the case we are looking for */ - return false; - - if (dc->hwss.is_pipe_topology_transition_seamless && - dc->hwss.is_pipe_topology_transition_seamless( - dc, dc->current_state, context)) - /* transition can be achieved without the need for committing - * minimal transition state first - */ - return false; - - return true; -} - bool dc_update_planes_and_stream(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_state *stream, @@ -4433,7 +4412,7 @@ bool dc_update_planes_and_stream(struct dc *dc, /* on plane addition, minimal state is the current one */ if (force_minimal_pipe_splitting && is_plane_addition && - !commit_minimal_transition_state(dc, dc->current_state)) + !commit_minimal_transition_state_legacy(dc, dc->current_state)) return false; if (!update_planes_and_stream_state( @@ -4448,32 +4427,19 @@ bool dc_update_planes_and_stream(struct dc *dc, /* on plane removal, minimal state is the new one */ if (force_minimal_pipe_splitting && !is_plane_addition) { - /* Since all phantom pipes are removed in full validation, - * we have to save and restore the subvp/mall config when - * we do a minimal transition since the flags marking the - * pipe as subvp/phantom will be cleared (dc copy constructor - * creates a shallow copy). - */ - if (!commit_minimal_transition_state(dc, context)) { + if (!commit_minimal_transition_state_legacy(dc, context)) { dc_state_release(context); return false; } update_type = UPDATE_TYPE_FULL; } - /* when windowed MPO ODM is supported, we need to handle a special case - * where we can transition between ODM combine and MPC combine due to - * plane scaling update. This transition will require us to commit - * minimal transition state. The condition to trigger this update can't - * be predicted by could_mpcc_tree_change_for_active_pipes because we - * can only determine it after DML validation. Therefore we can't rely - * on the existing commit minimal transition state sequence. Instead - * we have to add additional handling here to handle this transition - * with its own special sequence. 
- */ - if (should_commit_minimal_transition_for_windowed_mpo_odm(dc, stream, context)) - commit_minimal_transition_state_for_windowed_mpo_odm(dc, + if (dc->hwss.is_pipe_topology_transition_seamless && + !dc->hwss.is_pipe_topology_transition_seamless( + dc, dc->current_state, context)) { + commit_minimal_transition_state(dc, context, stream); + } update_seamless_boot_flags(dc, context, surface_count, stream); if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) { commit_planes_for_stream_fast(dc, diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 9b42f6fc8c69..ee8453bf958f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -51,7 +51,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.272" +#define DC_VER "3.2.273" #define MAX_SURFACES 3 #define MAX_PLANES 6 diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c index d761b0df2878..e224a028d68a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c @@ -34,6 +34,7 @@ #include "dc_bios_types.h" #include "link_enc_cfg.h" +#include "dc_dmub_srv.h" #include "gpio_service_interface.h" #ifndef MIN @@ -61,6 +62,38 @@ #define AUX_REG_WRITE(reg_name, val) \ dm_write_reg(CTX, AUX_REG(reg_name), val) +static uint8_t phy_id_from_transmitter(enum transmitter t) +{ + uint8_t phy_id; + + switch (t) { + case TRANSMITTER_UNIPHY_A: + phy_id = 0; + break; + case TRANSMITTER_UNIPHY_B: + phy_id = 1; + break; + case TRANSMITTER_UNIPHY_C: + phy_id = 2; + break; + case TRANSMITTER_UNIPHY_D: + phy_id = 3; + break; + case TRANSMITTER_UNIPHY_E: + phy_id = 4; + break; + case TRANSMITTER_UNIPHY_F: + phy_id = 5; + break; + case TRANSMITTER_UNIPHY_G: + phy_id = 6; + break; + default: + phy_id = 0; + break; + } + return phy_id; +} void enc32_hw_init(struct link_encoder *enc) { @@ -117,38 +150,50 @@ void dcn32_link_encoder_enable_dp_output( } } -static bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc) +static bool query_dp_alt_from_dmub(struct link_encoder *enc, + union dmub_rb_cmd *cmd) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); - uint32_t dp_alt_mode_disable = 0; - bool is_usb_c_alt_mode = false; - if (enc->features.flags.bits.DP_IS_USB_C) { - /* if value == 1 alt mode is disabled, otherwise it is enabled */ - REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable); - is_usb_c_alt_mode = (dp_alt_mode_disable == 0); - } + memset(cmd, 0, sizeof(*cmd)); + cmd->query_dp_alt.header.type = DMUB_CMD__VBIOS; + cmd->query_dp_alt.header.sub_type = + DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT; + cmd->query_dp_alt.header.payload_bytes = sizeof(cmd->query_dp_alt.data); + cmd->query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter); - return is_usb_c_alt_mode; + if (!dc_wake_and_execute_dmub_cmd(enc->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + return false; + + return true; } -static void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc, +bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc) +{ + union dmub_rb_cmd cmd; + + if (!query_dp_alt_from_dmub(enc, &cmd)) + return false; + + return (cmd.query_dp_alt.data.is_dp_alt_disable == 0); +} + +void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc, struct dc_link_settings *link_settings) { - struct dcn10_link_encoder 
*enc10 = TO_DCN10_LINK_ENC(enc); - uint32_t is_in_usb_c_dp4_mode = 0; + union dmub_rb_cmd cmd; dcn10_link_encoder_get_max_link_cap(enc, link_settings); - /* in usb c dp2 mode, max lane count is 2 */ - if (enc->funcs->is_in_alt_mode && enc->funcs->is_in_alt_mode(enc)) { - REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode); - if (!is_in_usb_c_dp4_mode) - link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count); - } + if (!query_dp_alt_from_dmub(enc, &cmd)) + return; + if (cmd.query_dp_alt.data.is_usb && + cmd.query_dp_alt.data.is_dp4 == 0) + link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count); } + static const struct link_encoder_funcs dcn32_link_enc_funcs = { .read_state = link_enc2_read_state, .validate_output_with_stream = @@ -203,13 +248,15 @@ void dcn32_link_encoder_construct( enc10->base.hpd_source = init_data->hpd_source; enc10->base.connector = init_data->connector; - enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; enc10->base.features = *enc_features; if (enc10->base.connector.id == CONNECTOR_ID_USBC) enc10->base.features.flags.bits.DP_IS_USB_C = 1; + if (enc10->base.connector.id == CONNECTOR_ID_USBC) + enc10->base.features.flags.bits.DP_IS_USB_C = 1; + enc10->base.transmitter = init_data->transmitter; /* set the flag to indicate whether driver poll the I2C data pin diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h index bbcfce06bec0..2d5f25290ed1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h @@ -53,4 +53,9 @@ void dcn32_link_encoder_enable_dp_output( const struct dc_link_settings *link_settings, enum clock_source_id clock_source); +bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc); + +void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc, + struct dc_link_settings *link_settings); + #endif /* __DC_LINK_ENCODER__DCN32_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index a0a65e099104..b49e1dc9d8ba 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -623,7 +623,6 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc, * - Not TMZ surface */ if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) && - !(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) && (!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE && (refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) && diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c index 26307e599614..2a58a7687bdb 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c @@ -76,6 +76,11 @@ static void map_hw_resources(struct dml2_context *dml2, in_out_display_cfg->hw.DLGRefClkFreqMHz = 50; } for (j = 0; j < mode_support_info->DPPPerSurface[i]; j++) { + if (i >= __DML2_WRAPPER_MAX_STREAMS_PLANES__) { + dml_print("DML::%s: Index out of bounds: i=%d, __DML2_WRAPPER_MAX_STREAMS_PLANES__=%d\n", + __func__, i, __DML2_WRAPPER_MAX_STREAMS_PLANES__); + break; + } dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id[num_pipes] 
= dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[i]; dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id_valid[num_pipes] = true; dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_id[num_pipes] = dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[i]; diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c index 25ffc052d53b..99e17c164ce7 100644 --- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c +++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c @@ -23,8 +23,6 @@ * */ -#include - #include "dm_services.h" #include "dm_helpers.h" #include "include/hdcp_msg_types.h" diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 3a6bf77a6873..b1b72e688f74 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -522,6 +522,25 @@ struct dc_dmub_cmd { enum dm_dmub_wait_type wait_type; }; +struct dc_scratch_space { + /* used to temporarily backup plane states of a stream during + * dc update. The reason is that plane states are overwritten + * with surface updates in dc update. Once they are overwritten + * current state is no longer valid. We want to temporarily + * store current value in plane states so we can still recover + * a valid current state during dc update. + */ + struct dc_plane_state plane_states[MAX_SURFACE_NUM]; + struct dc_gamma gamma_correction[MAX_SURFACE_NUM]; + struct dc_transfer_func in_transfer_func[MAX_SURFACE_NUM]; + struct dc_3dlut lut3d_func[MAX_SURFACE_NUM]; + struct dc_transfer_func in_shaper_func[MAX_SURFACE_NUM]; + struct dc_transfer_func blend_tf[MAX_SURFACE_NUM]; + + struct dc_stream_state stream_state; + struct dc_transfer_func out_transfer_func; +}; + /** * struct dc_state - The full description of a state requested by users */ @@ -604,16 +623,8 @@ struct dc_state { unsigned int stutter_period_us; } perf_params; - struct { - /* used to temporarily backup plane states of a stream during - * dc update. The reason is that plane states are overwritten - * with surface updates in dc update. Once they are overwritten - * current state is no longer valid. We want to temporarily - * store current value in plane states so we can still recover - * a valid current state during dc update. 
- */ - struct dc_plane_state plane_states[MAX_SURFACE_NUM]; - } scratch; + + struct dc_scratch_space scratch; }; struct replay_context { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c index e8baafa02443..916f0c974637 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c @@ -23,8 +23,6 @@ * */ -#include - #include "dm_services.h" #include "include/logger_interface.h" diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c index 03c5e8ff8cbd..42cdfe6c3538 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c @@ -23,8 +23,6 @@ * */ -#include - #include "dm_services.h" #include "include/logger_interface.h" diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c index fc50931c2aec..c5de6ed5bf58 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c @@ -164,7 +164,7 @@ static void dpcd_extend_address_range( if (new_addr_range.start != in_address || new_addr_range.end != end_address) { *out_address = new_addr_range.start; *out_size = ADDRESS_RANGE_SIZE(new_addr_range.start, new_addr_range.end); - *out_data = kzalloc(*out_size * sizeof(**out_data), GFP_KERNEL); + *out_data = kcalloc(*out_size, sizeof(**out_data), GFP_KERNEL); } } diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c index f9c5bc624be3..a2387cea1af9 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c @@ -24,8 +24,6 @@ * */ -#include - #include "dm_services.h" #include "dc.h" diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index 6f10052caeef..3f3951f3ba98 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -2118,6 +2118,7 @@ static bool dcn32_resource_construct( dc->config.use_pipe_ctx_sync_logic = true; dc->config.dc_mode_clk_limit_support = true; + dc->config.enable_windowed_mpo_odm = true; /* read VBIOS LTTPR caps */ { if (ctx->dc_bios->funcs->get_lttpr_caps) { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c index 6f832bf278cf..b356fed1726d 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c @@ -1760,7 +1760,7 @@ static bool dcn321_resource_construct( dc->caps.color.mpc.ocsc = 1; dc->config.dc_mode_clk_limit_support = true; - dc->config.enable_windowed_mpo_odm = false; + dc->config.enable_windowed_mpo_odm = true; /* read VBIOS LTTPR caps */ { if (ctx->dc_bios->funcs->get_lttpr_caps) { diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c index 60223efc6fc8..53f359f3fae2 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c @@ -555,8 +555,14 @@ uint32_t 
dmub_dcn35_read_inbox0_ack_register(struct dmub_srv *dmub) bool dmub_dcn35_is_hw_powered_up(struct dmub_srv *dmub) { union dmub_fw_boot_status status; + uint32_t is_enable; + + REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enable); + if (is_enable == 0) + return false; status.all = REG_READ(DMCUB_SCRATCH0); - return status.bits.hw_power_init_done; + return (status.bits.dal_fw && status.bits.hw_power_init_done && status.bits.mailbox_rdy) || + (!status.bits.dal_fw && status.bits.mailbox_rdy); } diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index fb66832dc996..cd97983cf759 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -800,20 +800,13 @@ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub) bool dmub_srv_is_hw_pwr_up(struct dmub_srv *dmub) { - union dmub_fw_boot_status status; - if (!dmub->hw_funcs.is_hw_powered_up) return true; if (!dmub->hw_funcs.is_hw_powered_up(dmub)) return false; - if (!dmub->hw_funcs.is_hw_init(dmub)) - return false; - - status = dmub->hw_funcs.get_fw_status(dmub); - - return status.bits.dal_fw && status.bits.mailbox_rdy; + return true; } enum dmub_status dmub_srv_wait_for_hw_pwr_up(struct dmub_srv *dmub, diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h index 5960dd760e91..8ce6c22e5d04 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h @@ -57,10 +57,10 @@ void mod_stats_update_event(struct mod_stats *mod_stats, unsigned int length); void mod_stats_update_flip(struct mod_stats *mod_stats, - unsigned long timestamp_in_ns); + unsigned long long timestamp_in_ns); void mod_stats_update_vupdate(struct mod_stats *mod_stats, - unsigned long timestamp_in_ns); + unsigned long long timestamp_in_ns); void mod_stats_update_freesync(struct mod_stats *mod_stats, unsigned int v_total_min, diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_sh_mask.h index e0c28c29ddb0..a22481e7bcdb 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_sh_mask.h @@ -38896,13 +38896,5 @@ #define RCC_DEV0_EPF0_VF7_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L #define RCC_DEV0_EPF0_VF7_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L -//PCIE_PERF_CNTL_TXCLK3 -#define PCIE_PERF_CNTL_TXCLK3__EVENT0_SEL__SHIFT 0x0 -#define PCIE_PERF_CNTL_TXCLK3__EVENT0_SEL_MASK 0x000000FFL - -//PCIE_PERF_CNTL_TXCLK7 -#define PCIE_PERF_CNTL_TXCLK7__EVENT0_SEL__SHIFT 0x0 -#define PCIE_PERF_CNTL_TXCLK7__EVENT0_SEL_MASK 0x000000FFL - #endif diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index fa7d6ced786f..af3eebb4c9bc 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -610,6 +610,38 @@ struct atom_firmware_info_v3_4 { uint32_t reserved[2]; }; +struct atom_firmware_info_v3_5 { + struct atom_common_table_header table_header; + uint32_t firmware_revision; + uint32_t bootup_clk_reserved[2]; + uint32_t firmware_capability; // enum atombios_firmware_capability + uint32_t fw_protect_region_size_in_kb; /* FW allocate a write protect region at top of FB. 
*/ + uint32_t bios_scratch_reg_startaddr; // 1st bios scratch register dword address + uint32_t bootup_voltage_reserved[2]; + uint8_t mem_module_id; + uint8_t coolingsolution_id; /*0: Air cooling; 1: Liquid cooling ... */ + uint8_t hw_blt_mode; //0:HW_BLT_DMA_PIO_MODE; 1:HW_BLT_LITE_SDMA_MODE; 2:HW_BLT_PCI_IO_MODE + uint8_t reserved1; + uint32_t mc_baseaddr_high; + uint32_t mc_baseaddr_low; + uint8_t board_i2c_feature_id; // enum of atom_board_i2c_feature_id_def + uint8_t board_i2c_feature_gpio_id; // i2c id find in gpio_lut data table gpio_id + uint8_t board_i2c_feature_slave_addr; + uint8_t ras_rom_i2c_slave_addr; + uint32_t bootup_voltage_reserved1; + uint32_t zfb_reserved; + // if pplib_pptable_id!=0, pplib get powerplay table inside driver instead of from VBIOS + uint32_t pplib_pptable_id; + uint32_t hw_voltage_reserved[3]; + uint32_t maco_pwrlimit_mw; // bomaco mode power limit in unit of m-watt + uint32_t usb_pwrlimit_mw; // power limit when USB is enable in unit of m-watt + uint32_t fw_reserved_size_in_kb; // VBIOS reserved extra fw size in unit of kb. + uint32_t pspbl_init_reserved[3]; + uint32_t spi_rom_size; // GPU spi rom size + uint16_t support_dev_in_objinfo; + uint16_t disp_phy_tunning_size; + uint32_t reserved[16]; +}; /* *************************************************************************** Data Table lcd_info structure diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 087d57850304..9e70c41ad98f 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -2034,6 +2034,31 @@ static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ return 0; } +static int pp_od_clk_voltage_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr, + uint32_t mask, enum amdgpu_device_attr_states *states) +{ + uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0); + + *states = ATTR_STATE_SUPPORTED; + + if (!amdgpu_dpm_is_overdrive_supported(adev)) { + *states = ATTR_STATE_UNSUPPORTED; + return 0; + } + + /* Enable pp_od_clk_voltage node for gc 9.4.3 SRIOV/BM support */ + if (gc_ver == IP_VERSION(9, 4, 3)) { + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + *states = ATTR_STATE_UNSUPPORTED; + return 0; + } + + if (!(attr->flags & mask)) + *states = ATTR_STATE_UNSUPPORTED; + + return 0; +} + /* Following items will be read out to indicate current plpd policy: * - -1: none * - 0: disallow @@ -2118,7 +2143,8 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = { AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC), AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC), AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), - AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC), + AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC, + .attr_update = pp_od_clk_voltage_attr_update), AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC), @@ -2163,10 +2189,6 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) { if (mp1_ver < IP_VERSION(10, 0, 0)) *states = ATTR_STATE_UNSUPPORTED; - } else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) { - *states = ATTR_STATE_UNSUPPORTED; - if (amdgpu_dpm_is_overdrive_supported(adev)) - *states = ATTR_STATE_SUPPORTED; } else if (DEVICE_ATTR_IS(mem_busy_percent)) { if ((adev->flags & AMD_IS_APU && 
gc_ver != IP_VERSION(9, 4, 3)) || @@ -2174,7 +2196,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(pcie_bw)) { /* PCIe Perf counters won't work on APU nodes */ - if (adev->flags & AMD_IS_APU) + if (adev->flags & AMD_IS_APU || + !adev->asic_funcs->get_pcie_usage) *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(unique_id)) { switch (gc_ver) { diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index df4f20293c16..eb4da3666e05 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -6925,6 +6925,23 @@ static int si_dpm_enable(struct amdgpu_device *adev) return 0; } +static int si_set_temperature_range(struct amdgpu_device *adev) +{ + int ret; + + ret = si_thermal_enable_alert(adev, false); + if (ret) + return ret; + ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); + if (ret) + return ret; + ret = si_thermal_enable_alert(adev, true); + if (ret) + return ret; + + return ret; +} + static void si_dpm_disable(struct amdgpu_device *adev) { struct rv7xx_power_info *pi = rv770_get_pi(adev); @@ -7608,6 +7625,18 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev, static int si_dpm_late_init(void *handle) { + int ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->pm.dpm_enabled) + return 0; + + ret = si_set_temperature_range(adev); + if (ret) + return ret; +#if 0 //TODO ? + si_dpm_powergate_uvd(adev, true); +#endif return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 4cd43bbec910..1d96eb274d72 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -1303,13 +1303,12 @@ static int arcturus_get_power_limit(struct smu_context *smu, if (default_power_limit) *default_power_limit = power_limit; - if (smu->od_enabled) { + if (smu->od_enabled) od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]); - od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]); - } else { + else od_percent_upper = 0; - od_percent_lower = 100; - } + + od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]); dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n", od_percent_upper, od_percent_lower, power_limit); @@ -2273,8 +2272,8 @@ static uint16_t arcturus_get_current_pcie_link_speed(struct smu_context *smu) /* TODO: confirm this on real target */ esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL); - if ((esm_ctrl >> 15) & 0x1FFFF) - return (uint16_t)(((esm_ctrl >> 8) & 0x3F) + 128); + if ((esm_ctrl >> 15) & 0x1) + return (uint16_t)(((esm_ctrl >> 8) & 0x7F) + 128); return smu_v11_0_get_current_pcie_link_speed(smu); } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 8d1d29ffb0f1..ed189a3878eb 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -2357,13 +2357,12 @@ static int navi10_get_power_limit(struct smu_context *smu, *default_power_limit = power_limit; if (smu->od_enabled && - navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) { + 
navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]); - od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]); - } else { + else od_percent_upper = 0; - od_percent_lower = 100; - } + + od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]); dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n", od_percent_upper, od_percent_lower, power_limit); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 21fc033528fa..e2ad2b972ab0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -640,13 +640,12 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu, if (default_power_limit) *default_power_limit = power_limit; - if (smu->od_enabled) { + if (smu->od_enabled) od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]); - od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]); - } else { + else od_percent_upper = 0; - od_percent_lower = 100; - } + + od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]); dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n", od_percent_upper, od_percent_lower, power_limit); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index dd9bcbd630a1..f41ac6465f2a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -1682,8 +1682,8 @@ static int aldebaran_get_current_pcie_link_speed(struct smu_context *smu) /* TODO: confirm this on real target */ esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL); - if ((esm_ctrl >> 15) & 0x1FFFF) - return (((esm_ctrl >> 8) & 0x3F) + 128); + if ((esm_ctrl >> 15) & 0x1) + return (((esm_ctrl >> 8) & 0x7F) + 128); return smu_v13_0_get_current_pcie_link_speed(smu); } @@ -1746,10 +1746,12 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu, gpu_metrics->current_fan_speed = 0; - gpu_metrics->pcie_link_width = - smu_v13_0_get_current_pcie_link_width(smu); - gpu_metrics->pcie_link_speed = - aldebaran_get_current_pcie_link_speed(smu); + if (!amdgpu_sriov_vf(smu->adev)) { + gpu_metrics->pcie_link_width = + smu_v13_0_get_current_pcie_link_width(smu); + gpu_metrics->pcie_link_speed = + aldebaran_get_current_pcie_link_speed(smu); + } gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index a9954ffc02c5..9b80f18ea6c3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -2369,13 +2369,12 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu, if (default_power_limit) *default_power_limit = power_limit; - if (smu->od_enabled) { + if (smu->od_enabled) od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]); - od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]); - } else { + else 
od_percent_upper = 0; - od_percent_lower = 100; - } + + od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]); dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n", od_percent_upper, od_percent_lower, power_limit); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 45a84fd5dc04..3957af057d54 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -2147,8 +2147,8 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu) /* TODO: confirm this on real target */ esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL); - if ((esm_ctrl >> 15) & 0x1FFFF) - return (((esm_ctrl >> 8) & 0x3F) + 128); + if ((esm_ctrl >> 15) & 0x1) + return (((esm_ctrl >> 8) & 0x7F) + 128); speed_level = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) @@ -2228,14 +2228,16 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak) >> GET_INST(GC, 0); if (!(adev->flags & AMD_IS_APU)) { - link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu); - if (link_width_level > MAX_LINK_WIDTH) - link_width_level = 0; + if (!amdgpu_sriov_vf(adev)) { + link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu); + if (link_width_level > MAX_LINK_WIDTH) + link_width_level = 0; - gpu_metrics->pcie_link_width = - DECODE_LANE_WIDTH(link_width_level); - gpu_metrics->pcie_link_speed = - smu_v13_0_6_get_current_pcie_link_speed(smu); + gpu_metrics->pcie_link_width = + DECODE_LANE_WIDTH(link_width_level); + gpu_metrics->pcie_link_speed = + smu_v13_0_6_get_current_pcie_link_speed(smu); + } gpu_metrics->pcie_bandwidth_acc = SMUQ10_ROUND(metrics_x->PcieBandwidthAcc[0]); gpu_metrics->pcie_bandwidth_inst = @@ -2306,8 +2308,8 @@ static int smu_v13_0_6_mode2_reset(struct smu_context *smu) ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, SMU_RESET_MODE_2); - /* This is similar to FLR, wait till max FLR timeout */ - msleep(100); + /* Reset takes a bit longer, wait for 200ms. 
*/ + msleep(200); dev_dbg(smu->adev->dev, "restore config space...\n"); /* Restore the config space saved during init */ diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index 0ffdb58af74e..3dc7b60cb075 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -2333,13 +2333,12 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu, if (default_power_limit) *default_power_limit = power_limit; - if (smu->od_enabled) { + if (smu->od_enabled) od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]); - od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]); - } else { + else od_percent_upper = 0; - od_percent_lower = 100; - } + + od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]); dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n", od_percent_upper, od_percent_lower, power_limit); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index 2aa7e9945a0b..7ac9bc0df8fd 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -229,8 +229,6 @@ int smu_v14_0_check_fw_version(struct smu_context *smu) smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2; break; case IP_VERSION(14, 0, 0): - if ((smu->smc_fw_version < 0x5d3a00)) - dev_warn(smu->adev->dev, "The PMFW version(%x) is behind in this BIOS!\n", smu->smc_fw_version); smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0; break; default: diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c index 47fdbae4adfc..9310c4758e38 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c @@ -261,7 +261,10 @@ static int smu_v14_0_0_get_smu_metrics_data(struct smu_context *smu, *value = metrics->MpipuclkFrequency; break; case METRICS_AVERAGE_GFXACTIVITY: - *value = metrics->GfxActivity / 100; + if ((smu->smc_fw_version > 0x5d4600)) + *value = metrics->GfxActivity; + else + *value = metrics->GfxActivity / 100; break; case METRICS_AVERAGE_VCNACTIVITY: *value = metrics->VcnActivity / 100; diff --git a/drivers/gpu/drm/display/Kconfig b/drivers/gpu/drm/display/Kconfig index 09712b88a5b8..c0f56888c328 100644 --- a/drivers/gpu/drm/display/Kconfig +++ b/drivers/gpu/drm/display/Kconfig @@ -17,6 +17,27 @@ config DRM_DISPLAY_DP_HELPER help DRM display helpers for DisplayPort. +config DRM_DISPLAY_DP_TUNNEL + bool + select DRM_DISPLAY_DP_HELPER + help + Enable support for DisplayPort tunnels. This allows drivers to use + DP tunnel features like the Bandwidth Allocation mode to maximize the + BW utilization for display streams on Thunderbolt links. + +config DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE + bool "Enable debugging the DP tunnel state" + depends on REF_TRACKER + depends on DRM_DISPLAY_DP_TUNNEL + depends on DEBUG_KERNEL + depends on EXPERT + help + Enables debugging the DP tunnel manager's state, including the + consistency of all managed tunnels' reference counting and the state of + streams contained in tunnels. + + If in doubt, say "N". 
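For context, here is a minimal, hypothetical driver-side sketch of the flow these Kconfig options enable, using the tunnel helpers introduced later in this patch (drm_dp_tunnel_detect(), drm_dp_tunnel_enable_bw_alloc(), drm_dp_tunnel_alloc_bw() and drm_dp_tunnel_destroy()). The tunnel manager and AUX channel are assumed to exist already, all names are illustrative, and error handling is reduced to the basics:

/*
 * Illustrative sketch only; assumes the drm_dp_tunnel helpers added by this
 * series and a struct drm_dp_tunnel_mgr created elsewhere by the driver.
 */
static int example_enable_tunnel_bw(struct drm_dp_tunnel_mgr *mgr,
				    struct drm_dp_aux *aux, int stream_bw)
{
	struct drm_dp_tunnel *tunnel;
	int err;

	tunnel = drm_dp_tunnel_detect(mgr, aux);
	if (IS_ERR(tunnel))
		return PTR_ERR(tunnel);		/* -ENODEV if there is no tunnel */

	err = drm_dp_tunnel_enable_bw_alloc(tunnel);
	if (err)
		goto out_destroy;		/* -EOPNOTSUPP without BWA support */

	/* Request the BW needed by the display stream, in kB/s units. */
	err = drm_dp_tunnel_alloc_bw(tunnel, stream_bw);
	if (err)
		goto out_destroy;

	return 0;

out_destroy:
	drm_dp_tunnel_destroy(tunnel);
	return err;
}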
+ config DRM_DISPLAY_HDCP_HELPER bool depends on DRM_DISPLAY_HELPER diff --git a/drivers/gpu/drm/display/Makefile b/drivers/gpu/drm/display/Makefile index 17ac4a1006a8..7ca61333c669 100644 --- a/drivers/gpu/drm/display/Makefile +++ b/drivers/gpu/drm/display/Makefile @@ -8,6 +8,8 @@ drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_HELPER) += \ drm_dp_helper.o \ drm_dp_mst_topology.o \ drm_dsc_helper.o +drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_TUNNEL) += \ + drm_dp_tunnel.o drm_display_helper-$(CONFIG_DRM_DISPLAY_HDCP_HELPER) += drm_hdcp_helper.o drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += \ drm_hdmi_helper.o \ diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c index 57a32e962322..266826eac4a7 100644 --- a/drivers/gpu/drm/display/drm_dp_helper.c +++ b/drivers/gpu/drm/display/drm_dp_helper.c @@ -4187,3 +4187,33 @@ int drm_dp_bw_channel_coding_efficiency(bool is_uhbr) return 800000; } EXPORT_SYMBOL(drm_dp_bw_channel_coding_efficiency); + +/** + * drm_dp_max_dprx_data_rate - Get the max data bandwidth of a DPRX sink + * @max_link_rate: max DPRX link rate in 10kbps units + * @max_lanes: max DPRX lane count + * + * Given a link rate and lanes, get the data bandwidth. + * + * Data bandwidth is the actual payload rate, which depends on the data + * bandwidth efficiency and the link rate. + * + * Note that protocol layers above the DPRX link level considered here can + * further limit the maximum data rate. Such layers are the MST topology (with + * limits on the link between the source and first branch device as well as on + * the whole MST path until the DPRX link) and (Thunderbolt) DP tunnels - + * which in turn can encapsulate an MST link with its own limit - with each + * SST or MST encapsulated tunnel sharing the BW of a tunnel group. + * + * Returns the maximum data rate in kBps units. 
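+ *
+ * Worked example (illustrative figures, not part of this change): an HBR3
+ * link (max_link_rate = 810000, i.e. 8.1 Gbps per lane) with 4 lanes uses
+ * 8b/10b channel coding, for which drm_dp_bw_channel_coding_efficiency()
+ * returns 800000 (80%). The formula below then yields
+ * 810000 * 10 * 4 * 800000 / (1000000 * 8) = 3240000 kBps (~3.24 GB/s) of
+ * payload bandwidth.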
+ */ +int drm_dp_max_dprx_data_rate(int max_link_rate, int max_lanes) +{ + int ch_coding_efficiency = + drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(max_link_rate)); + + return DIV_ROUND_DOWN_ULL(mul_u32_u32(max_link_rate * 10 * max_lanes, + ch_coding_efficiency), + 1000000 * 8); +} +EXPORT_SYMBOL(drm_dp_max_dprx_data_rate); diff --git a/drivers/gpu/drm/display/drm_dp_tunnel.c b/drivers/gpu/drm/display/drm_dp_tunnel.c new file mode 100644 index 000000000000..120e0de674c1 --- /dev/null +++ b/drivers/gpu/drm/display/drm_dp_tunnel.c @@ -0,0 +1,1949 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include +#include + +#include + +#include +#include +#include +#include +#include + +#define to_group(__private_obj) \ + container_of(__private_obj, struct drm_dp_tunnel_group, base) + +#define to_group_state(__private_state) \ + container_of(__private_state, struct drm_dp_tunnel_group_state, base) + +#define is_dp_tunnel_private_obj(__obj) \ + ((__obj)->funcs == &tunnel_group_funcs) + +#define for_each_new_group_in_state(__state, __new_group_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_private_objs; \ + (__i)++) \ + for_each_if ((__state)->private_objs[__i].ptr && \ + is_dp_tunnel_private_obj((__state)->private_objs[__i].ptr) && \ + ((__new_group_state) = \ + to_group_state((__state)->private_objs[__i].new_state), 1)) + +#define for_each_old_group_in_state(__state, __old_group_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_private_objs; \ + (__i)++) \ + for_each_if ((__state)->private_objs[__i].ptr && \ + is_dp_tunnel_private_obj((__state)->private_objs[__i].ptr) && \ + ((__old_group_state) = \ + to_group_state((__state)->private_objs[__i].old_state), 1)) + +#define for_each_tunnel_in_group(__group, __tunnel) \ + list_for_each_entry(__tunnel, &(__group)->tunnels, node) + +#define for_each_tunnel_state(__group_state, __tunnel_state) \ + list_for_each_entry(__tunnel_state, &(__group_state)->tunnel_states, node) + +#define for_each_tunnel_state_safe(__group_state, __tunnel_state, __tunnel_state_tmp) \ + list_for_each_entry_safe(__tunnel_state, __tunnel_state_tmp, \ + &(__group_state)->tunnel_states, node) + +#define kbytes_to_mbits(__kbytes) \ + DIV_ROUND_UP((__kbytes) * 8, 1000) + +#define DPTUN_BW_ARG(__bw) ((__bw) < 0 ? (__bw) : kbytes_to_mbits(__bw)) + +#define __tun_prn(__tunnel, __level, __type, __fmt, ...) \ + drm_##__level##__type((__tunnel)->group->mgr->dev, \ + "[DPTUN %s][%s] " __fmt, \ + drm_dp_tunnel_name(__tunnel), \ + (__tunnel)->aux->name, ## \ + __VA_ARGS__) + +#define tun_dbg(__tunnel, __fmt, ...) \ + __tun_prn(__tunnel, dbg, _kms, __fmt, ## __VA_ARGS__) + +#define tun_dbg_stat(__tunnel, __err, __fmt, ...) do { \ + if (__err) \ + __tun_prn(__tunnel, dbg, _kms, __fmt " (Failed, err: %pe)\n", \ + ## __VA_ARGS__, ERR_PTR(__err)); \ + else \ + __tun_prn(__tunnel, dbg, _kms, __fmt " (Ok)\n", \ + ## __VA_ARGS__); \ +} while (0) + +#define tun_dbg_atomic(__tunnel, __fmt, ...) \ + __tun_prn(__tunnel, dbg, _atomic, __fmt, ## __VA_ARGS__) + +#define tun_grp_dbg(__group, __fmt, ...) 
\ + drm_dbg_kms((__group)->mgr->dev, \ + "[DPTUN %s] " __fmt, \ + drm_dp_tunnel_group_name(__group), ## \ + __VA_ARGS__) + +#define DP_TUNNELING_BASE DP_TUNNELING_OUI + +#define __DPTUN_REG_RANGE(__start, __size) \ + GENMASK_ULL((__start) + (__size) - 1, (__start)) + +#define DPTUN_REG_RANGE(__addr, __size) \ + __DPTUN_REG_RANGE((__addr) - DP_TUNNELING_BASE, (__size)) + +#define DPTUN_REG(__addr) DPTUN_REG_RANGE(__addr, 1) + +#define DPTUN_INFO_REG_MASK ( \ + DPTUN_REG_RANGE(DP_TUNNELING_OUI, DP_TUNNELING_OUI_BYTES) | \ + DPTUN_REG_RANGE(DP_TUNNELING_DEV_ID, DP_TUNNELING_DEV_ID_BYTES) | \ + DPTUN_REG(DP_TUNNELING_HW_REV) | \ + DPTUN_REG(DP_TUNNELING_SW_REV_MAJOR) | \ + DPTUN_REG(DP_TUNNELING_SW_REV_MINOR) | \ + DPTUN_REG(DP_TUNNELING_CAPABILITIES) | \ + DPTUN_REG(DP_IN_ADAPTER_INFO) | \ + DPTUN_REG(DP_USB4_DRIVER_ID) | \ + DPTUN_REG(DP_USB4_DRIVER_BW_CAPABILITY) | \ + DPTUN_REG(DP_IN_ADAPTER_TUNNEL_INFORMATION) | \ + DPTUN_REG(DP_BW_GRANULARITY) | \ + DPTUN_REG(DP_ESTIMATED_BW) | \ + DPTUN_REG(DP_ALLOCATED_BW) | \ + DPTUN_REG(DP_TUNNELING_MAX_LINK_RATE) | \ + DPTUN_REG(DP_TUNNELING_MAX_LANE_COUNT) | \ + DPTUN_REG(DP_DPTX_BW_ALLOCATION_MODE_CONTROL)) + +static const DECLARE_BITMAP(dptun_info_regs, 64) = { + DPTUN_INFO_REG_MASK & -1UL, +#if BITS_PER_LONG == 32 + DPTUN_INFO_REG_MASK >> 32, +#endif +}; + +struct drm_dp_tunnel_regs { + u8 buf[HWEIGHT64(DPTUN_INFO_REG_MASK)]; +}; + +struct drm_dp_tunnel_group; + +struct drm_dp_tunnel { + struct drm_dp_tunnel_group *group; + + struct list_head node; + + struct kref kref; + struct ref_tracker *tracker; + struct drm_dp_aux *aux; + char name[8]; + + int bw_granularity; + int estimated_bw; + int allocated_bw; + + int max_dprx_rate; + u8 max_dprx_lane_count; + + u8 adapter_id; + + bool bw_alloc_supported:1; + bool bw_alloc_enabled:1; + bool has_io_error:1; + bool destroyed:1; +}; + +struct drm_dp_tunnel_group_state; + +struct drm_dp_tunnel_state { + struct drm_dp_tunnel_group_state *group_state; + + struct drm_dp_tunnel_ref tunnel_ref; + + struct list_head node; + + u32 stream_mask; + int *stream_bw; +}; + +struct drm_dp_tunnel_group_state { + struct drm_private_state base; + + struct list_head tunnel_states; +}; + +struct drm_dp_tunnel_group { + struct drm_private_obj base; + struct drm_dp_tunnel_mgr *mgr; + + struct list_head tunnels; + + /* available BW including the allocated_bw of all tunnels in the group */ + int available_bw; + + u8 drv_group_id; + char name[8]; + + bool active:1; +}; + +struct drm_dp_tunnel_mgr { + struct drm_device *dev; + + int group_count; + struct drm_dp_tunnel_group *groups; + wait_queue_head_t bw_req_queue; + +#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE + struct ref_tracker_dir ref_tracker; +#endif +}; + +/* + * The following helpers provide a way to read out the tunneling DPCD + * registers with a minimal amount of AUX transfers (1 transfer per contiguous + * range, as permitted by the 16 byte per transfer AUX limit), not accessing + * other registers to avoid any read side-effects. 
+ */ +static int next_reg_area(int *offset) +{ + *offset = find_next_bit(dptun_info_regs, 64, *offset); + + return find_next_zero_bit(dptun_info_regs, 64, *offset + 1) - *offset; +} + +#define tunnel_reg_ptr(__regs, __address) ({ \ + WARN_ON(!test_bit((__address) - DP_TUNNELING_BASE, dptun_info_regs)); \ + &(__regs)->buf[bitmap_weight(dptun_info_regs, (__address) - DP_TUNNELING_BASE)]; \ +}) + +static int read_tunnel_regs(struct drm_dp_aux *aux, struct drm_dp_tunnel_regs *regs) +{ + int offset = 0; + int len; + + while ((len = next_reg_area(&offset))) { + int address = DP_TUNNELING_BASE + offset; + + if (drm_dp_dpcd_read(aux, address, tunnel_reg_ptr(regs, address), len) < 0) + return -EIO; + + offset += len; + } + + return 0; +} + +static u8 tunnel_reg(const struct drm_dp_tunnel_regs *regs, int address) +{ + return *tunnel_reg_ptr(regs, address); +} + +static u8 tunnel_reg_drv_group_id(const struct drm_dp_tunnel_regs *regs) +{ + u8 drv_id = tunnel_reg(regs, DP_USB4_DRIVER_ID) & DP_USB4_DRIVER_ID_MASK; + u8 group_id = tunnel_reg(regs, DP_IN_ADAPTER_TUNNEL_INFORMATION) & DP_GROUP_ID_MASK; + + if (!group_id) + return 0; + + return (drv_id << DP_GROUP_ID_BITS) | group_id; +} + +/* Return granularity in kB/s units */ +static int tunnel_reg_bw_granularity(const struct drm_dp_tunnel_regs *regs) +{ + int gr = tunnel_reg(regs, DP_BW_GRANULARITY) & DP_BW_GRANULARITY_MASK; + + if (gr > 2) + return -1; + + return (250000 << gr) / 8; +} + +static int tunnel_reg_max_dprx_rate(const struct drm_dp_tunnel_regs *regs) +{ + u8 bw_code = tunnel_reg(regs, DP_TUNNELING_MAX_LINK_RATE); + + return drm_dp_bw_code_to_link_rate(bw_code); +} + +static int tunnel_reg_max_dprx_lane_count(const struct drm_dp_tunnel_regs *regs) +{ + return tunnel_reg(regs, DP_TUNNELING_MAX_LANE_COUNT) & + DP_TUNNELING_MAX_LANE_COUNT_MASK; +} + +static bool tunnel_reg_bw_alloc_supported(const struct drm_dp_tunnel_regs *regs) +{ + u8 cap_mask = DP_TUNNELING_SUPPORT | DP_IN_BW_ALLOCATION_MODE_SUPPORT; + + if ((tunnel_reg(regs, DP_TUNNELING_CAPABILITIES) & cap_mask) != cap_mask) + return false; + + return tunnel_reg(regs, DP_USB4_DRIVER_BW_CAPABILITY) & + DP_USB4_DRIVER_BW_ALLOCATION_MODE_SUPPORT; +} + +static bool tunnel_reg_bw_alloc_enabled(const struct drm_dp_tunnel_regs *regs) +{ + return tunnel_reg(regs, DP_DPTX_BW_ALLOCATION_MODE_CONTROL) & + DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE; +} + +static u8 tunnel_group_drv_id(u8 drv_group_id) +{ + return drv_group_id >> DP_GROUP_ID_BITS; +} + +static u8 tunnel_group_id(u8 drv_group_id) +{ + return drv_group_id & DP_GROUP_ID_MASK; +} + +const char *drm_dp_tunnel_name(const struct drm_dp_tunnel *tunnel) +{ + return tunnel->name; +} +EXPORT_SYMBOL(drm_dp_tunnel_name); + +static const char *drm_dp_tunnel_group_name(const struct drm_dp_tunnel_group *group) +{ + return group->name; +} + +static struct drm_dp_tunnel_group * +lookup_or_alloc_group(struct drm_dp_tunnel_mgr *mgr, u8 drv_group_id) +{ + struct drm_dp_tunnel_group *group = NULL; + int i; + + for (i = 0; i < mgr->group_count; i++) { + /* + * A tunnel group with 0 group ID shouldn't have more than one + * tunnels. 
+ */ + if (tunnel_group_id(drv_group_id) && + mgr->groups[i].drv_group_id == drv_group_id) + return &mgr->groups[i]; + + if (!group && !mgr->groups[i].active) + group = &mgr->groups[i]; + } + + if (!group) { + drm_dbg_kms(mgr->dev, + "DPTUN: Can't allocate more tunnel groups\n"); + return NULL; + } + + group->drv_group_id = drv_group_id; + group->active = true; + + /* + * The group name format here and elsewhere: Driver-ID:Group-ID:* + * (* standing for all DP-Adapters/tunnels in the group). + */ + snprintf(group->name, sizeof(group->name), "%d:%d:*", + tunnel_group_drv_id(drv_group_id) & ((1 << DP_GROUP_ID_BITS) - 1), + tunnel_group_id(drv_group_id) & ((1 << DP_USB4_DRIVER_ID_BITS) - 1)); + + return group; +} + +static void free_group(struct drm_dp_tunnel_group *group) +{ + struct drm_dp_tunnel_mgr *mgr = group->mgr; + + if (drm_WARN_ON(mgr->dev, !list_empty(&group->tunnels))) + return; + + group->drv_group_id = 0; + group->available_bw = -1; + group->active = false; +} + +static struct drm_dp_tunnel * +tunnel_get(struct drm_dp_tunnel *tunnel) +{ + kref_get(&tunnel->kref); + + return tunnel; +} + +static void free_tunnel(struct kref *kref) +{ + struct drm_dp_tunnel *tunnel = container_of(kref, typeof(*tunnel), kref); + struct drm_dp_tunnel_group *group = tunnel->group; + + list_del(&tunnel->node); + if (list_empty(&group->tunnels)) + free_group(group); + + kfree(tunnel); +} + +static void tunnel_put(struct drm_dp_tunnel *tunnel) +{ + kref_put(&tunnel->kref, free_tunnel); +} + +#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE +static void track_tunnel_ref(struct drm_dp_tunnel *tunnel, + struct ref_tracker **tracker) +{ + ref_tracker_alloc(&tunnel->group->mgr->ref_tracker, + tracker, GFP_KERNEL); +} + +static void untrack_tunnel_ref(struct drm_dp_tunnel *tunnel, + struct ref_tracker **tracker) +{ + ref_tracker_free(&tunnel->group->mgr->ref_tracker, + tracker); +} +#else +static void track_tunnel_ref(struct drm_dp_tunnel *tunnel, + struct ref_tracker **tracker) +{ +} + +static void untrack_tunnel_ref(struct drm_dp_tunnel *tunnel, + struct ref_tracker **tracker) +{ +} +#endif + +/** + * drm_dp_tunnel_get - Get a reference for a DP tunnel + * @tunnel: Tunnel object + * @tracker: Debug tracker for the reference + * + * Get a reference for @tunnel, along with a debug tracker to help locating + * the source of a reference leak/double reference put etc. issue. + * + * The reference must be dropped after use calling drm_dp_tunnel_put() + * passing @tunnel and *@tracker returned from here. + * + * Returns @tunnel - as a convenience - along with *@tracker. + */ +struct drm_dp_tunnel * +drm_dp_tunnel_get(struct drm_dp_tunnel *tunnel, + struct ref_tracker **tracker) +{ + track_tunnel_ref(tunnel, tracker); + + return tunnel_get(tunnel); +} +EXPORT_SYMBOL(drm_dp_tunnel_get); + +/** + * drm_dp_tunnel_put - Put a reference for a DP tunnel + * @tunnel - Tunnel object + * @tracker - Debug tracker for the reference + * + * Put a reference for @tunnel along with its debug *@tracker, which + * was obtained with drm_dp_tunnel_get(). 
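+ *
+ * A typical (illustrative) pairing in a driver, with the names hypothetical:
+ *
+ *	struct ref_tracker *tracker;
+ *	struct drm_dp_tunnel *tunnel;
+ *
+ *	tunnel = drm_dp_tunnel_get(detected_tunnel, &tracker);
+ *	... use the tunnel ...
+ *	drm_dp_tunnel_put(tunnel, &tracker);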
+ */ +void drm_dp_tunnel_put(struct drm_dp_tunnel *tunnel, + struct ref_tracker **tracker) +{ + untrack_tunnel_ref(tunnel, tracker); + + tunnel_put(tunnel); +} +EXPORT_SYMBOL(drm_dp_tunnel_put); + +static bool add_tunnel_to_group(struct drm_dp_tunnel_mgr *mgr, + u8 drv_group_id, + struct drm_dp_tunnel *tunnel) +{ + struct drm_dp_tunnel_group *group; + + group = lookup_or_alloc_group(mgr, drv_group_id); + if (!group) + return false; + + tunnel->group = group; + list_add(&tunnel->node, &group->tunnels); + + return true; +} + +static struct drm_dp_tunnel * +create_tunnel(struct drm_dp_tunnel_mgr *mgr, + struct drm_dp_aux *aux, + const struct drm_dp_tunnel_regs *regs) +{ + u8 drv_group_id = tunnel_reg_drv_group_id(regs); + struct drm_dp_tunnel *tunnel; + + tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL); + if (!tunnel) + return NULL; + + INIT_LIST_HEAD(&tunnel->node); + + kref_init(&tunnel->kref); + + tunnel->aux = aux; + + tunnel->adapter_id = tunnel_reg(regs, DP_IN_ADAPTER_INFO) & DP_IN_ADAPTER_NUMBER_MASK; + + snprintf(tunnel->name, sizeof(tunnel->name), "%d:%d:%d", + tunnel_group_drv_id(drv_group_id) & ((1 << DP_GROUP_ID_BITS) - 1), + tunnel_group_id(drv_group_id) & ((1 << DP_USB4_DRIVER_ID_BITS) - 1), + tunnel->adapter_id & ((1 << DP_IN_ADAPTER_NUMBER_BITS) - 1)); + + tunnel->bw_granularity = tunnel_reg_bw_granularity(regs); + tunnel->allocated_bw = tunnel_reg(regs, DP_ALLOCATED_BW) * + tunnel->bw_granularity; + /* + * An initial allocated BW of 0 indicates an undefined state: the + * actual allocation is determined by the TBT CM, usually following a + * legacy allocation policy (based on the max DPRX caps). From the + * driver's POV the state becomes defined only after the first + * allocation request. + */ + if (!tunnel->allocated_bw) + tunnel->allocated_bw = -1; + + tunnel->bw_alloc_supported = tunnel_reg_bw_alloc_supported(regs); + tunnel->bw_alloc_enabled = tunnel_reg_bw_alloc_enabled(regs); + + if (!add_tunnel_to_group(mgr, drv_group_id, tunnel)) { + kfree(tunnel); + + return NULL; + } + + track_tunnel_ref(tunnel, &tunnel->tracker); + + return tunnel; +} + +static void destroy_tunnel(struct drm_dp_tunnel *tunnel) +{ + untrack_tunnel_ref(tunnel, &tunnel->tracker); + tunnel_put(tunnel); +} + +/** + * drm_dp_tunnel_set_io_error - Set the IO error flag for a DP tunnel + * @tunnel: Tunnel object + * + * Set the IO error flag for @tunnel. Drivers can call this function upon + * detecting a failure that affects the tunnel functionality, for instance + * after a DP AUX transfer failure on the port @tunnel is connected to. + * + * This disables further management of @tunnel, including any related + * AUX accesses for tunneling DPCD registers, returning error to the + * initiators of these. The driver is supposed to drop this tunnel and - + * optionally - recreate it. 
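+ *
+ * For instance (illustrative only), a driver that sees an AUX failure on the
+ * port the tunnel is connected to might do:
+ *
+ *	if (drm_dp_dpcd_read(aux, DP_DPCD_REV, &rev, 1) < 0) {
+ *		drm_dp_tunnel_set_io_error(tunnel);
+ *		drm_dp_tunnel_destroy(tunnel);
+ *	}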
+ */ +void drm_dp_tunnel_set_io_error(struct drm_dp_tunnel *tunnel) +{ + tunnel->has_io_error = true; +} +EXPORT_SYMBOL(drm_dp_tunnel_set_io_error); + +#define SKIP_DPRX_CAPS_CHECK BIT(0) +#define ALLOW_ALLOCATED_BW_CHANGE BIT(1) +static bool tunnel_regs_are_valid(struct drm_dp_tunnel_mgr *mgr, + const struct drm_dp_tunnel_regs *regs, + unsigned int flags) +{ + u8 drv_group_id = tunnel_reg_drv_group_id(regs); + bool check_dprx = !(flags & SKIP_DPRX_CAPS_CHECK); + bool ret = true; + + if (!tunnel_reg_bw_alloc_supported(regs)) { + if (tunnel_group_id(drv_group_id)) { + drm_dbg_kms(mgr->dev, + "DPTUN: A non-zero group ID is only allowed with BWA support\n"); + ret = false; + } + + if (tunnel_reg(regs, DP_ALLOCATED_BW)) { + drm_dbg_kms(mgr->dev, + "DPTUN: BW is allocated without BWA support\n"); + ret = false; + } + + return ret; + } + + if (!tunnel_group_id(drv_group_id)) { + drm_dbg_kms(mgr->dev, + "DPTUN: BWA support requires a non-zero group ID\n"); + ret = false; + } + + if (check_dprx && hweight8(tunnel_reg_max_dprx_lane_count(regs)) != 1) { + drm_dbg_kms(mgr->dev, + "DPTUN: Invalid DPRX lane count: %d\n", + tunnel_reg_max_dprx_lane_count(regs)); + + ret = false; + } + + if (check_dprx && !tunnel_reg_max_dprx_rate(regs)) { + drm_dbg_kms(mgr->dev, + "DPTUN: DPRX rate is 0\n"); + + ret = false; + } + + if (tunnel_reg_bw_granularity(regs) < 0) { + drm_dbg_kms(mgr->dev, + "DPTUN: Invalid BW granularity\n"); + + ret = false; + } + + if (tunnel_reg(regs, DP_ALLOCATED_BW) > tunnel_reg(regs, DP_ESTIMATED_BW)) { + drm_dbg_kms(mgr->dev, + "DPTUN: Allocated BW %d > estimated BW %d Mb/s\n", + DPTUN_BW_ARG(tunnel_reg(regs, DP_ALLOCATED_BW) * + tunnel_reg_bw_granularity(regs)), + DPTUN_BW_ARG(tunnel_reg(regs, DP_ESTIMATED_BW) * + tunnel_reg_bw_granularity(regs))); + + ret = false; + } + + return ret; +} + +static int tunnel_allocated_bw(const struct drm_dp_tunnel *tunnel) +{ + return max(tunnel->allocated_bw, 0); +} + +static bool tunnel_info_changes_are_valid(struct drm_dp_tunnel *tunnel, + const struct drm_dp_tunnel_regs *regs, + unsigned int flags) +{ + u8 new_drv_group_id = tunnel_reg_drv_group_id(regs); + bool ret = true; + + if (tunnel->bw_alloc_supported != tunnel_reg_bw_alloc_supported(regs)) { + tun_dbg(tunnel, + "BW alloc support has changed %s -> %s\n", + str_yes_no(tunnel->bw_alloc_supported), + str_yes_no(tunnel_reg_bw_alloc_supported(regs))); + + ret = false; + } + + if (tunnel->group->drv_group_id != new_drv_group_id) { + tun_dbg(tunnel, + "Driver/group ID has changed %d:%d:* -> %d:%d:*\n", + tunnel_group_drv_id(tunnel->group->drv_group_id), + tunnel_group_id(tunnel->group->drv_group_id), + tunnel_group_drv_id(new_drv_group_id), + tunnel_group_id(new_drv_group_id)); + + ret = false; + } + + if (!tunnel->bw_alloc_supported) + return ret; + + if (tunnel->bw_granularity != tunnel_reg_bw_granularity(regs)) { + tun_dbg(tunnel, + "BW granularity has changed: %d -> %d Mb/s\n", + DPTUN_BW_ARG(tunnel->bw_granularity), + DPTUN_BW_ARG(tunnel_reg_bw_granularity(regs))); + + ret = false; + } + + /* + * On some devices at least the BW alloc mode enabled status is always + * reported as 0, so skip checking that here. 
+ */ + + if (!(flags & ALLOW_ALLOCATED_BW_CHANGE) && + tunnel_allocated_bw(tunnel) != + tunnel_reg(regs, DP_ALLOCATED_BW) * tunnel->bw_granularity) { + tun_dbg(tunnel, + "Allocated BW has changed: %d -> %d Mb/s\n", + DPTUN_BW_ARG(tunnel->allocated_bw), + DPTUN_BW_ARG(tunnel_reg(regs, DP_ALLOCATED_BW) * tunnel->bw_granularity)); + + ret = false; + } + + return ret; +} + +static int +read_and_verify_tunnel_regs(struct drm_dp_tunnel *tunnel, + struct drm_dp_tunnel_regs *regs, + unsigned int flags) +{ + int err; + + err = read_tunnel_regs(tunnel->aux, regs); + if (err < 0) { + drm_dp_tunnel_set_io_error(tunnel); + + return err; + } + + if (!tunnel_regs_are_valid(tunnel->group->mgr, regs, flags)) + return -EINVAL; + + if (!tunnel_info_changes_are_valid(tunnel, regs, flags)) + return -EINVAL; + + return 0; +} + +static bool update_dprx_caps(struct drm_dp_tunnel *tunnel, const struct drm_dp_tunnel_regs *regs) +{ + bool changed = false; + + if (tunnel_reg_max_dprx_rate(regs) != tunnel->max_dprx_rate) { + tunnel->max_dprx_rate = tunnel_reg_max_dprx_rate(regs); + changed = true; + } + + if (tunnel_reg_max_dprx_lane_count(regs) != tunnel->max_dprx_lane_count) { + tunnel->max_dprx_lane_count = tunnel_reg_max_dprx_lane_count(regs); + changed = true; + } + + return changed; +} + +static int dev_id_len(const u8 *dev_id, int max_len) +{ + while (max_len && dev_id[max_len - 1] == '\0') + max_len--; + + return max_len; +} + +static int get_max_dprx_bw(const struct drm_dp_tunnel *tunnel) +{ + int max_dprx_bw = drm_dp_max_dprx_data_rate(tunnel->max_dprx_rate, + tunnel->max_dprx_lane_count); + + /* + * A BW request of roundup(max_dprx_bw, tunnel->bw_granularity) results in + * an allocation of max_dprx_bw. A BW request above this rounded-up + * value will fail. + */ + return min(roundup(max_dprx_bw, tunnel->bw_granularity), + MAX_DP_REQUEST_BW * tunnel->bw_granularity); +} + +static int get_max_tunnel_bw(const struct drm_dp_tunnel *tunnel) +{ + return min(get_max_dprx_bw(tunnel), tunnel->group->available_bw); +} + +/** + * drm_dp_tunnel_detect - Detect DP tunnel on the link + * @mgr: Tunnel manager + * @aux: DP AUX on which the tunnel will be detected + * + * Detect if there is any DP tunnel on the link and add it to the tunnel + * group's tunnel list. + * + * Returns a pointer to a tunnel on success, or an ERR_PTR() error on + * failure. + */ +struct drm_dp_tunnel * +drm_dp_tunnel_detect(struct drm_dp_tunnel_mgr *mgr, + struct drm_dp_aux *aux) +{ + struct drm_dp_tunnel_regs regs; + struct drm_dp_tunnel *tunnel; + int err; + + err = read_tunnel_regs(aux, ®s); + if (err) + return ERR_PTR(err); + + if (!(tunnel_reg(®s, DP_TUNNELING_CAPABILITIES) & + DP_TUNNELING_SUPPORT)) + return ERR_PTR(-ENODEV); + + /* The DPRX caps are valid only after enabling BW alloc mode. 
*/ + if (!tunnel_regs_are_valid(mgr, ®s, SKIP_DPRX_CAPS_CHECK)) + return ERR_PTR(-EINVAL); + + tunnel = create_tunnel(mgr, aux, ®s); + if (!tunnel) + return ERR_PTR(-ENOMEM); + + tun_dbg(tunnel, + "OUI:%*phD DevID:%*pE Rev-HW:%d.%d SW:%d.%d PR-Sup:%s BWA-Sup:%s BWA-En:%s\n", + DP_TUNNELING_OUI_BYTES, + tunnel_reg_ptr(®s, DP_TUNNELING_OUI), + dev_id_len(tunnel_reg_ptr(®s, DP_TUNNELING_DEV_ID), DP_TUNNELING_DEV_ID_BYTES), + tunnel_reg_ptr(®s, DP_TUNNELING_DEV_ID), + (tunnel_reg(®s, DP_TUNNELING_HW_REV) & DP_TUNNELING_HW_REV_MAJOR_MASK) >> + DP_TUNNELING_HW_REV_MAJOR_SHIFT, + (tunnel_reg(®s, DP_TUNNELING_HW_REV) & DP_TUNNELING_HW_REV_MINOR_MASK) >> + DP_TUNNELING_HW_REV_MINOR_SHIFT, + tunnel_reg(®s, DP_TUNNELING_SW_REV_MAJOR), + tunnel_reg(®s, DP_TUNNELING_SW_REV_MINOR), + str_yes_no(tunnel_reg(®s, DP_TUNNELING_CAPABILITIES) & + DP_PANEL_REPLAY_OPTIMIZATION_SUPPORT), + str_yes_no(tunnel->bw_alloc_supported), + str_yes_no(tunnel->bw_alloc_enabled)); + + return tunnel; +} +EXPORT_SYMBOL(drm_dp_tunnel_detect); + +/** + * drm_dp_tunnel_destroy - Destroy tunnel object + * @tunnel: Tunnel object + * + * Remove the tunnel from the tunnel topology and destroy it. + * + * Returns 0 on success, -ENODEV if the tunnel has been destroyed already. + */ +int drm_dp_tunnel_destroy(struct drm_dp_tunnel *tunnel) +{ + if (!tunnel) + return 0; + + if (drm_WARN_ON(tunnel->group->mgr->dev, tunnel->destroyed)) + return -ENODEV; + + tun_dbg(tunnel, "destroying\n"); + + tunnel->destroyed = true; + destroy_tunnel(tunnel); + + return 0; +} +EXPORT_SYMBOL(drm_dp_tunnel_destroy); + +static int check_tunnel(const struct drm_dp_tunnel *tunnel) +{ + if (tunnel->destroyed) + return -ENODEV; + + if (tunnel->has_io_error) + return -EIO; + + return 0; +} + +static int group_allocated_bw(struct drm_dp_tunnel_group *group) +{ + struct drm_dp_tunnel *tunnel; + int group_allocated_bw = 0; + + for_each_tunnel_in_group(group, tunnel) { + if (check_tunnel(tunnel) == 0 && + tunnel->bw_alloc_enabled) + group_allocated_bw += tunnel_allocated_bw(tunnel); + } + + return group_allocated_bw; +} + +/* + * The estimated BW reported by the TBT Connection Manager for each tunnel in + * a group includes the BW already allocated for the given tunnel and the + * unallocated BW which is free to be used by any tunnel in the group. 
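+ *
+ * Illustrative example: tunnels A and B share a group, with 2000 kB/s
+ * allocated to A and 1000 kB/s allocated to B. If A reports an estimated BW
+ * of 5000 kB/s, the total available BW of the group computed below is
+ * (2000 + 1000) + (5000 - 2000) = 6000 kB/s.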
+ */ +static int group_free_bw(const struct drm_dp_tunnel *tunnel) +{ + return tunnel->estimated_bw - tunnel_allocated_bw(tunnel); +} + +static int calc_group_available_bw(const struct drm_dp_tunnel *tunnel) +{ + return group_allocated_bw(tunnel->group) + + group_free_bw(tunnel); +} + +static int update_group_available_bw(struct drm_dp_tunnel *tunnel, + const struct drm_dp_tunnel_regs *regs) +{ + struct drm_dp_tunnel *tunnel_iter; + int group_available_bw; + bool changed; + + tunnel->estimated_bw = tunnel_reg(regs, DP_ESTIMATED_BW) * tunnel->bw_granularity; + + if (calc_group_available_bw(tunnel) == tunnel->group->available_bw) + return 0; + + for_each_tunnel_in_group(tunnel->group, tunnel_iter) { + int err; + + if (tunnel_iter == tunnel) + continue; + + if (check_tunnel(tunnel_iter) != 0 || + !tunnel_iter->bw_alloc_enabled) + continue; + + err = drm_dp_dpcd_probe(tunnel_iter->aux, DP_DPCD_REV); + if (err) { + tun_dbg(tunnel_iter, + "Probe failed, assume disconnected (err %pe)\n", + ERR_PTR(err)); + drm_dp_tunnel_set_io_error(tunnel_iter); + } + } + + group_available_bw = calc_group_available_bw(tunnel); + + tun_dbg(tunnel, "Updated group available BW: %d->%d\n", + DPTUN_BW_ARG(tunnel->group->available_bw), + DPTUN_BW_ARG(group_available_bw)); + + changed = tunnel->group->available_bw != group_available_bw; + + tunnel->group->available_bw = group_available_bw; + + return changed ? 1 : 0; +} + +static int set_bw_alloc_mode(struct drm_dp_tunnel *tunnel, bool enable) +{ + u8 mask = DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE | DP_UNMASK_BW_ALLOCATION_IRQ; + u8 val; + + if (drm_dp_dpcd_readb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, &val) < 0) + goto out_err; + + if (enable) + val |= mask; + else + val &= ~mask; + + if (drm_dp_dpcd_writeb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, val) < 0) + goto out_err; + + tunnel->bw_alloc_enabled = enable; + + return 0; + +out_err: + drm_dp_tunnel_set_io_error(tunnel); + + return -EIO; +} + +/** + * drm_dp_tunnel_enable_bw_alloc - Enable DP tunnel BW allocation mode + * @tunnel: Tunnel object + * + * Enable the DP tunnel BW allocation mode on @tunnel if it supports it. + * + * Returns 0 in case of success, negative error code otherwise. + */ +int drm_dp_tunnel_enable_bw_alloc(struct drm_dp_tunnel *tunnel) +{ + struct drm_dp_tunnel_regs regs; + int err; + + err = check_tunnel(tunnel); + if (err) + return err; + + if (!tunnel->bw_alloc_supported) + return -EOPNOTSUPP; + + if (!tunnel_group_id(tunnel->group->drv_group_id)) + return -EINVAL; + + err = set_bw_alloc_mode(tunnel, true); + if (err) + goto out; + + /* + * After a BWA disable/re-enable sequence the allocated BW can either + * stay at its last requested value or, for instance after system + * suspend/resume, TBT CM can reset back the allocation to the amount + * allocated in the legacy/non-BWA mode. Accordingly allow for the + * allocation to change wrt. the last SW state. 
+ */ + err = read_and_verify_tunnel_regs(tunnel, ®s, + ALLOW_ALLOCATED_BW_CHANGE); + if (err) { + set_bw_alloc_mode(tunnel, false); + + goto out; + } + + if (!tunnel->max_dprx_rate) + update_dprx_caps(tunnel, ®s); + + if (tunnel->group->available_bw == -1) { + err = update_group_available_bw(tunnel, ®s); + if (err > 0) + err = 0; + } +out: + tun_dbg_stat(tunnel, err, + "Enabling BW alloc mode: DPRX:%dx%d Group alloc:%d/%d Mb/s", + tunnel->max_dprx_rate / 100, tunnel->max_dprx_lane_count, + DPTUN_BW_ARG(group_allocated_bw(tunnel->group)), + DPTUN_BW_ARG(tunnel->group->available_bw)); + + return err; +} +EXPORT_SYMBOL(drm_dp_tunnel_enable_bw_alloc); + +/** + * drm_dp_tunnel_disable_bw_alloc - Disable DP tunnel BW allocation mode + * @tunnel: Tunnel object + * + * Disable the DP tunnel BW allocation mode on @tunnel. + * + * Returns 0 in case of success, negative error code otherwise. + */ +int drm_dp_tunnel_disable_bw_alloc(struct drm_dp_tunnel *tunnel) +{ + int err; + + err = check_tunnel(tunnel); + if (err) + return err; + + tunnel->allocated_bw = -1; + + err = set_bw_alloc_mode(tunnel, false); + + tun_dbg_stat(tunnel, err, "Disabling BW alloc mode"); + + return err; +} +EXPORT_SYMBOL(drm_dp_tunnel_disable_bw_alloc); + +/** + * drm_dp_tunnel_bw_alloc_is_enabled - Query the BW allocation mode enabled state + * @tunnel: Tunnel object + * + * Query if the BW allocation mode is enabled for @tunnel. + * + * Returns %true if the BW allocation mode is enabled for @tunnel. + */ +bool drm_dp_tunnel_bw_alloc_is_enabled(const struct drm_dp_tunnel *tunnel) +{ + return tunnel && tunnel->bw_alloc_enabled; +} +EXPORT_SYMBOL(drm_dp_tunnel_bw_alloc_is_enabled); + +static int clear_bw_req_state(struct drm_dp_aux *aux) +{ + u8 bw_req_mask = DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED; + + if (drm_dp_dpcd_writeb(aux, DP_TUNNELING_STATUS, bw_req_mask) < 0) + return -EIO; + + return 0; +} + +static int bw_req_complete(struct drm_dp_aux *aux, bool *status_changed) +{ + u8 bw_req_mask = DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED; + u8 status_change_mask = DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED; + u8 val; + int err; + + if (drm_dp_dpcd_readb(aux, DP_TUNNELING_STATUS, &val) < 0) + return -EIO; + + *status_changed = val & status_change_mask; + + val &= bw_req_mask; + + if (!val) + return -EAGAIN; + + err = clear_bw_req_state(aux); + if (err < 0) + return err; + + return val == DP_BW_REQUEST_SUCCEEDED ? 0 : -ENOSPC; +} + +static int allocate_tunnel_bw(struct drm_dp_tunnel *tunnel, int bw) +{ + struct drm_dp_tunnel_mgr *mgr = tunnel->group->mgr; + int request_bw = DIV_ROUND_UP(bw, tunnel->bw_granularity); + DEFINE_WAIT_FUNC(wait, woken_wake_function); + long timeout; + int err; + + if (bw < 0) { + err = -EINVAL; + goto out; + } + + if (request_bw * tunnel->bw_granularity == tunnel->allocated_bw) + return 0; + + /* Atomic check should prevent the following. 
*/ + if (drm_WARN_ON(mgr->dev, request_bw > MAX_DP_REQUEST_BW)) { + err = -EINVAL; + goto out; + } + + err = clear_bw_req_state(tunnel->aux); + if (err) + goto out; + + if (drm_dp_dpcd_writeb(tunnel->aux, DP_REQUEST_BW, request_bw) < 0) { + err = -EIO; + goto out; + } + + timeout = msecs_to_jiffies(3000); + add_wait_queue(&mgr->bw_req_queue, &wait); + + for (;;) { + bool status_changed; + + err = bw_req_complete(tunnel->aux, &status_changed); + if (err != -EAGAIN) + break; + + if (status_changed) { + struct drm_dp_tunnel_regs regs; + + err = read_and_verify_tunnel_regs(tunnel, ®s, + ALLOW_ALLOCATED_BW_CHANGE); + if (err) + break; + } + + if (!timeout) { + err = -ETIMEDOUT; + break; + } + + timeout = wait_woken(&wait, TASK_UNINTERRUPTIBLE, timeout); + }; + + remove_wait_queue(&mgr->bw_req_queue, &wait); + + if (err) + goto out; + + tunnel->allocated_bw = request_bw * tunnel->bw_granularity; + +out: + tun_dbg_stat(tunnel, err, "Allocating %d/%d Mb/s for tunnel: Group alloc:%d/%d Mb/s", + DPTUN_BW_ARG(request_bw * tunnel->bw_granularity), + DPTUN_BW_ARG(get_max_tunnel_bw(tunnel)), + DPTUN_BW_ARG(group_allocated_bw(tunnel->group)), + DPTUN_BW_ARG(tunnel->group->available_bw)); + + if (err == -EIO) + drm_dp_tunnel_set_io_error(tunnel); + + return err; +} + +/** + * drm_dp_tunnel_alloc_bw - Allocate BW for a DP tunnel + * @tunnel: Tunnel object + * @bw: BW in kB/s units + * + * Allocate @bw kB/s for @tunnel. The allocated BW must be freed after use by + * calling this function for the same tunnel setting @bw to 0. + * + * Returns 0 in case of success, a negative error code otherwise. + */ +int drm_dp_tunnel_alloc_bw(struct drm_dp_tunnel *tunnel, int bw) +{ + int err; + + err = check_tunnel(tunnel); + if (err) + return err; + + return allocate_tunnel_bw(tunnel, bw); +} +EXPORT_SYMBOL(drm_dp_tunnel_alloc_bw); + +/** + * drm_dp_tunnel_atomic_get_allocated_bw - Get the BW allocated for a DP tunnel + * @tunnel: Tunnel object + * + * Get the current BW allocated for @tunnel. After the tunnel is created / + * resumed and the BW allocation mode is enabled for it, the allocation + * becomes determined only after the first allocation request by the driver + * calling drm_dp_tunnel_alloc_bw(). + * + * Return the BW allocated for the tunnel, or -1 if the allocation is + * undetermined. + */ +int drm_dp_tunnel_get_allocated_bw(struct drm_dp_tunnel *tunnel) +{ + return tunnel->allocated_bw; +} +EXPORT_SYMBOL(drm_dp_tunnel_get_allocated_bw); + +/* + * Return 0 if the status hasn't changed, 1 if the status has changed, a + * negative error code in case of an I/O failure. + */ +static int check_and_clear_status_change(struct drm_dp_tunnel *tunnel) +{ + u8 mask = DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED; + u8 val; + + if (drm_dp_dpcd_readb(tunnel->aux, DP_TUNNELING_STATUS, &val) < 0) + goto out_err; + + val &= mask; + + if (val) { + if (drm_dp_dpcd_writeb(tunnel->aux, DP_TUNNELING_STATUS, val) < 0) + goto out_err; + + return 1; + } + + if (!drm_dp_tunnel_bw_alloc_is_enabled(tunnel)) + return 0; + + /* + * Check for estimated BW changes explicitly to account for lost + * BW change notifications. 
+ */ + if (drm_dp_dpcd_readb(tunnel->aux, DP_ESTIMATED_BW, &val) < 0) + goto out_err; + + if (val * tunnel->bw_granularity != tunnel->estimated_bw) + return 1; + + return 0; + +out_err: + drm_dp_tunnel_set_io_error(tunnel); + + return -EIO; +} + +/** + * drm_dp_tunnel_update_state - Update DP tunnel SW state with the HW state + * @tunnel: Tunnel object + * + * Update the SW state of @tunnel with the HW state. + * + * Returns 0 if the state has not changed, 1 if it has changed and got updated + * successfully and a negative error code otherwise. + */ +int drm_dp_tunnel_update_state(struct drm_dp_tunnel *tunnel) +{ + struct drm_dp_tunnel_regs regs; + bool changed = false; + int ret; + + ret = check_tunnel(tunnel); + if (ret < 0) + return ret; + + ret = check_and_clear_status_change(tunnel); + if (ret < 0) + goto out; + + if (!ret) + return 0; + + ret = read_and_verify_tunnel_regs(tunnel, ®s, 0); + if (ret) + goto out; + + if (update_dprx_caps(tunnel, ®s)) + changed = true; + + ret = update_group_available_bw(tunnel, ®s); + if (ret == 1) + changed = true; + +out: + tun_dbg_stat(tunnel, ret < 0 ? ret : 0, + "State update: Changed:%s DPRX:%dx%d Tunnel alloc:%d/%d Group alloc:%d/%d Mb/s", + str_yes_no(changed), + tunnel->max_dprx_rate / 100, tunnel->max_dprx_lane_count, + DPTUN_BW_ARG(tunnel->allocated_bw), + DPTUN_BW_ARG(get_max_tunnel_bw(tunnel)), + DPTUN_BW_ARG(group_allocated_bw(tunnel->group)), + DPTUN_BW_ARG(tunnel->group->available_bw)); + + if (ret < 0) + return ret; + + if (changed) + return 1; + + return 0; +} +EXPORT_SYMBOL(drm_dp_tunnel_update_state); + +/* + * drm_dp_tunnel_handle_irq - Handle DP tunnel IRQs + * + * Handle any pending DP tunnel IRQs, waking up waiters for a completion + * event. + * + * Returns 1 if the state of the tunnel has changed which requires calling + * drm_dp_tunnel_update_state(), a negative error code in case of a failure, + * 0 otherwise. + */ +int drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_aux *aux) +{ + u8 val; + + if (drm_dp_dpcd_readb(aux, DP_TUNNELING_STATUS, &val) < 0) + return -EIO; + + if (val & (DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED)) + wake_up_all(&mgr->bw_req_queue); + + if (val & (DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED)) + return 1; + + return 0; +} +EXPORT_SYMBOL(drm_dp_tunnel_handle_irq); + +/** + * drm_dp_tunnel_max_dprx_rate - Query the maximum rate of the tunnel's DPRX + * @tunnel: Tunnel object + * + * The function is used to query the maximum link rate of the DPRX connected + * to @tunnel. Note that this rate will not be limited by the BW limit of the + * tunnel, as opposed to the standard and extended DP_MAX_LINK_RATE DPCD + * registers. + * + * Returns the maximum link rate in 10 kbit/s units. + */ +int drm_dp_tunnel_max_dprx_rate(const struct drm_dp_tunnel *tunnel) +{ + return tunnel->max_dprx_rate; +} +EXPORT_SYMBOL(drm_dp_tunnel_max_dprx_rate); + +/** + * drm_dp_tunnel_max_dprx_lane_count - Query the maximum lane count of the tunnel's DPRX + * @tunnel: Tunnel object + * + * The function is used to query the maximum lane count of the DPRX connected + * to @tunnel. Note that this lane count will not be limited by the BW limit of + * the tunnel, as opposed to the standard and extended DP_MAX_LANE_COUNT DPCD + * registers. + * + * Returns the maximum lane count. 
+ */ +int drm_dp_tunnel_max_dprx_lane_count(const struct drm_dp_tunnel *tunnel) +{ + return tunnel->max_dprx_lane_count; +} +EXPORT_SYMBOL(drm_dp_tunnel_max_dprx_lane_count); + +/** + * drm_dp_tunnel_available_bw - Query the estimated total available BW of the tunnel + * @tunnel: Tunnel object + * + * This function is used to query the estimated total available BW of the + * tunnel. This includes the currently allocated and free BW for all the + * tunnels in @tunnel's group. The available BW is valid only after the BW + * allocation mode has been enabled for the tunnel and its state got updated + * calling drm_dp_tunnel_update_state(). + * + * Returns the @tunnel group's estimated total available bandwidth in kB/s + * units, or -1 if the available BW isn't valid (the BW allocation mode is + * not enabled or the tunnel's state hasn't been updated). + */ +int drm_dp_tunnel_available_bw(const struct drm_dp_tunnel *tunnel) +{ + return tunnel->group->available_bw; +} +EXPORT_SYMBOL(drm_dp_tunnel_available_bw); + +static struct drm_dp_tunnel_group_state * +drm_dp_tunnel_atomic_get_group_state(struct drm_atomic_state *state, + const struct drm_dp_tunnel *tunnel) +{ + return (struct drm_dp_tunnel_group_state *) + drm_atomic_get_private_obj_state(state, + &tunnel->group->base); +} + +static struct drm_dp_tunnel_state * +add_tunnel_state(struct drm_dp_tunnel_group_state *group_state, + struct drm_dp_tunnel *tunnel) +{ + struct drm_dp_tunnel_state *tunnel_state; + + tun_dbg_atomic(tunnel, + "Adding state for tunnel %p to group state %p\n", + tunnel, group_state); + + tunnel_state = kzalloc(sizeof(*tunnel_state), GFP_KERNEL); + if (!tunnel_state) + return NULL; + + tunnel_state->group_state = group_state; + + drm_dp_tunnel_ref_get(tunnel, &tunnel_state->tunnel_ref); + + INIT_LIST_HEAD(&tunnel_state->node); + list_add(&tunnel_state->node, &group_state->tunnel_states); + + return tunnel_state; +} + +static void free_tunnel_state(struct drm_dp_tunnel_state *tunnel_state) +{ + tun_dbg_atomic(tunnel_state->tunnel_ref.tunnel, + "Freeing state for tunnel %p\n", + tunnel_state->tunnel_ref.tunnel); + + list_del(&tunnel_state->node); + + kfree(tunnel_state->stream_bw); + drm_dp_tunnel_ref_put(&tunnel_state->tunnel_ref); + + kfree(tunnel_state); +} + +static void free_group_state(struct drm_dp_tunnel_group_state *group_state) +{ + struct drm_dp_tunnel_state *tunnel_state; + struct drm_dp_tunnel_state *tunnel_state_tmp; + + for_each_tunnel_state_safe(group_state, tunnel_state, tunnel_state_tmp) + free_tunnel_state(tunnel_state); + + kfree(group_state); +} + +static struct drm_dp_tunnel_state * +get_tunnel_state(struct drm_dp_tunnel_group_state *group_state, + const struct drm_dp_tunnel *tunnel) +{ + struct drm_dp_tunnel_state *tunnel_state; + + for_each_tunnel_state(group_state, tunnel_state) + if (tunnel_state->tunnel_ref.tunnel == tunnel) + return tunnel_state; + + return NULL; +} + +static struct drm_dp_tunnel_state * +get_or_add_tunnel_state(struct drm_dp_tunnel_group_state *group_state, + struct drm_dp_tunnel *tunnel) +{ + struct drm_dp_tunnel_state *tunnel_state; + + tunnel_state = get_tunnel_state(group_state, tunnel); + if (tunnel_state) + return tunnel_state; + + return add_tunnel_state(group_state, tunnel); +} + +static struct drm_private_state * +tunnel_group_duplicate_state(struct drm_private_obj *obj) +{ + struct drm_dp_tunnel_group_state *group_state; + struct drm_dp_tunnel_state *tunnel_state; + + group_state = kzalloc(sizeof(*group_state), GFP_KERNEL); + if (!group_state) + return NULL; + + 
INIT_LIST_HEAD(&group_state->tunnel_states); + + __drm_atomic_helper_private_obj_duplicate_state(obj, &group_state->base); + + for_each_tunnel_state(to_group_state(obj->state), tunnel_state) { + struct drm_dp_tunnel_state *new_tunnel_state; + + new_tunnel_state = get_or_add_tunnel_state(group_state, + tunnel_state->tunnel_ref.tunnel); + if (!new_tunnel_state) + goto out_free_state; + + new_tunnel_state->stream_mask = tunnel_state->stream_mask; + new_tunnel_state->stream_bw = kmemdup(tunnel_state->stream_bw, + sizeof(*tunnel_state->stream_bw) * + hweight32(tunnel_state->stream_mask), + GFP_KERNEL); + + if (!new_tunnel_state->stream_bw) + goto out_free_state; + } + + return &group_state->base; + +out_free_state: + free_group_state(group_state); + + return NULL; +} + +static void tunnel_group_destroy_state(struct drm_private_obj *obj, struct drm_private_state *state) +{ + free_group_state(to_group_state(state)); +} + +static const struct drm_private_state_funcs tunnel_group_funcs = { + .atomic_duplicate_state = tunnel_group_duplicate_state, + .atomic_destroy_state = tunnel_group_destroy_state, +}; + +/** + * drm_dp_tunnel_atomic_get_state - get/allocate the new atomic state for a tunnel + * @state: Atomic state + * @tunnel: Tunnel to get the state for + * + * Get the new atomic state for @tunnel, duplicating it from the old tunnel + * state if not yet allocated. + * + * Return the state or an ERR_PTR() error on failure. + */ +struct drm_dp_tunnel_state * +drm_dp_tunnel_atomic_get_state(struct drm_atomic_state *state, + struct drm_dp_tunnel *tunnel) +{ + struct drm_dp_tunnel_group_state *group_state; + struct drm_dp_tunnel_state *tunnel_state; + + group_state = drm_dp_tunnel_atomic_get_group_state(state, tunnel); + if (IS_ERR(group_state)) + return ERR_CAST(group_state); + + tunnel_state = get_or_add_tunnel_state(group_state, tunnel); + if (!tunnel_state) + return ERR_PTR(-ENOMEM); + + return tunnel_state; +} +EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_state); + +/** + * drm_dp_tunnel_atomic_get_old_state - get the old atomic state for a tunnel + * @state: Atomic state + * @tunnel: Tunnel to get the state for + * + * Get the old atomic state for @tunnel. + * + * Return the old state or NULL if the tunnel's atomic state is not in @state. + */ +struct drm_dp_tunnel_state * +drm_dp_tunnel_atomic_get_old_state(struct drm_atomic_state *state, + const struct drm_dp_tunnel *tunnel) +{ + struct drm_dp_tunnel_group_state *old_group_state; + int i; + + for_each_old_group_in_state(state, old_group_state, i) + if (to_group(old_group_state->base.obj) == tunnel->group) + return get_tunnel_state(old_group_state, tunnel); + + return NULL; +} +EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_old_state); + +/** + * drm_dp_tunnel_atomic_get_new_state - get the new atomic state for a tunnel + * @state: Atomic state + * @tunnel: Tunnel to get the state for + * + * Get the new atomic state for @tunnel. + * + * Return the new state or NULL if the tunnel's atomic state is not in @state. 
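+ *
+ * An illustrative sketch of reading back the per-tunnel BW from the new
+ * state; the tunnel_state and required_bw variables are the caller's own,
+ * not part of this API:
+ *
+ *	tunnel_state = drm_dp_tunnel_atomic_get_new_state(state, tunnel);
+ *	if (tunnel_state)
+ *		required_bw = drm_dp_tunnel_atomic_get_required_bw(tunnel_state);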
+ */ +struct drm_dp_tunnel_state * +drm_dp_tunnel_atomic_get_new_state(struct drm_atomic_state *state, + const struct drm_dp_tunnel *tunnel) +{ + struct drm_dp_tunnel_group_state *new_group_state; + int i; + + for_each_new_group_in_state(state, new_group_state, i) + if (to_group(new_group_state->base.obj) == tunnel->group) + return get_tunnel_state(new_group_state, tunnel); + + return NULL; +} +EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_new_state); + +static bool init_group(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_tunnel_group *group) +{ + struct drm_dp_tunnel_group_state *group_state; + + group_state = kzalloc(sizeof(*group_state), GFP_KERNEL); + if (!group_state) + return false; + + INIT_LIST_HEAD(&group_state->tunnel_states); + + group->mgr = mgr; + group->available_bw = -1; + INIT_LIST_HEAD(&group->tunnels); + + drm_atomic_private_obj_init(mgr->dev, &group->base, &group_state->base, + &tunnel_group_funcs); + + return true; +} + +static void cleanup_group(struct drm_dp_tunnel_group *group) +{ + drm_atomic_private_obj_fini(&group->base); +} + +#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE +static void check_unique_stream_ids(const struct drm_dp_tunnel_group_state *group_state) +{ + const struct drm_dp_tunnel_state *tunnel_state; + u32 stream_mask = 0; + + for_each_tunnel_state(group_state, tunnel_state) { + drm_WARN(to_group(group_state->base.obj)->mgr->dev, + tunnel_state->stream_mask & stream_mask, + "[DPTUN %s]: conflicting stream IDs %x (IDs in other tunnels %x)\n", + tunnel_state->tunnel_ref.tunnel->name, + tunnel_state->stream_mask, + stream_mask); + + stream_mask |= tunnel_state->stream_mask; + } +} +#else +static void check_unique_stream_ids(const struct drm_dp_tunnel_group_state *group_state) +{ +} +#endif + +static int stream_id_to_idx(u32 stream_mask, u8 stream_id) +{ + return hweight32(stream_mask & (BIT(stream_id) - 1)); +} + +static int resize_bw_array(struct drm_dp_tunnel_state *tunnel_state, + unsigned long old_mask, unsigned long new_mask) +{ + unsigned long move_mask = old_mask & new_mask; + int *new_bws = NULL; + int id; + + WARN_ON(!new_mask); + + if (old_mask == new_mask) + return 0; + + new_bws = kcalloc(hweight32(new_mask), sizeof(*new_bws), GFP_KERNEL); + if (!new_bws) + return -ENOMEM; + + for_each_set_bit(id, &move_mask, BITS_PER_TYPE(move_mask)) + new_bws[stream_id_to_idx(new_mask, id)] = + tunnel_state->stream_bw[stream_id_to_idx(old_mask, id)]; + + kfree(tunnel_state->stream_bw); + tunnel_state->stream_bw = new_bws; + tunnel_state->stream_mask = new_mask; + + return 0; +} + +static int set_stream_bw(struct drm_dp_tunnel_state *tunnel_state, + u8 stream_id, int bw) +{ + int err; + + err = resize_bw_array(tunnel_state, + tunnel_state->stream_mask, + tunnel_state->stream_mask | BIT(stream_id)); + if (err) + return err; + + tunnel_state->stream_bw[stream_id_to_idx(tunnel_state->stream_mask, stream_id)] = bw; + + return 0; +} + +static int clear_stream_bw(struct drm_dp_tunnel_state *tunnel_state, + u8 stream_id) +{ + if (!(tunnel_state->stream_mask & ~BIT(stream_id))) { + free_tunnel_state(tunnel_state); + return 0; + } + + return resize_bw_array(tunnel_state, + tunnel_state->stream_mask, + tunnel_state->stream_mask & ~BIT(stream_id)); +} + +/** + * drm_dp_tunnel_atomic_set_stream_bw - Set the BW for a DP tunnel stream + * @state: Atomic state + * @tunnel: DP tunnel containing the stream + * @stream_id: Stream ID + * @bw: BW of the stream + * + * Set a DP tunnel stream's required BW in the atomic state. 
+ * + * Returns 0 in case of success, a negative error code otherwise. + */ +int drm_dp_tunnel_atomic_set_stream_bw(struct drm_atomic_state *state, + struct drm_dp_tunnel *tunnel, + u8 stream_id, int bw) +{ + struct drm_dp_tunnel_group_state *new_group_state; + struct drm_dp_tunnel_state *tunnel_state; + int err; + + if (drm_WARN_ON(tunnel->group->mgr->dev, + stream_id > BITS_PER_TYPE(tunnel_state->stream_mask))) + return -EINVAL; + + tun_dbg(tunnel, + "Setting %d Mb/s for stream %d\n", + DPTUN_BW_ARG(bw), stream_id); + + new_group_state = drm_dp_tunnel_atomic_get_group_state(state, tunnel); + if (IS_ERR(new_group_state)) + return PTR_ERR(new_group_state); + + if (bw == 0) { + tunnel_state = get_tunnel_state(new_group_state, tunnel); + if (!tunnel_state) + return 0; + + return clear_stream_bw(tunnel_state, stream_id); + } + + tunnel_state = get_or_add_tunnel_state(new_group_state, tunnel); + if (drm_WARN_ON(state->dev, !tunnel_state)) + return -EINVAL; + + err = set_stream_bw(tunnel_state, stream_id, bw); + if (err) + return err; + + check_unique_stream_ids(new_group_state); + + return 0; +} +EXPORT_SYMBOL(drm_dp_tunnel_atomic_set_stream_bw); + +/** + * drm_dp_tunnel_atomic_get_required_bw - Get the BW required by a DP tunnel + * @tunnel_state: Atomic state of the queried tunnel + * + * Calculate the BW required by a tunnel adding up the required BW of all + * the streams in the tunnel. + * + * Return the total BW required by the tunnel. + */ +int drm_dp_tunnel_atomic_get_required_bw(const struct drm_dp_tunnel_state *tunnel_state) +{ + int tunnel_bw = 0; + int i; + + if (!tunnel_state || !tunnel_state->stream_mask) + return 0; + + for (i = 0; i < hweight32(tunnel_state->stream_mask); i++) + tunnel_bw += tunnel_state->stream_bw[i]; + + return tunnel_bw; +} +EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_required_bw); + +/** + * drm_dp_tunnel_atomic_get_group_streams_in_state - Get mask of stream IDs in a group + * @state: Atomic state + * @tunnel: Tunnel object + * @stream_mask: Mask of streams in @tunnel's group + * + * Get the mask of all the stream IDs in the tunnel group of @tunnel. + * + * Return 0 in case of success - with the stream IDs in @stream_mask - or a + * negative error code in case of failure. + */ +int drm_dp_tunnel_atomic_get_group_streams_in_state(struct drm_atomic_state *state, + const struct drm_dp_tunnel *tunnel, + u32 *stream_mask) +{ + struct drm_dp_tunnel_group_state *group_state; + struct drm_dp_tunnel_state *tunnel_state; + + group_state = drm_dp_tunnel_atomic_get_group_state(state, tunnel); + if (IS_ERR(group_state)) + return PTR_ERR(group_state); + + *stream_mask = 0; + for_each_tunnel_state(group_state, tunnel_state) + *stream_mask |= tunnel_state->stream_mask; + + return 0; +} +EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_group_streams_in_state); + +static int +drm_dp_tunnel_atomic_check_group_bw(struct drm_dp_tunnel_group_state *new_group_state, + u32 *failed_stream_mask) +{ + struct drm_dp_tunnel_group *group = to_group(new_group_state->base.obj); + struct drm_dp_tunnel_state *new_tunnel_state; + u32 group_stream_mask = 0; + int group_bw = 0; + + for_each_tunnel_state(new_group_state, new_tunnel_state) { + struct drm_dp_tunnel *tunnel = new_tunnel_state->tunnel_ref.tunnel; + int max_dprx_bw = get_max_dprx_bw(tunnel); + int tunnel_bw = drm_dp_tunnel_atomic_get_required_bw(new_tunnel_state); + + tun_dbg(tunnel, + "%sRequired %d/%d Mb/s total for tunnel.\n", + tunnel_bw > max_dprx_bw ? 
"Not enough BW: " : "", + DPTUN_BW_ARG(tunnel_bw), + DPTUN_BW_ARG(max_dprx_bw)); + + if (tunnel_bw > max_dprx_bw) { + *failed_stream_mask = new_tunnel_state->stream_mask; + return -ENOSPC; + } + + group_bw += min(roundup(tunnel_bw, tunnel->bw_granularity), + max_dprx_bw); + group_stream_mask |= new_tunnel_state->stream_mask; + } + + tun_grp_dbg(group, + "%sRequired %d/%d Mb/s total for tunnel group.\n", + group_bw > group->available_bw ? "Not enough BW: " : "", + DPTUN_BW_ARG(group_bw), + DPTUN_BW_ARG(group->available_bw)); + + if (group_bw > group->available_bw) { + *failed_stream_mask = group_stream_mask; + return -ENOSPC; + } + + return 0; +} + +/** + * drm_dp_tunnel_atomic_check_stream_bws - Check BW limit for all streams in state + * @state: Atomic state + * @failed_stream_mask: Mask of stream IDs with a BW limit failure + * + * Check the required BW of each DP tunnel in @state against both the DPRX BW + * limit of the tunnel and the BW limit of the tunnel group. Return a mask of + * stream IDs in @failed_stream_mask once a check fails. The mask will contain + * either all the streams in a tunnel (in case a DPRX BW limit check failed) or + * all the streams in a tunnel group (in case a group BW limit check failed). + * + * Return 0 if all the BW limit checks passed, -ENOSPC in case a BW limit + * check failed - with @failed_stream_mask containing the streams failing the + * check - or a negative error code otherwise. + */ +int drm_dp_tunnel_atomic_check_stream_bws(struct drm_atomic_state *state, + u32 *failed_stream_mask) +{ + struct drm_dp_tunnel_group_state *new_group_state; + int i; + + for_each_new_group_in_state(state, new_group_state, i) { + int ret; + + ret = drm_dp_tunnel_atomic_check_group_bw(new_group_state, + failed_stream_mask); + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL(drm_dp_tunnel_atomic_check_stream_bws); + +static void destroy_mgr(struct drm_dp_tunnel_mgr *mgr) +{ + int i; + + for (i = 0; i < mgr->group_count; i++) { + cleanup_group(&mgr->groups[i]); + drm_WARN_ON(mgr->dev, !list_empty(&mgr->groups[i].tunnels)); + } + +#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE + ref_tracker_dir_exit(&mgr->ref_tracker); +#endif + + kfree(mgr->groups); + kfree(mgr); +} + +/** + * drm_dp_tunnel_mgr_create - Create a DP tunnel manager + * @dev: DRM device object + * + * Creates a DP tunnel manager for @dev. + * + * Returns a pointer to the tunnel manager if created successfully or NULL in + * case of an error. + */ +struct drm_dp_tunnel_mgr * +drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count) +{ + struct drm_dp_tunnel_mgr *mgr; + int i; + + mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); + if (!mgr) + return NULL; + + mgr->dev = dev; + init_waitqueue_head(&mgr->bw_req_queue); + + mgr->groups = kcalloc(max_group_count, sizeof(*mgr->groups), GFP_KERNEL); + if (!mgr->groups) { + kfree(mgr); + + return NULL; + } + +#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE + ref_tracker_dir_init(&mgr->ref_tracker, 16, "dptun"); +#endif + + for (i = 0; i < max_group_count; i++) { + if (!init_group(mgr, &mgr->groups[i])) { + destroy_mgr(mgr); + + return NULL; + } + + mgr->group_count++; + } + + return mgr; +} +EXPORT_SYMBOL(drm_dp_tunnel_mgr_create); + +/** + * drm_dp_tunnel_mgr_destroy - Destroy DP tunnel manager + * @mgr: Tunnel manager object + * + * Destroy the tunnel manager. 
+ */ +void drm_dp_tunnel_mgr_destroy(struct drm_dp_tunnel_mgr *mgr) +{ + destroy_mgr(mgr); +} +EXPORT_SYMBOL(drm_dp_tunnel_mgr_destroy); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c index b106e8b288ad..9bf47327f436 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c @@ -54,6 +54,7 @@ static const struct { ST(0x1480, 8), ST(0x1500, 8), ST(0x1520, 8), + ST(0x1540, 8), ST(0x1608, 1), ST(0x1610, 1), ST(0x1658, 1), diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 6228ce603248..6500f3999c5f 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -29,6 +29,17 @@ * DRM operations: */ +static struct device_node *etnaviv_of_first_available_node(void) +{ + struct device_node *np; + + for_each_compatible_node(np, NULL, "vivante,gc") { + if (of_device_is_available(np)) + return np; + } + + return NULL; +} static void load_gpu(struct drm_device *dev) { @@ -79,7 +90,7 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file) drm_sched_entity_init(&ctx->sched_entity[i], DRM_SCHED_PRIORITY_NORMAL, &sched, 1, NULL); - } + } } file->driver_priv = ctx; @@ -233,11 +244,11 @@ static int show_each_gpu(struct seq_file *m, void *arg) } static struct drm_info_list etnaviv_debugfs_list[] = { - {"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs}, - {"gem", show_unlocked, 0, etnaviv_gem_show}, - { "mm", show_unlocked, 0, etnaviv_mm_show }, - {"mmu", show_each_gpu, 0, etnaviv_mmu_show}, - {"ring", show_each_gpu, 0, etnaviv_ring_show}, + {"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs}, + {"gem", show_unlocked, 0, etnaviv_gem_show}, + { "mm", show_unlocked, 0, etnaviv_mm_show }, + {"mmu", show_each_gpu, 0, etnaviv_mmu_show}, + {"ring", show_each_gpu, 0, etnaviv_ring_show}, }; static void etnaviv_debugfs_init(struct drm_minor *minor) @@ -494,7 +505,7 @@ static const struct drm_driver etnaviv_drm_driver = { .desc = "etnaviv DRM", .date = "20151214", .major = 1, - .minor = 3, + .minor = 4, }; /* @@ -597,9 +608,6 @@ static int etnaviv_pdev_probe(struct platform_device *pdev) if (!of_device_is_available(core_node)) continue; - if (!first_node) - first_node = core_node; - drm_of_component_match_add(&pdev->dev, &match, component_compare_of, core_node); } @@ -634,8 +642,11 @@ static int etnaviv_pdev_probe(struct platform_device *pdev) * device as the GPU we found. This assumes that all Vivante * GPUs in the system share the same DMA constraints. 
*/ - if (first_node) + first_node = etnaviv_of_first_available_node(); + if (first_node) { of_dma_configure(&pdev->dev, first_node, true); + of_node_put(first_node); + } return component_master_add_with_match(dev, &etnaviv_master_ops, match); } @@ -653,11 +664,43 @@ static struct platform_driver etnaviv_platform_driver = { }, }; +static int etnaviv_create_platform_device(const char *name, + struct platform_device **ppdev) +{ + struct platform_device *pdev; + int ret; + + pdev = platform_device_alloc(name, PLATFORM_DEVID_NONE); + if (!pdev) + return -ENOMEM; + + ret = platform_device_add(pdev); + if (ret) { + platform_device_put(pdev); + return ret; + } + + *ppdev = pdev; + + return 0; +} + +static void etnaviv_destroy_platform_device(struct platform_device **ppdev) +{ + struct platform_device *pdev = *ppdev; + + if (!pdev) + return; + + platform_device_unregister(pdev); + + *ppdev = NULL; +} + static struct platform_device *etnaviv_drm; static int __init etnaviv_init(void) { - struct platform_device *pdev; int ret; struct device_node *np; @@ -675,27 +718,13 @@ static int __init etnaviv_init(void) * If the DT contains at least one available GPU device, instantiate * the DRM platform device. */ - for_each_compatible_node(np, NULL, "vivante,gc") { - if (!of_device_is_available(np)) - continue; - - pdev = platform_device_alloc("etnaviv", PLATFORM_DEVID_NONE); - if (!pdev) { - ret = -ENOMEM; - of_node_put(np); - goto unregister_platform_driver; - } - - ret = platform_device_add(pdev); - if (ret) { - platform_device_put(pdev); - of_node_put(np); - goto unregister_platform_driver; - } - - etnaviv_drm = pdev; + np = etnaviv_of_first_available_node(); + if (np) { of_node_put(np); - break; + + ret = etnaviv_create_platform_device("etnaviv", &etnaviv_drm); + if (ret) + goto unregister_platform_driver; } return 0; @@ -710,7 +739,7 @@ module_init(etnaviv_init); static void __exit etnaviv_exit(void) { - platform_device_unregister(etnaviv_drm); + etnaviv_destroy_platform_device(&etnaviv_drm); platform_driver_unregister(&etnaviv_platform_driver); platform_driver_unregister(&etnaviv_gpu_driver); } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index b5f73502e3dd..71a6d2b1c80f 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -100,11 +100,10 @@ struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj) if (!etnaviv_obj->sgt) { struct drm_device *dev = etnaviv_obj->base.dev; - int npages = etnaviv_obj->base.size >> PAGE_SHIFT; + unsigned int npages = etnaviv_obj->base.size >> PAGE_SHIFT; struct sg_table *sgt; - sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev, - etnaviv_obj->pages, npages); + sgt = drm_prime_pages_to_sg(dev, etnaviv_obj->pages, npages); if (IS_ERR(sgt)) { dev_err(dev->dev, "failed to allocate sgt: %ld\n", PTR_ERR(sgt)); @@ -542,7 +541,7 @@ static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = { .vm_ops = &vm_ops, }; -static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags, +static int etnaviv_gem_new_impl(struct drm_device *dev, u32 flags, const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj) { struct etnaviv_gem_object *etnaviv_obj; @@ -591,8 +590,7 @@ int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file, size = PAGE_ALIGN(size); - ret = etnaviv_gem_new_impl(dev, size, flags, - &etnaviv_gem_shmem_ops, &obj); + ret = etnaviv_gem_new_impl(dev, flags, &etnaviv_gem_shmem_ops, &obj); if (ret) goto fail; @@ -627,7 +625,7 @@ 
int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags, struct drm_gem_object *obj; int ret; - ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj); + ret = etnaviv_gem_new_impl(dev, flags, ops, &obj); if (ret) return ret; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index 9b8445d2a128..734412aae94d 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -164,6 +164,26 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value) *value = gpu->identity.eco_id; break; + case ETNAVIV_PARAM_GPU_NN_CORE_COUNT: + *value = gpu->identity.nn_core_count; + break; + + case ETNAVIV_PARAM_GPU_NN_MAD_PER_CORE: + *value = gpu->identity.nn_mad_per_core; + break; + + case ETNAVIV_PARAM_GPU_TP_CORE_COUNT: + *value = gpu->identity.tp_core_count; + break; + + case ETNAVIV_PARAM_GPU_ON_CHIP_SRAM_SIZE: + *value = gpu->identity.on_chip_sram_size; + break; + + case ETNAVIV_PARAM_GPU_AXI_SRAM_SIZE: + *value = gpu->identity.axi_sram_size; + break; + default: DBG("%s: invalid param: %u", dev_name(gpu->dev), param); return -EINVAL; @@ -513,8 +533,19 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu) timeout = jiffies + msecs_to_jiffies(1000); while (time_is_after_jiffies(timeout)) { - /* enable clock */ unsigned int fscale = 1 << (6 - gpu->freq_scale); + u32 pulse_eater = 0x01590880; + + /* disable clock gating */ + gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, 0x0); + + /* disable pulse eater */ + pulse_eater |= BIT(17); + gpu_write_power(gpu, VIVS_PM_PULSE_EATER, pulse_eater); + pulse_eater |= BIT(0); + gpu_write_power(gpu, VIVS_PM_PULSE_EATER, pulse_eater); + + /* enable clock */ control = VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale); etnaviv_gpu_load_clock(gpu, control); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index 197e0037732e..7d5e9158e13c 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h @@ -54,6 +54,18 @@ struct etnaviv_chip_identity { /* Number of Neural Network cores. */ u32 nn_core_count; + /* Number of MAD units per Neural Network core. */ + u32 nn_mad_per_core; + + /* Number of Tensor Processing cores. */ + u32 tp_core_count; + + /* Size in bytes of the SRAM inside the NPU. */ + u32 on_chip_sram_size; + + /* Size in bytes of the SRAM across the AXI bus. */ + u32 axi_sram_size; + /* Size of the vertex cache. 
*/ u32 vertex_cache_size; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c index 67201242438b..d8e7334de8ce 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c @@ -17,6 +17,10 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = { .thread_count = 128, .shader_core_count = 1, .nn_core_count = 0, + .nn_mad_per_core = 0, + .tp_core_count = 0, + .on_chip_sram_size = 0, + .axi_sram_size = 0, .vertex_cache_size = 8, .vertex_output_buffer_size = 1024, .pixel_pipes = 1, @@ -48,6 +52,11 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = { .register_max = 64, .thread_count = 256, .shader_core_count = 1, + .nn_core_count = 0, + .nn_mad_per_core = 0, + .tp_core_count = 0, + .on_chip_sram_size = 0, + .axi_sram_size = 0, .vertex_cache_size = 8, .vertex_output_buffer_size = 512, .pixel_pipes = 1, @@ -80,6 +89,10 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = { .thread_count = 512, .shader_core_count = 2, .nn_core_count = 0, + .nn_mad_per_core = 0, + .tp_core_count = 0, + .on_chip_sram_size = 0, + .axi_sram_size = 0, .vertex_cache_size = 16, .vertex_output_buffer_size = 1024, .pixel_pipes = 1, @@ -112,6 +125,10 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = { .thread_count = 512, .shader_core_count = 2, .nn_core_count = 0, + .nn_mad_per_core = 0, + .tp_core_count = 0, + .on_chip_sram_size = 0, + .axi_sram_size = 0, .vertex_cache_size = 16, .vertex_output_buffer_size = 1024, .pixel_pipes = 1, @@ -143,6 +160,11 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = { .register_max = 64, .thread_count = 512, .shader_core_count = 2, + .nn_core_count = 0, + .nn_mad_per_core = 0, + .tp_core_count = 0, + .on_chip_sram_size = 0, + .axi_sram_size = 0, .vertex_cache_size = 16, .vertex_output_buffer_size = 1024, .pixel_pipes = 1, @@ -175,6 +197,10 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = { .thread_count = 1024, .shader_core_count = 4, .nn_core_count = 0, + .nn_mad_per_core = 0, + .tp_core_count = 0, + .on_chip_sram_size = 0, + .axi_sram_size = 0, .vertex_cache_size = 16, .vertex_output_buffer_size = 1024, .pixel_pipes = 2, @@ -207,6 +233,10 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = { .thread_count = 256, .shader_core_count = 1, .nn_core_count = 8, + .nn_mad_per_core = 64, + .tp_core_count = 4, + .on_chip_sram_size = 524288, + .axi_sram_size = 1048576, .vertex_cache_size = 16, .vertex_output_buffer_size = 1024, .pixel_pipes = 1, @@ -239,6 +269,10 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = { .thread_count = 256, .shader_core_count = 1, .nn_core_count = 6, + .nn_mad_per_core = 64, + .tp_core_count = 3, + .on_chip_sram_size = 262144, + .axi_sram_size = 0, .vertex_cache_size = 16, .vertex_output_buffer_size = 1024, .pixel_pipes = 1, @@ -265,6 +299,9 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = { bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu) { struct etnaviv_chip_identity *ident = &gpu->identity; + const u32 product_id = ident->product_id; + const u32 customer_id = ident->customer_id; + const u32 eco_id = ident->eco_id; int i; for (i = 0; i < ARRAY_SIZE(etnaviv_chip_identities); i++) { @@ -278,6 +315,12 @@ bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu) etnaviv_chip_identities[i].eco_id == ~0U)) { memcpy(ident, &etnaviv_chip_identities[i], sizeof(*ident)); + + /* Restore some 
id values as ~0U aka 'don't care' might been used. */ + ident->product_id = product_id; + ident->customer_id = customer_id; + ident->eco_id = eco_id; + return true; } } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c index 4fa72567183a..1661d589bf3e 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c @@ -70,7 +70,7 @@ static int etnaviv_context_map(struct etnaviv_iommu_context *context, } static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova, - struct sg_table *sgt, unsigned len, int prot) + struct sg_table *sgt, int prot) { struct scatterlist *sg; unsigned int da = iova; unsigned int i; @@ -314,7 +314,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context, goto unlock; mapping->iova = node->start; - ret = etnaviv_iommu_map(context, node->start, sgt, etnaviv_obj->base.size, + ret = etnaviv_iommu_map(context, node->start, sgt, ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE); if (ret < 0) { diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c index bafdfe49c1d8..dc9dea664a28 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c @@ -511,7 +511,7 @@ int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu, domain->id = domain->iter; domain->nr_signals = dom->nr_signals; - strncpy(domain->name, dom->name, sizeof(domain->name)); + strscpy_pad(domain->name, dom->name, sizeof(domain->name)); domain->iter++; if (domain->iter == nr_domains) @@ -540,7 +540,7 @@ int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu, sig = &dom->signal[signal->iter]; signal->id = signal->iter; - strncpy(signal->name, sig->name, sizeof(signal->name)); + strscpy_pad(signal->name, sig->name, sizeof(signal->name)); signal->iter++; if (signal->iter == dom->nr_signals) diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 3089029abba4..5932024f8f95 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -155,6 +155,20 @@ config DRM_I915_PXP protected session and manage the status of the alive software session, as well as its life cycle. +config DRM_I915_DP_TUNNEL + bool "Enable DP tunnel support" + depends on DRM_I915 + depends on USB4 + select DRM_DISPLAY_DP_TUNNEL + default y + help + Choose this option to detect DP tunnels and enable the Bandwidth + Allocation mode for such tunnels. This allows using the maximum + resolution allowed by the link BW on all displays sharing the + link BW, for instance on a Thunderbolt link. + + If in doubt, say "Y". 
+ menu "drm/i915 Debugging" depends on DRM_I915 depends on EXPERT diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug index 5b7162076850..bc18e2d9ea05 100644 --- a/drivers/gpu/drm/i915/Kconfig.debug +++ b/drivers/gpu/drm/i915/Kconfig.debug @@ -28,6 +28,7 @@ config DRM_I915_DEBUG select STACKDEPOT select STACKTRACE select DRM_DP_AUX_CHARDEV + select DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE if DRM_I915_DP_TUNNEL select X86_MSR # used by igt/pm_rpm select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks) select DRM_DEBUG_MM if DRM=y diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index c13f14edb508..3ef6ed41e62b 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -369,6 +369,9 @@ i915-y += \ display/vlv_dsi.o \ display/vlv_dsi_pll.o +i915-$(CONFIG_DRM_I915_DP_TUNNEL) += \ + display/intel_dp_tunnel.o + i915-y += \ i915_perf.o diff --git a/drivers/gpu/drm/i915/display/dvo_ch7017.c b/drivers/gpu/drm/i915/display/dvo_ch7017.c index 0589994dde11..d0c3880d7f80 100644 --- a/drivers/gpu/drm/i915/display/dvo_ch7017.c +++ b/drivers/gpu/drm/i915/display/dvo_ch7017.c @@ -205,7 +205,7 @@ static bool ch7017_init(struct intel_dvo_device *dvo, const char *str; u8 val; - priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL); + priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (priv == NULL) return false; diff --git a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c index 6d948520e9a6..2e8e85da5a40 100644 --- a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c +++ b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c @@ -216,7 +216,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo, u8 vendor, device; char *name, *devid; - ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL); + ch7xxx = kzalloc(sizeof(*ch7xxx), GFP_KERNEL); if (ch7xxx == NULL) return false; diff --git a/drivers/gpu/drm/i915/display/dvo_ivch.c b/drivers/gpu/drm/i915/display/dvo_ivch.c index f43d8c610d3f..eef72bb3b767 100644 --- a/drivers/gpu/drm/i915/display/dvo_ivch.c +++ b/drivers/gpu/drm/i915/display/dvo_ivch.c @@ -267,7 +267,7 @@ static bool ivch_init(struct intel_dvo_device *dvo, u16 temp; int i; - priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL); + priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (priv == NULL) return false; diff --git a/drivers/gpu/drm/i915/display/dvo_ns2501.c b/drivers/gpu/drm/i915/display/dvo_ns2501.c index a724a8755673..1df212fb000e 100644 --- a/drivers/gpu/drm/i915/display/dvo_ns2501.c +++ b/drivers/gpu/drm/i915/display/dvo_ns2501.c @@ -476,7 +476,7 @@ static bool ns2501_init(struct intel_dvo_device *dvo, struct ns2501_priv *ns; unsigned char ch; - ns = kzalloc(sizeof(struct ns2501_priv), GFP_KERNEL); + ns = kzalloc(sizeof(*ns), GFP_KERNEL); if (ns == NULL) return false; @@ -551,7 +551,7 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo, const struct drm_display_mode *adjusted_mode) { const struct ns2501_configuration *conf; - struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); + struct ns2501_priv *ns = dvo->dev_priv; int mode_idx, i; DRM_DEBUG_KMS @@ -655,7 +655,7 @@ static bool ns2501_get_hw_state(struct intel_dvo_device *dvo) /* set the NS2501 power state */ static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable) { - struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); + struct ns2501_priv *ns = dvo->dev_priv; DRM_DEBUG_KMS("Trying set the dpms of the DVO to %i\n", enable); diff --git a/drivers/gpu/drm/i915/display/dvo_sil164.c 
b/drivers/gpu/drm/i915/display/dvo_sil164.c index 4acc8ce29c0b..6c461024c8e3 100644 --- a/drivers/gpu/drm/i915/display/dvo_sil164.c +++ b/drivers/gpu/drm/i915/display/dvo_sil164.c @@ -141,7 +141,7 @@ static bool sil164_init(struct intel_dvo_device *dvo, struct sil164_priv *sil; unsigned char ch; - sil = kzalloc(sizeof(struct sil164_priv), GFP_KERNEL); + sil = kzalloc(sizeof(*sil), GFP_KERNEL); if (sil == NULL) return false; diff --git a/drivers/gpu/drm/i915/display/dvo_tfp410.c b/drivers/gpu/drm/i915/display/dvo_tfp410.c index 009d65b0f3e9..0939e097f4f9 100644 --- a/drivers/gpu/drm/i915/display/dvo_tfp410.c +++ b/drivers/gpu/drm/i915/display/dvo_tfp410.c @@ -173,7 +173,7 @@ static bool tfp410_init(struct intel_dvo_device *dvo, struct tfp410_priv *tfp; int id; - tfp = kzalloc(sizeof(struct tfp410_priv), GFP_KERNEL); + tfp = kzalloc(sizeof(*tfp), GFP_KERNEL); if (tfp == NULL) return false; diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c index 11ca9572e8b3..628e7192ebc9 100644 --- a/drivers/gpu/drm/i915/display/i9xx_wm.c +++ b/drivers/gpu/drm/i915/display/i9xx_wm.c @@ -70,26 +70,25 @@ static const struct cxsr_latency cxsr_latency_table[] = { {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */ }; -static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop, - bool is_ddr3, - int fsb, - int mem) +static const struct cxsr_latency *intel_get_cxsr_latency(struct drm_i915_private *i915) { - const struct cxsr_latency *latency; int i; - if (fsb == 0 || mem == 0) + if (i915->fsb_freq == 0 || i915->mem_freq == 0) return NULL; for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { - latency = &cxsr_latency_table[i]; + const struct cxsr_latency *latency = &cxsr_latency_table[i]; + bool is_desktop = !IS_MOBILE(i915); + if (is_desktop == latency->is_desktop && - is_ddr3 == latency->is_ddr3 && - fsb == latency->fsb_freq && mem == latency->mem_freq) + i915->is_ddr3 == latency->is_ddr3 && + i915->fsb_freq == latency->fsb_freq && + i915->mem_freq == latency->mem_freq) return latency; } - DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); + drm_dbg_kms(&i915->drm, "Unknown FSB/MEM found, disable CxSR\n"); return NULL; } @@ -525,6 +524,7 @@ static unsigned int intel_wm_method2(unsigned int pixel_rate, /** * intel_calculate_wm - calculate watermark level + * @i915: the device * @pixel_rate: pixel clock * @wm: chip FIFO params * @fifo_size: size of the FIFO buffer @@ -542,7 +542,8 @@ static unsigned int intel_wm_method2(unsigned int pixel_rate, * past the watermark point. If the FIFO drains completely, a FIFO underrun * will occur, and a display engine hang could result. */ -static unsigned int intel_calculate_wm(int pixel_rate, +static unsigned int intel_calculate_wm(struct drm_i915_private *i915, + int pixel_rate, const struct intel_watermark_params *wm, int fifo_size, int cpp, unsigned int latency_ns) @@ -559,10 +560,10 @@ static unsigned int intel_calculate_wm(int pixel_rate, latency_ns / 100); entries = DIV_ROUND_UP(entries, wm->cacheline_size) + wm->guard_size; - DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries); + drm_dbg_kms(&i915->drm, "FIFO entries required for mode: %d\n", entries); wm_size = fifo_size - entries; - DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size); + drm_dbg_kms(&i915->drm, "FIFO watermark level: %d\n", wm_size); /* Don't promote wm_size to unsigned... 
*/ if (wm_size > wm->max_wm) @@ -634,10 +635,7 @@ static void pnv_update_wm(struct drm_i915_private *dev_priv) u32 reg; unsigned int wm; - latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv), - dev_priv->is_ddr3, - dev_priv->fsb_freq, - dev_priv->mem_freq); + latency = intel_get_cxsr_latency(dev_priv); if (!latency) { drm_dbg_kms(&dev_priv->drm, "Unknown FSB/MEM found, disable CxSR\n"); @@ -653,7 +651,8 @@ static void pnv_update_wm(struct drm_i915_private *dev_priv) int cpp = fb->format->cpp[0]; /* Display SR */ - wm = intel_calculate_wm(pixel_rate, &pnv_display_wm, + wm = intel_calculate_wm(dev_priv, pixel_rate, + &pnv_display_wm, pnv_display_wm.fifo_size, cpp, latency->display_sr); reg = intel_uncore_read(&dev_priv->uncore, DSPFW1); @@ -663,20 +662,23 @@ static void pnv_update_wm(struct drm_i915_private *dev_priv) drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg); /* cursor SR */ - wm = intel_calculate_wm(pixel_rate, &pnv_cursor_wm, + wm = intel_calculate_wm(dev_priv, pixel_rate, + &pnv_cursor_wm, pnv_display_wm.fifo_size, 4, latency->cursor_sr); intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_CURSOR_SR_MASK, FW_WM(wm, CURSOR_SR)); /* Display HPLL off SR */ - wm = intel_calculate_wm(pixel_rate, &pnv_display_hplloff_wm, + wm = intel_calculate_wm(dev_priv, pixel_rate, + &pnv_display_hplloff_wm, pnv_display_hplloff_wm.fifo_size, cpp, latency->display_hpll_disable); intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR)); /* cursor HPLL off SR */ - wm = intel_calculate_wm(pixel_rate, &pnv_cursor_hplloff_wm, + wm = intel_calculate_wm(dev_priv, pixel_rate, + &pnv_cursor_hplloff_wm, pnv_display_hplloff_wm.fifo_size, 4, latency->cursor_hpll_disable); reg = intel_uncore_read(&dev_priv->uncore, DSPFW3); @@ -2124,7 +2126,7 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) else cpp = fb->format->cpp[0]; - planea_wm = intel_calculate_wm(crtc->config->pixel_rate, + planea_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate, wm_info, fifo_size, cpp, pessimal_latency_ns); } else { @@ -2151,7 +2153,7 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) else cpp = fb->format->cpp[0]; - planeb_wm = intel_calculate_wm(crtc->config->pixel_rate, + planeb_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate, wm_info, fifo_size, cpp, pessimal_latency_ns); } else { @@ -2245,7 +2247,7 @@ static void i845_update_wm(struct drm_i915_private *dev_priv) if (crtc == NULL) return; - planea_wm = intel_calculate_wm(crtc->config->pixel_rate, + planea_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate, &i845_wm_info, i845_get_fifo_size(dev_priv, PLANE_A), 4, pessimal_latency_ns); @@ -2531,7 +2533,8 @@ static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv, max->fbc = ilk_fbc_wm_reg_max(dev_priv); } -static bool ilk_validate_wm_level(int level, +static bool ilk_validate_wm_level(struct drm_i915_private *i915, + int level, const struct ilk_wm_maximums *max, struct intel_wm_level *result) { @@ -2554,14 +2557,17 @@ static bool ilk_validate_wm_level(int level, */ if (level == 0 && !result->enable) { if (result->pri_val > max->pri) - DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n", - level, result->pri_val, max->pri); + drm_dbg_kms(&i915->drm, + "Primary WM%d too large %u (max %u)\n", + level, result->pri_val, max->pri); if (result->spr_val > max->spr) - DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n", - level, result->spr_val, max->spr); + drm_dbg_kms(&i915->drm, + "Sprite WM%d too large %u (max %u)\n", + 
level, result->spr_val, max->spr); if (result->cur_val > max->cur) - DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n", - level, result->cur_val, max->cur); + drm_dbg_kms(&i915->drm, + "Cursor WM%d too large %u (max %u)\n", + level, result->cur_val, max->cur); result->pri_val = min_t(u32, result->pri_val, max->pri); result->spr_val = min_t(u32, result->spr_val, max->spr); @@ -2761,7 +2767,7 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) } } -static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv, +static bool ilk_validate_pipe_wm(struct drm_i915_private *dev_priv, struct intel_pipe_wm *pipe_wm) { /* LP0 watermark maximums depend on this pipe alone */ @@ -2776,7 +2782,7 @@ static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv, ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max); /* At least LP0 must be valid */ - if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) { + if (!ilk_validate_wm_level(dev_priv, 0, &max, &pipe_wm->wm[0])) { drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n"); return false; } @@ -2845,7 +2851,7 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state, * register maximums since such watermarks are * always invalid. */ - if (!ilk_validate_wm_level(level, &max, wm)) { + if (!ilk_validate_wm_level(dev_priv, level, &max, wm)) { memset(wm, 0, sizeof(*wm)); break; } @@ -2976,7 +2982,7 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv, if (level > last_enabled_level) wm->enable = false; - else if (!ilk_validate_wm_level(level, max, wm)) + else if (!ilk_validate_wm_level(dev_priv, level, max, wm)) /* make sure all following levels get disabled */ last_enabled_level = level - 1; @@ -4016,10 +4022,7 @@ void i9xx_wm_init(struct drm_i915_private *dev_priv) g4x_setup_wm_latency(dev_priv); dev_priv->display.funcs.wm = &g4x_wm_funcs; } else if (IS_PINEVIEW(dev_priv)) { - if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv), - dev_priv->is_ddr3, - dev_priv->fsb_freq, - dev_priv->mem_freq)) { + if (!intel_get_cxsr_latency(dev_priv)) { drm_info(&dev_priv->drm, "failed to find known CxSR latency " "(found ddr%s fsb freq %d, mem freq %d), " diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index ec0d5168b503..2bb270f82932 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -29,6 +29,7 @@ * See intel_atomic_plane.c for the plane-specific atomic functionality. 
*/ +#include #include #include #include @@ -38,6 +39,7 @@ #include "intel_atomic.h" #include "intel_cdclk.h" #include "intel_display_types.h" +#include "intel_dp_tunnel.h" #include "intel_global_state.h" #include "intel_hdcp.h" #include "intel_psr.h" @@ -258,6 +260,10 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc) if (crtc_state->post_csc_lut) drm_property_blob_get(crtc_state->post_csc_lut); + if (crtc_state->dp_tunnel_ref.tunnel) + drm_dp_tunnel_ref_get(crtc_state->dp_tunnel_ref.tunnel, + &crtc_state->dp_tunnel_ref); + crtc_state->update_pipe = false; crtc_state->update_m_n = false; crtc_state->update_lrr = false; @@ -309,6 +315,8 @@ intel_crtc_destroy_state(struct drm_crtc *crtc, __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi); intel_crtc_free_hw_state(crtc_state); + if (crtc_state->dp_tunnel_ref.tunnel) + drm_dp_tunnel_ref_put(&crtc_state->dp_tunnel_ref); kfree(crtc_state); } @@ -344,6 +352,8 @@ void intel_atomic_state_clear(struct drm_atomic_state *s) /* state->internal not reset on purpose */ state->dpll_set = state->modeset = false; + + intel_dp_tunnel_atomic_cleanup_inherited_state(state); } struct intel_crtc_state * diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 5f04e495fd27..fe52c06271ef 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -1759,7 +1759,8 @@ parse_mipi_config(struct drm_i915_private *i915, /* Find the sequence block and size for the given panel. */ static const u8 * -find_panel_sequence_block(const struct bdb_mipi_sequence *sequence, +find_panel_sequence_block(struct drm_i915_private *i915, + const struct bdb_mipi_sequence *sequence, u16 panel_id, u32 *seq_size) { u32 total = get_blocksize(sequence); @@ -1776,7 +1777,7 @@ find_panel_sequence_block(const struct bdb_mipi_sequence *sequence, for (i = 0; i < MAX_MIPI_CONFIGURATIONS && index < total; i++) { if (index + header_size > total) { - DRM_ERROR("Invalid sequence block (header)\n"); + drm_err(&i915->drm, "Invalid sequence block (header)\n"); return NULL; } @@ -1789,7 +1790,7 @@ find_panel_sequence_block(const struct bdb_mipi_sequence *sequence, index += header_size; if (index + current_size > total) { - DRM_ERROR("Invalid sequence block\n"); + drm_err(&i915->drm, "Invalid sequence block\n"); return NULL; } @@ -1801,12 +1802,13 @@ find_panel_sequence_block(const struct bdb_mipi_sequence *sequence, index += current_size; } - DRM_ERROR("Sequence block detected but no valid configuration\n"); + drm_err(&i915->drm, "Sequence block detected but no valid configuration\n"); return NULL; } -static int goto_next_sequence(const u8 *data, int index, int total) +static int goto_next_sequence(struct drm_i915_private *i915, + const u8 *data, int index, int total) { u16 len; @@ -1836,7 +1838,7 @@ static int goto_next_sequence(const u8 *data, int index, int total) len = *(data + index + 6) + 7; break; default: - DRM_ERROR("Unknown operation byte\n"); + drm_err(&i915->drm, "Unknown operation byte\n"); return 0; } } @@ -1844,7 +1846,8 @@ static int goto_next_sequence(const u8 *data, int index, int total) return 0; } -static int goto_next_sequence_v3(const u8 *data, int index, int total) +static int goto_next_sequence_v3(struct drm_i915_private *i915, + const u8 *data, int index, int total) { int seq_end; u16 len; @@ -1855,7 +1858,7 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total) * checking on the structure. 
*/ if (total < 5) { - DRM_ERROR("Too small sequence size\n"); + drm_err(&i915->drm, "Too small sequence size\n"); return 0; } @@ -1872,7 +1875,7 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total) seq_end = index + size_of_sequence; if (seq_end > total) { - DRM_ERROR("Invalid sequence size\n"); + drm_err(&i915->drm, "Invalid sequence size\n"); return 0; } @@ -1882,7 +1885,7 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total) if (operation_byte == MIPI_SEQ_ELEM_END) { if (index != seq_end) { - DRM_ERROR("Invalid element structure\n"); + drm_err(&i915->drm, "Invalid element structure\n"); return 0; } return index; @@ -1904,8 +1907,8 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total) case MIPI_SEQ_ELEM_PMIC: break; default: - DRM_ERROR("Unknown operation byte %u\n", - operation_byte); + drm_err(&i915->drm, "Unknown operation byte %u\n", + operation_byte); break; } } @@ -2030,7 +2033,7 @@ parse_mipi_sequence(struct drm_i915_private *i915, drm_dbg(&i915->drm, "Found MIPI sequence block v%u\n", sequence->version); - seq_data = find_panel_sequence_block(sequence, panel_type, &seq_size); + seq_data = find_panel_sequence_block(i915, sequence, panel_type, &seq_size); if (!seq_data) return; @@ -2058,9 +2061,9 @@ parse_mipi_sequence(struct drm_i915_private *i915, panel->vbt.dsi.sequence[seq_id] = data + index; if (sequence->version >= 3) - index = goto_next_sequence_v3(data, index, seq_size); + index = goto_next_sequence_v3(i915, data, index, seq_size); else - index = goto_next_sequence(data, index, seq_size); + index = goto_next_sequence(i915, data, index, seq_size); if (!index) { drm_err(&i915->drm, "Invalid sequence %u\n", seq_id); @@ -2135,12 +2138,13 @@ parse_compression_parameters(struct drm_i915_private *i915) } } -static u8 translate_iboost(u8 val) +static u8 translate_iboost(struct drm_i915_private *i915, u8 val) { static const u8 mapping[] = { 1, 3, 7 }; /* See VBT spec */ if (val >= ARRAY_SIZE(mapping)) { - DRM_DEBUG_KMS("Unsupported I_boost value found in VBT (%d), display may not work properly\n", val); + drm_dbg_kms(&i915->drm, + "Unsupported I_boost value found in VBT (%d), display may not work properly\n", val); return 0; } return mapping[val]; @@ -2897,12 +2901,14 @@ static const struct bdb_header *get_bdb_header(const struct vbt_header *vbt) /** * intel_bios_is_valid_vbt - does the given buffer contain a valid VBT + * @i915: the device * @buf: pointer to a buffer to validate * @size: size of the buffer * * Returns true on valid VBT. 
*/ -bool intel_bios_is_valid_vbt(const void *buf, size_t size) +bool intel_bios_is_valid_vbt(struct drm_i915_private *i915, + const void *buf, size_t size) { const struct vbt_header *vbt = buf; const struct bdb_header *bdb; @@ -2911,17 +2917,17 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size) return false; if (sizeof(struct vbt_header) > size) { - DRM_DEBUG_DRIVER("VBT header incomplete\n"); + drm_dbg_kms(&i915->drm, "VBT header incomplete\n"); return false; } if (memcmp(vbt->signature, "$VBT", 4)) { - DRM_DEBUG_DRIVER("VBT invalid signature\n"); + drm_dbg_kms(&i915->drm, "VBT invalid signature\n"); return false; } if (vbt->vbt_size > size) { - DRM_DEBUG_DRIVER("VBT incomplete (vbt_size overflows)\n"); + drm_dbg_kms(&i915->drm, "VBT incomplete (vbt_size overflows)\n"); return false; } @@ -2931,13 +2937,13 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size) vbt->bdb_offset, sizeof(struct bdb_header), size)) { - DRM_DEBUG_DRIVER("BDB header incomplete\n"); + drm_dbg_kms(&i915->drm, "BDB header incomplete\n"); return false; } bdb = get_bdb_header(vbt); if (range_overflows_t(size_t, vbt->bdb_offset, bdb->bdb_size, size)) { - DRM_DEBUG_DRIVER("BDB incomplete\n"); + drm_dbg_kms(&i915->drm, "BDB incomplete\n"); return false; } @@ -2989,7 +2995,7 @@ static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915) for (count = 0; count < vbt_size; count += 4) *(vbt + store++) = intel_spi_read(&i915->uncore, found + count); - if (!intel_bios_is_valid_vbt(vbt, vbt_size)) + if (!intel_bios_is_valid_vbt(i915, vbt, vbt_size)) goto err_free_vbt; drm_dbg_kms(&i915->drm, "Found valid VBT in SPI flash\n"); @@ -3046,7 +3052,7 @@ static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915) memcpy_fromio(vbt, p, vbt_size); - if (!intel_bios_is_valid_vbt(vbt, vbt_size)) + if (!intel_bios_is_valid_vbt(i915, vbt, vbt_size)) goto err_free_vbt; pci_unmap_rom(pdev, oprom); @@ -3398,6 +3404,7 @@ static void fill_dsc(struct intel_crtc_state *crtc_state, struct dsc_compression_parameters_entry *dsc, int dsc_max_bpc) { + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; int bpc = 8; @@ -3411,8 +3418,8 @@ static void fill_dsc(struct intel_crtc_state *crtc_state, else if (dsc->support_8bpc && dsc_max_bpc >= 8) bpc = 8; else - DRM_DEBUG_KMS("VBT: Unsupported BPC %d for DCS\n", - dsc_max_bpc); + drm_dbg_kms(&i915->drm, "VBT: Unsupported BPC %d for DCS\n", + dsc_max_bpc); crtc_state->pipe_bpp = bpc * 3; @@ -3432,16 +3439,16 @@ static void fill_dsc(struct intel_crtc_state *crtc_state, } else { /* FIXME */ if (!(dsc->slices_per_line & BIT(0))) - DRM_DEBUG_KMS("VBT: Unsupported DSC slice count for DSI\n"); + drm_dbg_kms(&i915->drm, "VBT: Unsupported DSC slice count for DSI\n"); crtc_state->dsc.slice_count = 1; } if (crtc_state->hw.adjusted_mode.crtc_hdisplay % crtc_state->dsc.slice_count != 0) - DRM_DEBUG_KMS("VBT: DSC hdisplay %d not divisible by slice count %d\n", - crtc_state->hw.adjusted_mode.crtc_hdisplay, - crtc_state->dsc.slice_count); + drm_dbg_kms(&i915->drm, "VBT: DSC hdisplay %d not divisible by slice count %d\n", + crtc_state->hw.adjusted_mode.crtc_hdisplay, + crtc_state->dsc.slice_count); /* * The VBT rc_buffer_block_size and rc_buffer_size definitions @@ -3597,7 +3604,7 @@ int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata) if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost) return 0; - return 
translate_iboost(devdata->child.dp_iboost_level); + return translate_iboost(devdata->i915, devdata->child.dp_iboost_level); } int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata) @@ -3605,7 +3612,7 @@ int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata) if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost) return 0; - return translate_iboost(devdata->child.hdmi_iboost_level); + return translate_iboost(devdata->i915, devdata->child.hdmi_iboost_level); } int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata) diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h index 41bfb009d4b0..06a51be4afd8 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.h +++ b/drivers/gpu/drm/i915/display/intel_bios.h @@ -242,7 +242,8 @@ void intel_bios_init_panel_late(struct drm_i915_private *dev_priv, const struct drm_edid *drm_edid); void intel_bios_fini_panel(struct intel_panel *panel); void intel_bios_driver_remove(struct drm_i915_private *dev_priv); -bool intel_bios_is_valid_vbt(const void *buf, size_t size); +bool intel_bios_is_valid_vbt(struct drm_i915_private *i915, + const void *buf, size_t size); bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port); diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 26200ee3e23f..ed89b86ea625 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -63,6 +63,16 @@ * DMC will not change the active CDCLK frequency however, so that part * will still be performed by the driver directly. * + * Several methods exist to change the CDCLK frequency, which ones are + * supported depends on the platform: + * + * - Full PLL disable + re-enable with new VCO frequency. Pipes must be inactive. + * - CD2X divider update. Single pipe can be active as the divider update + * can be synchronized with the pipe's start of vblank. + * - Crawl the PLL smoothly to the new VCO frequency. Pipes can be active. + * - Squash waveform update. Pipes can be active. + * - Crawl and squash can also be done back to back. Pipes can be active. + * * RAWCLK is a fixed frequency clock, often used by various auxiliary * blocks such as AUX CH or backlight PWM. 
Hence the only thing we * really need to know about RAWCLK is its frequency so that various @@ -1406,6 +1416,20 @@ static const struct intel_cdclk_vals lnl_cdclk_table[] = { {} }; +static const int cdclk_squash_len = 16; + +static int cdclk_squash_divider(u16 waveform) +{ + return hweight16(waveform ?: 0xffff); +} + +static int cdclk_divider(int cdclk, int vco, u16 waveform) +{ + /* 2 * cd2x divider */ + return DIV_ROUND_CLOSEST(vco * cdclk_squash_divider(waveform), + cdclk * cdclk_squash_len); +} + static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk) { const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table; @@ -1744,10 +1768,10 @@ static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe } static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv, - int cdclk, int vco) + int cdclk, int vco, u16 waveform) { /* cdclk = vco / 2 / div{1,1.5,2,4} */ - switch (DIV_ROUND_CLOSEST(vco, cdclk)) { + switch (cdclk_divider(cdclk, vco, waveform)) { default: drm_WARN_ON(&dev_priv->drm, cdclk != dev_priv->display.cdclk.hw.bypass); @@ -1764,7 +1788,7 @@ static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv, } } -static u32 cdclk_squash_waveform(struct drm_i915_private *dev_priv, +static u16 cdclk_squash_waveform(struct drm_i915_private *dev_priv, int cdclk) { const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table; @@ -1826,20 +1850,13 @@ static bool cdclk_pll_is_unknown(unsigned int vco) return vco == ~0; } -static const int cdclk_squash_len = 16; - -static int cdclk_squash_divider(u16 waveform) -{ - return hweight16(waveform ?: 0xffff); -} - static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i915, const struct intel_cdclk_config *old_cdclk_config, const struct intel_cdclk_config *new_cdclk_config, struct intel_cdclk_config *mid_cdclk_config) { u16 old_waveform, new_waveform, mid_waveform; - int div = 2; + int old_div, new_div, mid_div; /* Return if PLL is in an unknown state, force a complete disable and re-enable. */ if (cdclk_pll_is_unknown(old_cdclk_config->vco)) @@ -1858,6 +1875,18 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i91 old_waveform == new_waveform) return false; + old_div = cdclk_divider(old_cdclk_config->cdclk, + old_cdclk_config->vco, old_waveform); + new_div = cdclk_divider(new_cdclk_config->cdclk, + new_cdclk_config->vco, new_waveform); + + /* + * Should not happen currently. We might need more midpoint + * transitions if we need to also change the cd2x divider. 
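+ *
+ * As an illustration, with hypothetical numbers: a 1228800 kHz VCO with a
+ * fully set squash waveform (16 bits) and a cd2x divider of 1 gives
+ * 1228800 / 2 = 614400 kHz, and squashing down to 8 set bits halves that
+ * to 307200 kHz; cdclk_divider() evaluates to 2 in both cases, which is
+ * why a squash-only step needs no cd2x divider update mid-sequence.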
+ */ + if (drm_WARN_ON(&i915->drm, old_div != new_div)) + return false; + *mid_cdclk_config = *new_cdclk_config; /* @@ -1870,15 +1899,17 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i91 if (cdclk_squash_divider(new_waveform) > cdclk_squash_divider(old_waveform)) { mid_cdclk_config->vco = old_cdclk_config->vco; + mid_div = old_div; mid_waveform = new_waveform; } else { mid_cdclk_config->vco = new_cdclk_config->vco; + mid_div = new_div; mid_waveform = old_waveform; } mid_cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_squash_divider(mid_waveform) * mid_cdclk_config->vco, - cdclk_squash_len * div); + cdclk_squash_len * mid_div); /* make sure the mid clock came out sane */ @@ -1906,16 +1937,12 @@ static u32 bxt_cdclk_ctl(struct drm_i915_private *i915, { int cdclk = cdclk_config->cdclk; int vco = cdclk_config->vco; - int unsquashed_cdclk; u16 waveform; u32 val; waveform = cdclk_squash_waveform(i915, cdclk); - unsquashed_cdclk = DIV_ROUND_CLOSEST(cdclk * cdclk_squash_len, - cdclk_squash_divider(waveform)); - - val = bxt_cdclk_cd2x_div_sel(i915, unsquashed_cdclk, vco) | + val = bxt_cdclk_cd2x_div_sel(i915, cdclk, vco, waveform) | bxt_cdclk_cd2x_pipe(i915, pipe); /* diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index c5092b7e87d5..ca7112b32cb3 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -2111,7 +2111,8 @@ static u32 intel_degamma_lut_size(const struct intel_crtc_state *crtc_state) return DISPLAY_INFO(i915)->color.degamma_lut_size; } -static int check_lut_size(const struct drm_property_blob *lut, int expected) +static int check_lut_size(struct drm_i915_private *i915, + const struct drm_property_blob *lut, int expected) { int len; @@ -2120,8 +2121,8 @@ static int check_lut_size(const struct drm_property_blob *lut, int expected) len = drm_color_lut_size(lut); if (len != expected) { - DRM_DEBUG_KMS("Invalid LUT size; got %d, expected %d\n", - len, expected); + drm_dbg_kms(&i915->drm, "Invalid LUT size; got %d, expected %d\n", + len, expected); return -EINVAL; } @@ -2146,8 +2147,8 @@ static int _check_luts(const struct intel_crtc_state *crtc_state, degamma_length = intel_degamma_lut_size(crtc_state); gamma_length = intel_gamma_lut_size(crtc_state); - if (check_lut_size(degamma_lut, degamma_length) || - check_lut_size(gamma_lut, gamma_length)) + if (check_lut_size(i915, degamma_lut, degamma_length) || + check_lut_size(i915, gamma_lut, gamma_length)) return -EINVAL; if (drm_color_lut_check(degamma_lut, degamma_tests) || diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index b9733a73e21d..93479db0f89f 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -933,6 +933,9 @@ static int intel_crt_get_modes(struct drm_connector *connector) struct i2c_adapter *ddc; int ret; + if (!intel_display_driver_check_access(dev_priv)) + return drm_edid_connector_add_modes(connector); + wakeref = intel_display_power_get(dev_priv, intel_encoder->power_domain); diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c index 288a00e083c8..64e0f820a789 100644 --- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c @@ -848,10 +848,10 @@ static const struct intel_c20pll_state mtl_c20_dp_hbr3 = { static const struct intel_c20pll_state mtl_c20_dp_uhbr10 = { .clock = 1000000, /* 10 Gbps */ .tx = 
{ 0xbe21, /* tx cfg0 */ - 0x4800, /* tx cfg1 */ + 0xe800, /* tx cfg1 */ 0x0000, /* tx cfg2 */ }, - .cmn = {0x0500, /* cmn cfg0*/ + .cmn = {0x0700, /* cmn cfg0*/ 0x0005, /* cmn cfg1 */ 0x0000, /* cmn cfg2 */ 0x0000, /* cmn cfg3 */ @@ -1641,7 +1641,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_594 = { static const struct intel_c20pll_state mtl_c20_hdmi_300 = { .clock = 3000000, .tx = { 0xbe98, /* tx cfg0 */ - 0x9800, /* tx cfg1 */ + 0x8800, /* tx cfg1 */ 0x0000, /* tx cfg2 */ }, .cmn = { 0x0500, /* cmn cfg0*/ @@ -1649,8 +1649,8 @@ static const struct intel_c20pll_state mtl_c20_hdmi_300 = { 0x0000, /* cmn cfg2 */ 0x0000, /* cmn cfg3 */ }, - .mpllb = { 0x209c, /* mpllb cfg0 */ - 0x7d10, /* mpllb cfg1 */ + .mpllb = { 0x309c, /* mpllb cfg0 */ + 0x2110, /* mpllb cfg1 */ 0xca06, /* mpllb cfg2 */ 0xbe40, /* mpllb cfg3 */ 0x0000, /* mpllb cfg4 */ @@ -1666,7 +1666,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_300 = { static const struct intel_c20pll_state mtl_c20_hdmi_600 = { .clock = 6000000, .tx = { 0xbe98, /* tx cfg0 */ - 0x9800, /* tx cfg1 */ + 0x8800, /* tx cfg1 */ 0x0000, /* tx cfg2 */ }, .cmn = { 0x0500, /* cmn cfg0*/ @@ -1674,8 +1674,8 @@ static const struct intel_c20pll_state mtl_c20_hdmi_600 = { 0x0000, /* cmn cfg2 */ 0x0000, /* cmn cfg3 */ }, - .mpllb = { 0x009c, /* mpllb cfg0 */ - 0x7d08, /* mpllb cfg1 */ + .mpllb = { 0x109c, /* mpllb cfg0 */ + 0x2108, /* mpllb cfg1 */ 0xca06, /* mpllb cfg2 */ 0xbe40, /* mpllb cfg3 */ 0x0000, /* mpllb cfg4 */ @@ -1691,7 +1691,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_600 = { static const struct intel_c20pll_state mtl_c20_hdmi_800 = { .clock = 8000000, .tx = { 0xbe98, /* tx cfg0 */ - 0x9800, /* tx cfg1 */ + 0x8800, /* tx cfg1 */ 0x0000, /* tx cfg2 */ }, .cmn = { 0x0500, /* cmn cfg0*/ @@ -1699,8 +1699,8 @@ static const struct intel_c20pll_state mtl_c20_hdmi_800 = { 0x0000, /* cmn cfg2 */ 0x0000, /* cmn cfg3 */ }, - .mpllb = { 0x00d0, /* mpllb cfg0 */ - 0x7d08, /* mpllb cfg1 */ + .mpllb = { 0x10d0, /* mpllb cfg0 */ + 0x2108, /* mpllb cfg1 */ 0x4a06, /* mpllb cfg2 */ 0xbe40, /* mpllb cfg3 */ 0x0000, /* mpllb cfg4 */ @@ -1716,7 +1716,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_800 = { static const struct intel_c20pll_state mtl_c20_hdmi_1000 = { .clock = 10000000, .tx = { 0xbe98, /* tx cfg0 */ - 0x9800, /* tx cfg1 */ + 0x8800, /* tx cfg1 */ 0x0000, /* tx cfg2 */ }, .cmn = { 0x0500, /* cmn cfg0*/ @@ -1725,7 +1725,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_1000 = { 0x0000, /* cmn cfg3 */ }, .mpllb = { 0x1104, /* mpllb cfg0 */ - 0x7d08, /* mpllb cfg1 */ + 0x2108, /* mpllb cfg1 */ 0x0a06, /* mpllb cfg2 */ 0xbe40, /* mpllb cfg3 */ 0x0000, /* mpllb cfg4 */ @@ -1741,7 +1741,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_1000 = { static const struct intel_c20pll_state mtl_c20_hdmi_1200 = { .clock = 12000000, .tx = { 0xbe98, /* tx cfg0 */ - 0x9800, /* tx cfg1 */ + 0x8800, /* tx cfg1 */ 0x0000, /* tx cfg2 */ }, .cmn = { 0x0500, /* cmn cfg0*/ @@ -1749,8 +1749,8 @@ static const struct intel_c20pll_state mtl_c20_hdmi_1200 = { 0x0000, /* cmn cfg2 */ 0x0000, /* cmn cfg3 */ }, - .mpllb = { 0x0138, /* mpllb cfg0 */ - 0x7d08, /* mpllb cfg1 */ + .mpllb = { 0x1138, /* mpllb cfg0 */ + 0x2108, /* mpllb cfg1 */ 0x5486, /* mpllb cfg2 */ 0xfe40, /* mpllb cfg3 */ 0x0000, /* mpllb cfg4 */ diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index bea441590204..c587a8efeafc 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c 
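Back in the intel_cdclk.c hunks at the top of this chunk, the squash/crawl midpoint reduces to cdclk = cdclk_squash_divider(waveform) * vco / (cdclk_squash_len * cd2x_div), which is presumably also why bxt_cdclk_ctl() can now hand the squashed cdclk and waveform straight to bxt_cdclk_cd2x_div_sel(). A minimal standalone sketch of that relation, assuming cdclk_squash_divider() counts the set bits of the 16-bit waveform (as its name suggests); all clock values below are made up for illustration:

#include <stdio.h>

#define CDCLK_SQUASH_LEN 16 /* assumed 16-bit squash waveform */

static int cdclk_squash_divider(unsigned short waveform)
{
	/* an all-zero waveform means squashing is disabled: use the full length */
	return waveform ? __builtin_popcount(waveform) : CDCLK_SQUASH_LEN;
}

static long long div_round_closest(long long num, long long den)
{
	return (num + den / 2) / den;
}

int main(void)
{
	int vco = 652800;                 /* kHz, hypothetical PLL frequency */
	int cd2x_div = 2;                 /* hypothetical CD2X divider */
	unsigned short waveform = 0xaaaa; /* every other pulse gated: divider 8 */

	long long cdclk = div_round_closest((long long)cdclk_squash_divider(waveform) * vco,
					    (long long)CDCLK_SQUASH_LEN * cd2x_div);

	printf("cdclk = %lld kHz\n", cdclk); /* 8 * 652800 / 32 = 163200 */
	return 0;
}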
@@ -54,6 +54,7 @@ #include "intel_dp_aux.h" #include "intel_dp_link_training.h" #include "intel_dp_mst.h" +#include "intel_dp_tunnel.h" #include "intel_dpio_phy.h" #include "intel_dsi.h" #include "intel_fdi.h" @@ -4150,7 +4151,7 @@ static void intel_ddi_sync_state(struct intel_encoder *encoder, intel_tc_port_sanitize_mode(enc_to_dig_port(encoder), crtc_state); - if (crtc_state && intel_crtc_has_dp_encoder(crtc_state)) + if (intel_encoder_is_dp(encoder)) intel_dp_sync_state(encoder, crtc_state); } diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 7db0655d8c9e..ab2f52d21bad 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -33,6 +33,7 @@ #include #include +#include #include #include #include @@ -73,6 +74,7 @@ #include "intel_dp.h" #include "intel_dp_link_training.h" #include "intel_dp_mst.h" +#include "intel_dp_tunnel.h" #include "intel_dpll.h" #include "intel_dpll_mgr.h" #include "intel_dpt.h" @@ -2478,7 +2480,7 @@ intel_link_compute_m_n(u16 bits_per_pixel_x16, int nlanes, u32 link_symbol_clock = intel_dp_link_symbol_clock(link_clock); u32 data_m = intel_dp_effective_data_rate(pixel_clock, bits_per_pixel_x16, bw_overhead); - u32 data_n = intel_dp_max_data_rate(link_clock, nlanes); + u32 data_n = drm_dp_max_dprx_data_rate(link_clock, nlanes); /* * Windows/BIOS uses fixed M/N values always. Follow suit. @@ -4490,6 +4492,8 @@ copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state, saved_state->crc_enabled = slave_crtc_state->crc_enabled; intel_crtc_free_hw_state(slave_crtc_state); + if (slave_crtc_state->dp_tunnel_ref.tunnel) + drm_dp_tunnel_ref_put(&slave_crtc_state->dp_tunnel_ref); memcpy(slave_crtc_state, saved_state, sizeof(*slave_crtc_state)); kfree(saved_state); @@ -4505,6 +4509,10 @@ copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state, &master_crtc_state->hw.adjusted_mode); slave_crtc_state->hw.scaling_filter = master_crtc_state->hw.scaling_filter; + if (master_crtc_state->dp_tunnel_ref.tunnel) + drm_dp_tunnel_ref_get(master_crtc_state->dp_tunnel_ref.tunnel, + &slave_crtc_state->dp_tunnel_ref); + copy_bigjoiner_crtc_state_nomodeset(state, slave_crtc); slave_crtc_state->uapi.mode_changed = master_crtc_state->uapi.mode_changed; @@ -4533,6 +4541,8 @@ intel_crtc_prepare_cleared_state(struct intel_atomic_state *state, /* free the old crtc_state->hw members */ intel_crtc_free_hw_state(crtc_state); + intel_dp_tunnel_atomic_clear_stream_bw(state, crtc_state); + /* FIXME: before the switch to atomic started, a new pipe_config was * kzalloc'd. Code that depends on any field being zero should be * fixed, so that the crtc_state can be safely duplicated. 
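In the intel_link_compute_m_n() hunk above, data_m is the stream's effective payload rate (from intel_dp_effective_data_rate()) and data_n is now the DPRX's maximum payload rate from the DRM helper drm_dp_max_dprx_data_rate(); the data M/N pair is effectively the fraction of the link's payload capacity the stream consumes. A simplified worked example of that ratio (hypothetical mode and link, with the bw_overhead factor of the real helper ignored):

#include <stdio.h>

int main(void)
{
	/* 1080p-class stream: 148500 kHz pixel clock, 24 bpp */
	long long pixel_clock = 148500;           /* kHz */
	long long bpp = 24;
	long long data_m = pixel_clock * bpp / 8; /* payload rate, kBps: 445500 */

	/* HBR2 x4 lanes at 80% (8b/10b) channel coding efficiency */
	long long link_rate = 540000;             /* 5.4 Gbps in 10 kbps units */
	long long lanes = 4;
	long long data_n = link_rate * 10 * lanes * 80 / 100 / 8; /* kBps: 2160000 */

	printf("data M/N = %lld/%lld (~%.3f)\n",
	       data_m, data_n, (double)data_m / data_n);
	return 0;
}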
For now, @@ -4851,10 +4861,12 @@ memcmp_diff_len(const u8 *a, const u8 *b, size_t len) } static void -pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv, - bool fastset, const char *name, +pipe_config_buffer_mismatch(bool fastset, const struct intel_crtc *crtc, + const char *name, const u8 *a, const u8 *b, size_t len) { + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + if (fastset) { if (!drm_debug_enabled(DRM_UT_KMS)) return; @@ -4863,7 +4875,8 @@ pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv, len = memcmp_diff_len(a, b, len); drm_dbg_kms(&dev_priv->drm, - "fastset requirement not met in %s buffer\n", name); + "[CRTC:%d:%s] fastset requirement not met in %s buffer\n", + crtc->base.base.id, crtc->base.name, name); print_hex_dump(KERN_DEBUG, "expected: ", DUMP_PREFIX_NONE, 16, 0, a, len, false); print_hex_dump(KERN_DEBUG, "found: ", DUMP_PREFIX_NONE, @@ -4872,7 +4885,8 @@ pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv, /* only dump up to the last difference */ len = memcmp_diff_len(a, b, len); - drm_err(&dev_priv->drm, "mismatch in %s buffer\n", name); + drm_err(&dev_priv->drm, "[CRTC:%d:%s] mismatch in %s buffer\n", + crtc->base.base.id, crtc->base.name, name); print_hex_dump(KERN_ERR, "expected: ", DUMP_PREFIX_NONE, 16, 0, a, len, false); print_hex_dump(KERN_ERR, "found: ", DUMP_PREFIX_NONE, @@ -4903,18 +4917,34 @@ pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc, va_end(args); } -static bool fastboot_enabled(struct drm_i915_private *dev_priv) +static void +pipe_config_pll_mismatch(bool fastset, + const struct intel_crtc *crtc, + const char *name, + const struct intel_dpll_hw_state *a, + const struct intel_dpll_hw_state *b) { - /* Enable fastboot by default on Skylake and newer */ - if (DISPLAY_VER(dev_priv) >= 9) - return true; + struct drm_i915_private *i915 = to_i915(crtc->base.dev); - /* Enable fastboot by default on VLV and CHV */ - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - return true; + if (fastset) { + if (!drm_debug_enabled(DRM_UT_KMS)) + return; - /* Disabled by default on all others */ - return false; + drm_dbg_kms(&i915->drm, + "[CRTC:%d:%s] fastset requirement not met in %s\n", + crtc->base.base.id, crtc->base.name, name); + drm_dbg_kms(&i915->drm, "expected:\n"); + intel_dpll_dump_hw_state(i915, a); + drm_dbg_kms(&i915->drm, "found:\n"); + intel_dpll_dump_hw_state(i915, b); + } else { + drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s buffer\n", + crtc->base.base.id, crtc->base.name, name); + drm_err(&i915->drm, "expected:\n"); + intel_dpll_dump_hw_state(i915, a); + drm_err(&i915->drm, "found:\n"); + intel_dpll_dump_hw_state(i915, b); + } } bool @@ -4925,14 +4955,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); bool ret = true; - bool fixup_inherited = fastset && - current_config->inherited && !pipe_config->inherited; - - if (fixup_inherited && !fastboot_enabled(dev_priv)) { - drm_dbg_kms(&dev_priv->drm, - "initial modeset and fastboot not set\n"); - ret = false; - } #define PIPE_CONF_CHECK_X(name) do { \ if (current_config->name != pipe_config->name) { \ @@ -5012,7 +5034,17 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, } \ } while (0) -#define PIPE_CONF_CHECK_TIMINGS(name) do { \ +#define PIPE_CONF_CHECK_PLL(name) do { \ + if (!intel_dpll_compare_hw_state(dev_priv, ¤t_config->name, \ + 
&pipe_config->name)) { \ + pipe_config_pll_mismatch(fastset, crtc, __stringify(name), \ + ¤t_config->name, \ + &pipe_config->name); \ + ret = false; \ + } \ +} while (0) + +#define PIPE_CONF_CHECK_TIMINGS(name) do { \ PIPE_CONF_CHECK_I(name.crtc_hdisplay); \ PIPE_CONF_CHECK_I(name.crtc_htotal); \ PIPE_CONF_CHECK_I(name.crtc_hblank_start); \ @@ -5071,7 +5103,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, BUILD_BUG_ON(sizeof(current_config->name) != (len)); \ BUILD_BUG_ON(sizeof(pipe_config->name) != (len)); \ if (!intel_compare_buffer(current_config->name, pipe_config->name, (len))) { \ - pipe_config_buffer_mismatch(dev_priv, fastset, __stringify(name), \ + pipe_config_buffer_mismatch(fastset, crtc, __stringify(name), \ current_config->name, \ pipe_config->name, \ (len)); \ @@ -5215,42 +5247,12 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_BOOL(double_wide); - if (dev_priv->display.dpll.mgr) { + if (dev_priv->display.dpll.mgr) PIPE_CONF_CHECK_P(shared_dpll); - PIPE_CONF_CHECK_X(dpll_hw_state.dpll); - PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); - PIPE_CONF_CHECK_X(dpll_hw_state.fp0); - PIPE_CONF_CHECK_X(dpll_hw_state.fp1); - PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); - PIPE_CONF_CHECK_X(dpll_hw_state.spll); - PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); - PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); - PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); - PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0); - PIPE_CONF_CHECK_X(dpll_hw_state.div0); - PIPE_CONF_CHECK_X(dpll_hw_state.ebb0); - PIPE_CONF_CHECK_X(dpll_hw_state.ebb4); - PIPE_CONF_CHECK_X(dpll_hw_state.pll0); - PIPE_CONF_CHECK_X(dpll_hw_state.pll1); - PIPE_CONF_CHECK_X(dpll_hw_state.pll2); - PIPE_CONF_CHECK_X(dpll_hw_state.pll3); - PIPE_CONF_CHECK_X(dpll_hw_state.pll6); - PIPE_CONF_CHECK_X(dpll_hw_state.pll8); - PIPE_CONF_CHECK_X(dpll_hw_state.pll9); - PIPE_CONF_CHECK_X(dpll_hw_state.pll10); - PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12); - PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl); - PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1); - PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl); - PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0); - PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1); - PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf); - PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock); - PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc); - PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias); - PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias); - } + /* FIXME convert everything over the dpll_mgr */ + if (dev_priv->display.dpll.mgr || HAS_GMCH(dev_priv)) + PIPE_CONF_CHECK_PLL(dpll_hw_state); PIPE_CONF_CHECK_X(dsi_pll.ctrl); PIPE_CONF_CHECK_X(dsi_pll.div); @@ -5373,6 +5375,10 @@ static int intel_modeset_pipe(struct intel_atomic_state *state, if (ret) return ret; + ret = intel_dp_tunnel_atomic_add_state_for_crtc(state, crtc); + if (ret) + return ret; + ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc); if (ret) return ret; @@ -6260,12 +6266,11 @@ static int intel_atomic_check_config(struct intel_atomic_state *state, static int intel_atomic_check_config_and_link(struct intel_atomic_state *state) { - struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_link_bw_limits new_limits; struct intel_link_bw_limits old_limits; int ret; - intel_link_bw_init_limits(i915, &new_limits); + intel_link_bw_init_limits(state, &new_limits); old_limits = new_limits; while (true) { @@ -7118,6 +7123,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) 
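The PIPE_CONF_CHECK_PLL() macro added above follows the shape of the existing PIPE_CONF_CHECK_*() helpers: a do { } while (0) body so it expands to a single statement, __stringify() turning the field name into the text used in the mismatch report, and one opaque comparator (intel_dpll_compare_hw_state()) standing in for the long list of per-register PIPE_CONF_CHECK_X(dpll_hw_state.*) lines the hunk deletes. A standalone sketch of that pattern; the struct layout and names below are illustrative only:

#include <stdbool.h>
#include <stdio.h>

#define __stringify(x) #x

struct pll_hw_state { unsigned int cfgcr0, cfgcr1; };
struct crtc_state { struct pll_hw_state dpll_hw_state; };

/* stand-in for intel_dpll_compare_hw_state() */
static bool pll_hw_state_equal(const struct pll_hw_state *a,
			       const struct pll_hw_state *b)
{
	return a->cfgcr0 == b->cfgcr0 && a->cfgcr1 == b->cfgcr1;
}

#define CHECK_PLL(name) do { \
	if (!pll_hw_state_equal(&sw_state->name, &hw_state->name)) { \
		printf("mismatch in %s\n", __stringify(name)); \
		ok = false; \
	} \
} while (0)

int main(void)
{
	struct crtc_state sw = { .dpll_hw_state = { 0x1234, 0x10 } };
	struct crtc_state hw = { .dpll_hw_state = { 0x1234, 0x11 } };
	const struct crtc_state *sw_state = &sw, *hw_state = &hw;
	bool ok = true;

	CHECK_PLL(dpll_hw_state); /* prints "mismatch in dpll_hw_state" */
	return ok ? 0 : 1;
}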
intel_commit_modeset_disables(state); + intel_dp_tunnel_atomic_alloc_bw(state); + /* FIXME: Eventually get rid of our crtc->config pointer */ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) crtc->config = new_crtc_state; @@ -8094,8 +8101,9 @@ void intel_hpd_poll_fini(struct drm_i915_private *i915) /* Kill all the work that may have been queued by hpd. */ drm_connector_list_iter_begin(&i915->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { - if (connector->modeset_retry_work.func) - cancel_work_sync(&connector->modeset_retry_work); + if (connector->modeset_retry_work.func && + cancel_work_sync(&connector->modeset_retry_work)) + drm_connector_put(&connector->base); if (connector->hdcp.shim) { cancel_delayed_work_sync(&connector->hdcp.check_work); cancel_work_sync(&connector->hdcp.prop_work); diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h index fdeaac994e17..2167dbee5eea 100644 --- a/drivers/gpu/drm/i915/display/intel_display_core.h +++ b/drivers/gpu/drm/i915/display/intel_display_core.h @@ -524,6 +524,7 @@ struct intel_display { } wq; /* Grouping using named structs. Keep sorted. */ + struct drm_dp_tunnel_mgr *dp_tunnel_mgr; struct intel_audio audio; struct intel_dpll dpll; struct intel_fbc *fbc[I915_MAX_FBCS]; diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index 6f2d13c8ccf7..b99c024b0934 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -188,7 +188,8 @@ static void intel_panel_info(struct seq_file *m, } static void intel_hdcp_info(struct seq_file *m, - struct intel_connector *intel_connector) + struct intel_connector *intel_connector, + bool remote_req) { bool hdcp_cap, hdcp2_cap; @@ -197,8 +198,14 @@ static void intel_hdcp_info(struct seq_file *m, goto out; } - hdcp_cap = intel_hdcp_capable(intel_connector); - hdcp2_cap = intel_hdcp2_capable(intel_connector); + if (remote_req) { + intel_hdcp_get_remote_capability(intel_connector, + &hdcp_cap, + &hdcp2_cap); + } else { + hdcp_cap = intel_hdcp_get_capability(intel_connector); + hdcp2_cap = intel_hdcp2_get_capability(intel_connector); + } if (hdcp_cap) seq_puts(m, "HDCP1.4 "); @@ -285,7 +292,11 @@ static void intel_connector_info(struct seq_file *m, } seq_puts(m, "\tHDCP version: "); - intel_hdcp_info(m, intel_connector); + if (intel_encoder_is_mst(encoder)) { + intel_hdcp_info(m, intel_connector, true); + seq_puts(m, "\tMST Hub HDCP version: "); + } + intel_hdcp_info(m, intel_connector, false); seq_printf(m, "\tmax bpc: %u\n", connector->display_info.bpc); @@ -1131,7 +1142,7 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data) seq_printf(m, "%s:%d HDCP version: ", connector->base.name, connector->base.base.id); - intel_hdcp_info(m, connector); + intel_hdcp_info(m, connector, false); out: drm_modeset_unlock(&i915->drm.mode_config.connection_mutex); @@ -1391,6 +1402,20 @@ out: drm_modeset_unlock(&i915->drm.mode_config.connection_mutex); return ret; } +static int i915_bigjoiner_enable_show(struct seq_file *m, void *data) +{ + struct intel_connector *connector = m->private; + struct drm_crtc *crtc; + + crtc = connector->base.state->crtc; + if (connector->base.status != connector_status_connected || !crtc) + return -ENODEV; + + seq_printf(m, "Bigjoiner enable: %d\n", connector->force_bigjoiner_enable); + + return 0; +} + static ssize_t 
i915_dsc_output_format_write(struct file *file, const char __user *ubuf, size_t len, loff_t *offp) @@ -1412,6 +1437,30 @@ static ssize_t i915_dsc_output_format_write(struct file *file, return len; } +static ssize_t i915_bigjoiner_enable_write(struct file *file, + const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct intel_connector *connector = m->private; + struct drm_crtc *crtc; + bool bigjoiner_en = 0; + int ret; + + crtc = connector->base.state->crtc; + if (connector->base.status != connector_status_connected || !crtc) + return -ENODEV; + + ret = kstrtobool_from_user(ubuf, len, &bigjoiner_en); + if (ret < 0) + return ret; + + connector->force_bigjoiner_enable = bigjoiner_en; + *offp += len; + + return len; +} + static int i915_dsc_output_format_open(struct inode *inode, struct file *file) { @@ -1505,6 +1554,8 @@ static const struct file_operations i915_dsc_fractional_bpp_fops = { .write = i915_dsc_fractional_bpp_write }; +DEFINE_SHOW_STORE_ATTRIBUTE(i915_bigjoiner_enable); + /* * Returns the Current CRTC's bpc. * Example usage: cat /sys/kernel/debug/dri/0/crtc-0/i915_current_bpc @@ -1586,6 +1637,13 @@ void intel_connector_debugfs_add(struct intel_connector *connector) connector, &i915_dsc_fractional_bpp_fops); } + if (DISPLAY_VER(i915) >= 11 && + (connector_type == DRM_MODE_CONNECTOR_DisplayPort || + connector_type == DRM_MODE_CONNECTOR_eDP)) { + debugfs_create_file("i915_bigjoiner_force_enable", 0644, root, + connector, &i915_bigjoiner_enable_fops); + } + if (connector_type == DRM_MODE_CONNECTOR_DSI || connector_type == DRM_MODE_CONNECTOR_eDP || connector_type == DRM_MODE_CONNECTOR_DisplayPort || diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c index 4f7ba7eb03d2..87dd07e0d138 100644 --- a/drivers/gpu/drm/i915/display/intel_display_driver.c +++ b/drivers/gpu/drm/i915/display/intel_display_driver.c @@ -35,6 +35,7 @@ #include "intel_dkl_phy.h" #include "intel_dmc.h" #include "intel_dp.h" +#include "intel_dp_tunnel.h" #include "intel_dpll.h" #include "intel_dpll_mgr.h" #include "intel_fb.h" @@ -434,10 +435,8 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915) for_each_pipe(i915, pipe) { ret = intel_crtc_init(i915, pipe); - if (ret) { - intel_mode_config_cleanup(i915); - return ret; - } + if (ret) + goto err_mode_config; } intel_plane_possible_crtcs_init(i915); @@ -457,6 +456,10 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915) intel_vga_disable(i915); intel_setup_outputs(i915); + ret = intel_dp_tunnel_mgr_init(i915); + if (ret) + goto err_hdcp; + intel_display_driver_disable_user_access(i915); drm_modeset_lock_all(dev); @@ -475,6 +478,13 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915) ilk_wm_sanitize(i915); return 0; + +err_hdcp: + intel_hdcp_component_fini(i915); +err_mode_config: + intel_mode_config_cleanup(i915); + + return ret; } /* part #3: call after gem init */ @@ -599,6 +609,8 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915) intel_mode_config_cleanup(i915); + intel_dp_tunnel_mgr_cleanup(i915); + intel_overlay_cleanup(i915); intel_gmbus_teardown(i915); diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 01eb6e4e6049..860e867586f4 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -33,6 +33,7 @@ #include #include +#include #include 
#include #include @@ -327,7 +328,6 @@ struct intel_vbt_panel_data { struct edp_power_seq pps; u8 drrs_msa_timing_delay; bool low_vswing; - bool initialized; bool hobl; } edp; @@ -499,15 +499,15 @@ struct intel_hdcp_shim { struct intel_connector *connector); /* Detects panel's hdcp capability. This is optional for HDMI. */ - int (*hdcp_capable)(struct intel_digital_port *dig_port, - bool *hdcp_capable); + int (*hdcp_get_capability)(struct intel_digital_port *dig_port, + bool *hdcp_capable); /* HDCP adaptation(DP/HDMI) required on the port */ enum hdcp_wired_protocol protocol; /* Detects whether sink is HDCP2.2 capable */ - int (*hdcp_2_2_capable)(struct intel_connector *connector, - bool *capable); + int (*hdcp_2_2_get_capability)(struct intel_connector *connector, + bool *capable); /* Write HDCP2.2 messages */ int (*write_2_2_msg)(struct intel_connector *connector, @@ -532,6 +532,10 @@ struct intel_hdcp_shim { /* HDCP2.2 Link Integrity Check */ int (*check_2_2_link)(struct intel_digital_port *dig_port, struct intel_connector *connector); + + /* HDCP remote sink cap */ + int (*get_remote_hdcp_capability)(struct intel_connector *connector, + bool *hdcp_capable, bool *hdcp2_capable); }; struct intel_hdcp { @@ -626,6 +630,8 @@ struct intel_connector { struct intel_dp *mst_port; + bool force_bigjoiner_enable; + struct { struct drm_dp_aux *dsc_decompression_aux; u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]; @@ -677,6 +683,8 @@ struct intel_atomic_state { struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS]; + struct intel_dp_tunnel_inherited_state *inherited_dp_tunnels; + /* * Current watermarks can't be trusted during hardware readout, so * don't bother calculating intermediate watermarks. @@ -1374,6 +1382,9 @@ struct intel_crtc_state { struct drm_dsc_config config; } dsc; + /* DP tunnel used for BW allocation. 
*/ + struct drm_dp_tunnel_ref dp_tunnel_ref; + /* HSW+ linetime watermarks */ u16 linetime; u16 ips_linetime; @@ -1784,6 +1795,9 @@ struct intel_dp { /* connector directly attached - won't be use for modeset in mst world */ struct intel_connector *attached_connector; + struct drm_dp_tunnel *tunnel; + bool tunnel_suspended:1; + /* mst connector list */ struct intel_dp_mst_encoder *mst_encoders[I915_MAX_PIPES]; struct drm_dp_mst_topology_mgr mst_mgr; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index e2d991edfd89..6ece2c563c7a 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -36,6 +36,7 @@ #include #include +#include #include #include #include @@ -63,6 +64,7 @@ #include "intel_dp_hdcp.h" #include "intel_dp_link_training.h" #include "intel_dp_mst.h" +#include "intel_dp_tunnel.h" #include "intel_dpio_phy.h" #include "intel_dpll.h" #include "intel_fifo_underrun.h" @@ -152,6 +154,22 @@ int intel_dp_link_symbol_clock(int rate) return DIV_ROUND_CLOSEST(rate * 10, intel_dp_link_symbol_size(rate)); } +static int max_dprx_rate(struct intel_dp *intel_dp) +{ + if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp)) + return drm_dp_tunnel_max_dprx_rate(intel_dp->tunnel); + + return drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]); +} + +static int max_dprx_lane_count(struct intel_dp *intel_dp) +{ + if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp)) + return drm_dp_tunnel_max_dprx_lane_count(intel_dp->tunnel); + + return drm_dp_max_lane_count(intel_dp->dpcd); +} + static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp) { intel_dp->sink_rates[0] = 162000; @@ -180,7 +198,7 @@ static void intel_dp_set_dpcd_sink_rates(struct intel_dp *intel_dp) /* * Sink rates for 8b/10b. */ - max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]); + max_rate = max_dprx_rate(intel_dp); max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps); if (max_lttpr_rate) max_rate = min(max_rate, max_lttpr_rate); @@ -259,7 +277,7 @@ static void intel_dp_set_max_sink_lane_count(struct intel_dp *intel_dp) struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *encoder = &intel_dig_port->base; - intel_dp->max_sink_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); + intel_dp->max_sink_lane_count = max_dprx_lane_count(intel_dp); switch (intel_dp->max_sink_lane_count) { case 1: @@ -309,7 +327,7 @@ static int intel_dp_common_rate(struct intel_dp *intel_dp, int index) } /* Theoretical max between source and sink */ -static int intel_dp_max_common_rate(struct intel_dp *intel_dp) +int intel_dp_max_common_rate(struct intel_dp *intel_dp) { return intel_dp_common_rate(intel_dp, intel_dp->num_common_rates - 1); } @@ -326,7 +344,7 @@ static int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port) } /* Theoretical max between source and sink */ -static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp) +int intel_dp_max_common_lane_count(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); int source_max = intel_dp_max_source_lane_count(dig_port); @@ -383,50 +401,27 @@ int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16, 1000000 * 16 * 8); } -/* - * Given a link rate and lanes, get the data bandwidth. 
+/** + * intel_dp_max_link_data_rate: Calculate the maximum rate for the given link params + * @intel_dp: Intel DP object + * @max_dprx_rate: Maximum data rate of the DPRX + * @max_dprx_lanes: Maximum lane count of the DPRX * - * Data bandwidth is the actual payload rate, which depends on the data - * bandwidth efficiency and the link rate. + * Calculate the maximum data rate for the provided link parameters taking into + * account any BW limitations by a DP tunnel attached to @intel_dp. * - * For 8b/10b channel encoding, SST and non-FEC, the data bandwidth efficiency - * is 80%. For example, for a 1.62 Gbps link, 1.62*10^9 bps * 0.80 * (1/8) = - * 162000 kBps. With 8-bit symbols, we have 162000 kHz symbol clock. Just by - * coincidence, the port clock in kHz matches the data bandwidth in kBps, and - * they equal the link bit rate in Gbps multiplied by 100000. (Note that this no - * longer holds for data bandwidth as soon as FEC or MST is taken into account!) - * - * For 128b/132b channel encoding, the data bandwidth efficiency is 96.71%. For - * example, for a 10 Gbps link, 10*10^9 bps * 0.9671 * (1/8) = 1208875 - * kBps. With 32-bit symbols, we have 312500 kHz symbol clock. The value 1000000 - * does not match the symbol clock, the port clock (not even if you think in - * terms of a byte clock), nor the data bandwidth. It only matches the link bit - * rate in units of 10000 bps. + * Returns the maximum data rate in kBps units. */ -int -intel_dp_max_data_rate(int max_link_rate, int max_lanes) +int intel_dp_max_link_data_rate(struct intel_dp *intel_dp, + int max_dprx_rate, int max_dprx_lanes) { - int ch_coding_efficiency = - drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(max_link_rate)); - int max_link_rate_kbps = max_link_rate * 10; + int max_rate = drm_dp_max_dprx_data_rate(max_dprx_rate, max_dprx_lanes); - /* - * UHBR rates always use 128b/132b channel encoding, and have - * 97.71% data bandwidth efficiency. Consider max_link_rate the - * link bit rate in units of 10000 bps. - */ - /* - * Lower than UHBR rates always use 8b/10b channel encoding, and have - * 80% data bandwidth efficiency for SST non-FEC. 
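The comment block being removed here already works through the arithmetic that the DRM helper drm_dp_max_dprx_data_rate() is now relied on to perform: payload rate = link bit rate x channel coding efficiency / 8. A standalone sketch reusing the removed comment's own figures (80% for 8b/10b SST non-FEC, 96.71% for 128b/132b); the actual helper lives in DRM core and may round differently:

#include <stdint.h>
#include <stdio.h>

/* efficiency in ppm: ~800000 for 8b/10b SST non-FEC, ~967100 for 128b/132b */
static int64_t max_data_rate_kBps(int64_t link_rate, int64_t lanes, int64_t eff_ppm)
{
	int64_t link_rate_kbps = link_rate * 10; /* link rate given in 10 kbps units */

	return link_rate_kbps * lanes * eff_ppm / (1000000LL * 8);
}

int main(void)
{
	/* 1.62 Gbps x 1 lane, 8b/10b: 1.62e9 * 0.80 / 8 = 162000 kBps */
	printf("%lld kBps\n", (long long)max_data_rate_kBps(162000, 1, 800000));
	/* 10 Gbps x 1 lane, 128b/132b: 10e9 * 0.9671 / 8 = 1208875 kBps */
	printf("%lld kBps\n", (long long)max_data_rate_kBps(1000000, 1, 967100));
	return 0;
}

The new intel_dp_max_link_data_rate() then simply clamps this figure to the tunnel's available bandwidth when BW allocation is enabled, as the remainder of the hunk shows.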
However, this turns - * out to be a nop by coincidence: - * - * int max_link_rate_kbps = max_link_rate * 10; - * max_link_rate_kbps = DIV_ROUND_DOWN_ULL(max_link_rate_kbps * 8, 10); - * max_link_rate = max_link_rate_kbps / 8; - */ - return DIV_ROUND_DOWN_ULL(mul_u32_u32(max_link_rate_kbps * max_lanes, - ch_coding_efficiency), - 1000000 * 8); + if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp)) + max_rate = min(max_rate, + drm_dp_tunnel_available_bw(intel_dp->tunnel)); + + return max_rate; } bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp) @@ -658,7 +653,7 @@ static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp, int mode_rate, max_rate; mode_rate = intel_dp_link_required(fixed_mode->clock, 18); - max_rate = intel_dp_max_data_rate(link_rate, lane_count); + max_rate = intel_dp_max_link_data_rate(intel_dp, link_rate, lane_count); if (mode_rate > max_rate) return false; @@ -1205,11 +1200,13 @@ bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp, int hdisplay, int clock) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_connector *connector = intel_dp->attached_connector; if (!intel_dp_can_bigjoiner(intel_dp)) return false; - return clock > i915->max_dotclk_freq || hdisplay > 5120; + return clock > i915->max_dotclk_freq || hdisplay > 5120 || + connector->force_bigjoiner_enable; } static enum drm_mode_status @@ -1260,7 +1257,8 @@ intel_dp_mode_valid(struct drm_connector *_connector, max_link_clock = intel_dp_max_link_rate(intel_dp); max_lanes = intel_dp_max_lane_count(intel_dp); - max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); + max_rate = intel_dp_max_link_data_rate(intel_dp, max_link_clock, max_lanes); + mode_rate = intel_dp_link_required(target_clock, intel_dp_mode_min_output_bpp(connector, mode)); @@ -1610,8 +1608,10 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, for (lane_count = limits->min_lane_count; lane_count <= limits->max_lane_count; lane_count <<= 1) { - link_avail = intel_dp_max_data_rate(link_rate, - lane_count); + link_avail = intel_dp_max_link_data_rate(intel_dp, + link_rate, + lane_count); + if (mode_rate <= link_avail) { pipe_config->lane_count = lane_count; @@ -2387,6 +2387,17 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp, limits); } +int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state) +{ + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + int bpp = crtc_state->dsc.compression_enable ? 
+ to_bpp_int_roundup(crtc_state->dsc.compressed_bpp_x16) : + crtc_state->pipe_bpp; + + return intel_dp_link_required(adjusted_mode->crtc_clock, bpp); +} + static int intel_dp_compute_link_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, @@ -2454,31 +2465,16 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, return ret; } - if (pipe_config->dsc.compression_enable) { - drm_dbg_kms(&i915->drm, - "DP lane count %d clock %d Input bpp %d Compressed bpp " BPP_X16_FMT "\n", - pipe_config->lane_count, pipe_config->port_clock, - pipe_config->pipe_bpp, - BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16)); + drm_dbg_kms(&i915->drm, + "DP lane count %d clock %d bpp input %d compressed " BPP_X16_FMT " link rate required %d available %d\n", + pipe_config->lane_count, pipe_config->port_clock, + pipe_config->pipe_bpp, + BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16), + intel_dp_config_required_rate(pipe_config), + intel_dp_max_link_data_rate(intel_dp, + pipe_config->port_clock, + pipe_config->lane_count)); - drm_dbg_kms(&i915->drm, - "DP link rate required %i available %i\n", - intel_dp_link_required(adjusted_mode->crtc_clock, - to_bpp_int_roundup(pipe_config->dsc.compressed_bpp_x16)), - intel_dp_max_data_rate(pipe_config->port_clock, - pipe_config->lane_count)); - } else { - drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n", - pipe_config->lane_count, pipe_config->port_clock, - pipe_config->pipe_bpp); - - drm_dbg_kms(&i915->drm, - "DP link rate required %i available %i\n", - intel_dp_link_required(adjusted_mode->crtc_clock, - pipe_config->pipe_bpp), - intel_dp_max_data_rate(pipe_config->port_clock, - pipe_config->lane_count)); - } return 0; } @@ -2840,12 +2836,47 @@ intel_dp_audio_compute_config(struct intel_encoder *encoder, intel_dp_is_uhbr(pipe_config); } +void intel_dp_queue_modeset_retry_work(struct intel_connector *connector) +{ + struct drm_i915_private *i915 = to_i915(connector->base.dev); + + drm_connector_get(&connector->base); + if (!queue_work(i915->unordered_wq, &connector->modeset_retry_work)) + drm_connector_put(&connector->base); +} + +void +intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state, + struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct intel_connector *connector; + struct intel_digital_connector_state *conn_state; + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + int i; + + if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) { + intel_dp_queue_modeset_retry_work(intel_dp->attached_connector); + + return; + } + + for_each_new_intel_connector_in_state(state, connector, conn_state, i) { + if (!conn_state->base.crtc) + continue; + + if (connector->mst_port == intel_dp) + intel_dp_queue_modeset_retry_work(connector); + } +} + int intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state); struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); const struct drm_display_mode *fixed_mode; @@ -2946,7 +2977,8 @@ intel_dp_compute_config(struct intel_encoder *encoder, intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state); intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state); - return 0; + return intel_dp_tunnel_atomic_compute_stream_bw(state, intel_dp, 
connector, + pipe_config); } void intel_dp_set_link_params(struct intel_dp *intel_dp, @@ -3282,18 +3314,21 @@ void intel_dp_sync_state(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - - if (!crtc_state) - return; + bool dpcd_updated = false; /* * Don't clobber DPCD if it's been already read out during output * setup (eDP) or detect. */ - if (intel_dp->dpcd[DP_DPCD_REV] == 0) + if (crtc_state && intel_dp->dpcd[DP_DPCD_REV] == 0) { intel_dp_get_dpcd(intel_dp); + dpcd_updated = true; + } - intel_dp_reset_max_link_params(intel_dp); + intel_dp_tunnel_resume(intel_dp, crtc_state, dpcd_updated); + + if (crtc_state) + intel_dp_reset_max_link_params(intel_dp); } bool intel_dp_initial_fastset_check(struct intel_encoder *encoder, @@ -3959,6 +3994,13 @@ intel_dp_has_sink_count(struct intel_dp *intel_dp) &intel_dp->desc); } +void intel_dp_update_sink_caps(struct intel_dp *intel_dp) +{ + intel_dp_set_sink_rates(intel_dp); + intel_dp_set_max_sink_lane_count(intel_dp); + intel_dp_set_common_rates(intel_dp); +} + static bool intel_dp_get_dpcd(struct intel_dp *intel_dp) { @@ -3975,9 +4017,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, drm_dp_is_branch(intel_dp->dpcd)); - intel_dp_set_sink_rates(intel_dp); - intel_dp_set_max_sink_lane_count(intel_dp); - intel_dp_set_common_rates(intel_dp); + intel_dp_update_sink_caps(intel_dp); } if (intel_dp_has_sink_count(intel_dp)) { @@ -4800,13 +4840,15 @@ static bool intel_dp_mst_link_status(struct intel_dp *intel_dp) * - %true if pending interrupts were serviced (or no interrupts were * pending) w/o detecting an error condition. * - %false if an error condition - like AUX failure or a loss of link - is - * detected, which needs servicing from the hotplug work. + * detected, or another condition - like a DP tunnel BW state change - needs + * servicing from the hotplug work. 
*/ static bool intel_dp_check_mst_status(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); bool link_ok = true; + bool reprobe_needed = false; drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0); @@ -4833,6 +4875,13 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp) intel_dp_mst_hpd_irq(intel_dp, esi, ack); + if (esi[3] & DP_TUNNELING_IRQ) { + if (drm_dp_tunnel_handle_irq(i915->display.dp_tunnel_mgr, + &intel_dp->aux)) + reprobe_needed = true; + ack[3] |= DP_TUNNELING_IRQ; + } + if (!memchr_inv(ack, 0, sizeof(ack))) break; @@ -4843,7 +4892,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp) drm_dp_mst_hpd_irq_send_new_request(&intel_dp->mst_mgr); } - return link_ok; + return link_ok && !reprobe_needed; } static void @@ -4970,9 +5019,10 @@ int intel_dp_get_active_pipes(struct intel_dp *intel_dp, if (!crtc_state->hw.active) continue; - if (conn_state->commit && - !try_wait_for_completion(&conn_state->commit->hw_done)) - continue; + if (conn_state->commit) + drm_WARN_ON(&i915->drm, + !wait_for_completion_timeout(&conn_state->commit->hw_done, + msecs_to_jiffies(5000))); *pipe_mask |= BIT(crtc->pipe); } @@ -5202,23 +5252,32 @@ static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp) drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n"); } -static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp) +static bool intel_dp_check_link_service_irq(struct intel_dp *intel_dp) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + bool reprobe_needed = false; u8 val; if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) - return; + return false; if (drm_dp_dpcd_readb(&intel_dp->aux, DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val) - return; + return false; + + if ((val & DP_TUNNELING_IRQ) && + drm_dp_tunnel_handle_irq(i915->display.dp_tunnel_mgr, + &intel_dp->aux)) + reprobe_needed = true; if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1) - return; + return reprobe_needed; if (val & HDMI_LINK_STATUS_CHANGED) intel_dp_handle_hdmi_link_status_change(intel_dp); + + return reprobe_needed; } /* @@ -5239,6 +5298,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u8 old_sink_count = intel_dp->sink_count; + bool reprobe_needed = false; bool ret; /* @@ -5261,7 +5321,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp) } intel_dp_check_device_service_irq(intel_dp); - intel_dp_check_link_service_irq(intel_dp); + reprobe_needed = intel_dp_check_link_service_irq(intel_dp); /* Handle CEC interrupts, if any */ drm_dp_cec_irq(&intel_dp->aux); @@ -5288,10 +5348,10 @@ intel_dp_short_pulse(struct intel_dp *intel_dp) * FIXME get rid of the ad-hoc phy test modeset code * and properly incorporate it into the normal modeset. 
*/ - return false; + reprobe_needed = true; } - return true; + return !reprobe_needed; } /* XXX this is probably wrong for multiple downstream ports */ @@ -5601,6 +5661,7 @@ intel_dp_detect(struct drm_connector *connector, struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *encoder = &dig_port->base; enum drm_connector_status status; + int ret; drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); @@ -5636,9 +5697,18 @@ intel_dp_detect(struct drm_connector *connector, intel_dp->is_mst); } + intel_dp_tunnel_disconnect(intel_dp); + goto out; } + ret = intel_dp_tunnel_detect(intel_dp, ctx); + if (ret == -EDEADLK) + return ret; + + if (ret == 1) + intel_connector->base.epoch_counter++; + intel_dp_detect_dsc_caps(intel_dp, intel_connector); intel_dp_configure_mst(intel_dp); @@ -5669,8 +5739,6 @@ intel_dp_detect(struct drm_connector *connector, * with an IRQ_HPD, so force a link status check. */ if (!intel_dp_is_edp(intel_dp)) { - int ret; - ret = intel_dp_retrain_link(encoder, ctx); if (ret) return ret; @@ -5810,6 +5878,8 @@ void intel_dp_encoder_flush_work(struct drm_encoder *encoder) intel_dp_mst_encoder_cleanup(dig_port); + intel_dp_tunnel_destroy(intel_dp); + intel_pps_vdd_off_sync(intel_dp); /* @@ -5826,6 +5896,8 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); intel_pps_vdd_off_sync(intel_dp); + + intel_dp_tunnel_suspend(intel_dp); } void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder) @@ -5963,6 +6035,15 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn, return ret; } + if (!intel_connector_needs_modeset(state, conn)) + return 0; + + ret = intel_dp_tunnel_atomic_check_state(state, + intel_dp, + intel_conn); + if (ret) + return ret; + /* * We don't enable port sync on BDW due to missing w/as and * due to not having adjusted the modeset sequence appropriately. @@ -5970,9 +6051,6 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn, if (DISPLAY_VER(dev_priv) < 9) return 0; - if (!intel_connector_needs_modeset(state, conn)) - return 0; - if (conn->has_tile) { ret = intel_modeset_tile_group(state, conn->tile_group->id); if (ret) @@ -6029,6 +6107,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_dp *intel_dp = &dig_port->dp; + u8 dpcd[DP_RECEIVER_CAP_SIZE]; if (dig_port->base.type == INTEL_OUTPUT_EDP && (long_hpd || !intel_pps_have_panel_power_or_vdd(intel_dp))) { @@ -6051,6 +6130,17 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd) dig_port->base.base.name, long_hpd ? "long" : "short"); + /* + * TBT DP tunnels require the GFX driver to read out the DPRX caps in + * response to long HPD pulses. The DP hotplug handler does that, + * however the hotplug handler may be blocked by another + * connector's/encoder's hotplug handler. Since the TBT CM may not + * complete the DP tunnel BW request for the latter connector/encoder + * waiting for this encoder's DPRX read, perform a dummy read here. 
+ */ + if (long_hpd) + intel_dp_read_dprx_caps(intel_dp, dpcd); + if (long_hpd) { intel_dp->reset_link_params = true; return IRQ_NONE; @@ -6371,6 +6461,14 @@ static void intel_dp_modeset_retry_work_fn(struct work_struct *work) mutex_unlock(&connector->dev->mode_config.mutex); /* Send Hotplug uevent so userspace can reprobe */ drm_kms_helper_connector_hotplug_event(connector); + + drm_connector_put(connector); +} + +void intel_dp_init_modeset_retry_work(struct intel_connector *connector) +{ + INIT_WORK(&connector->modeset_retry_work, + intel_dp_modeset_retry_work_fn); } bool @@ -6387,8 +6485,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, int type; /* Initialize the work for modeset in case of link train failure */ - INIT_WORK(&intel_connector->modeset_retry_work, - intel_dp_modeset_retry_work_fn); + intel_dp_init_modeset_retry_work(intel_connector); if (drm_WARN(dev, dig_port->max_lanes < 1, "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n", diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index 530cc97bc42f..564a587e2d01 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -43,6 +43,12 @@ void intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); int intel_dp_min_bpp(enum intel_output_format output_format); +void intel_dp_init_modeset_retry_work(struct intel_connector *connector); +void intel_dp_queue_modeset_retry_work(struct intel_connector *connector); +void +intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state, + struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); bool intel_dp_init_connector(struct intel_digital_port *dig_port, struct intel_connector *intel_connector); void intel_dp_set_link_params(struct intel_dp *intel_dp, @@ -94,7 +100,11 @@ void intel_dp_mst_suspend(struct drm_i915_private *dev_priv); void intel_dp_mst_resume(struct drm_i915_private *dev_priv); int intel_dp_max_link_rate(struct intel_dp *intel_dp); int intel_dp_max_lane_count(struct intel_dp *intel_dp); +int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state); int intel_dp_rate_select(struct intel_dp *intel_dp, int rate); +int intel_dp_max_common_rate(struct intel_dp *intel_dp); +int intel_dp_max_common_lane_count(struct intel_dp *intel_dp); +void intel_dp_update_sink_caps(struct intel_dp *intel_dp); void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, u8 *link_bw, u8 *rate_select); @@ -105,7 +115,8 @@ bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp); int intel_dp_link_required(int pixel_clock, int bpp); int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16, int bw_overhead); -int intel_dp_max_data_rate(int max_link_rate, int max_lanes); +int intel_dp_max_link_data_rate(struct intel_dp *intel_dp, + int max_dprx_rate, int max_dprx_lanes); bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp); bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c index 3a595cd433d4..b98a87883fef 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c @@ -36,8 +36,10 @@ static u32 transcoder_to_stream_enc_status(enum transcoder cpu_transcoder) } } -static void 
intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout) +static void intel_dp_hdcp_wait_for_cp_irq(struct intel_connector *connector, + int timeout) { + struct intel_hdcp *hdcp = &connector->hdcp; long ret; #define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count)) @@ -45,7 +47,8 @@ static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout) msecs_to_jiffies(timeout)); if (!ret) - DRM_DEBUG_KMS("Timedout at waiting for CP_IRQ\n"); + drm_dbg_kms(connector->base.dev, + "Timedout at waiting for CP_IRQ\n"); } static @@ -122,13 +125,13 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *dig_port, } static -int intel_dp_hdcp_read_bcaps(struct intel_digital_port *dig_port, +int intel_dp_hdcp_read_bcaps(struct drm_dp_aux *aux, + struct drm_i915_private *i915, u8 *bcaps) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); ssize_t ret; - ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BCAPS, + ret = drm_dp_dpcd_read(aux, DP_AUX_HDCP_BCAPS, bcaps, 1); if (ret != 1) { drm_dbg_kms(&i915->drm, @@ -143,10 +146,11 @@ static int intel_dp_hdcp_repeater_present(struct intel_digital_port *dig_port, bool *repeater_present) { + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); ssize_t ret; u8 bcaps; - ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps); + ret = intel_dp_hdcp_read_bcaps(&dig_port->dp.aux, i915, &bcaps); if (ret) return ret; @@ -265,13 +269,14 @@ bool intel_dp_hdcp_check_link(struct intel_digital_port *dig_port, } static -int intel_dp_hdcp_capable(struct intel_digital_port *dig_port, - bool *hdcp_capable) +int intel_dp_hdcp_get_capability(struct intel_digital_port *dig_port, + bool *hdcp_capable) { + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); ssize_t ret; u8 bcaps; - ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps); + ret = intel_dp_hdcp_read_bcaps(&dig_port->dp.aux, i915, &bcaps); if (ret) return ret; @@ -330,23 +335,13 @@ static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = { 0, 0 }, }; -static struct drm_dp_aux * -intel_dp_hdcp_get_aux(struct intel_connector *connector) -{ - struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - - if (intel_encoder_is_mst(connector->encoder)) - return &connector->port->aux; - else - return &dig_port->dp.aux; -} - static int intel_dp_hdcp2_read_rx_status(struct intel_connector *connector, u8 *rx_status) { struct drm_i915_private *i915 = to_i915(connector->base.dev); - struct drm_dp_aux *aux = intel_dp_hdcp_get_aux(connector); + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); + struct drm_dp_aux *aux = &dig_port->dp.aux; ssize_t ret; ret = drm_dp_dpcd_read(aux, @@ -387,7 +382,8 @@ int hdcp2_detect_msg_availability(struct intel_connector *connector, *msg_ready = true; break; default: - DRM_ERROR("Unidentified msg_id: %d\n", msg_id); + drm_err(connector->base.dev, + "Unidentified msg_id: %d\n", msg_id); return -EINVAL; } @@ -399,7 +395,9 @@ intel_dp_hdcp2_wait_for_msg(struct intel_connector *connector, const struct hdcp2_dp_msg_data *hdcp2_msg_data) { struct drm_i915_private *i915 = to_i915(connector->base.dev); - struct intel_hdcp *hdcp = &connector->hdcp; + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); + struct intel_dp *dp = &dig_port->dp; + struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; u8 msg_id = hdcp2_msg_data->msg_id; int ret, timeout; bool msg_ready = false; @@ -421,7 +419,7 @@ intel_dp_hdcp2_wait_for_msg(struct intel_connector *connector, * As 
we want to check the msg availability at timeout, Ignoring * the timeout at wait for CP_IRQ. */ - intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout); + intel_dp_hdcp_wait_for_cp_irq(connector, timeout); ret = hdcp2_detect_msg_availability(connector, msg_id, &msg_ready); if (!msg_ready) @@ -454,8 +452,9 @@ int intel_dp_hdcp2_write_msg(struct intel_connector *connector, unsigned int offset; u8 *byte = buf; ssize_t ret, bytes_to_write, len; + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); + struct drm_dp_aux *aux = &dig_port->dp.aux; const struct hdcp2_dp_msg_data *hdcp2_msg_data; - struct drm_dp_aux *aux; hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte); if (!hdcp2_msg_data) @@ -463,8 +462,6 @@ int intel_dp_hdcp2_write_msg(struct intel_connector *connector, offset = hdcp2_msg_data->offset; - aux = intel_dp_hdcp_get_aux(connector); - /* No msg_id in DP HDCP2.2 msgs */ bytes_to_write = size - 1; byte++; @@ -490,7 +487,8 @@ static ssize_t get_receiver_id_list_rx_info(struct intel_connector *connector, u32 *dev_cnt, u8 *byte) { - struct drm_dp_aux *aux = intel_dp_hdcp_get_aux(connector); + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); + struct drm_dp_aux *aux = &dig_port->dp.aux; ssize_t ret; u8 *rx_info = byte; @@ -515,8 +513,9 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector, { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - struct intel_hdcp *hdcp = &connector->hdcp; - struct drm_dp_aux *aux; + struct drm_dp_aux *aux = &dig_port->dp.aux; + struct intel_dp *dp = &dig_port->dp; + struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; unsigned int offset; u8 *byte = buf; ssize_t ret, bytes_to_recv, len; @@ -530,8 +529,6 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector, return -EINVAL; offset = hdcp2_msg_data->offset; - aux = intel_dp_hdcp_get_aux(connector); - ret = intel_dp_hdcp2_wait_for_msg(connector, hdcp2_msg_data); if (ret < 0) return ret; @@ -561,13 +558,8 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector, /* Entire msg read timeout since initiate of msg read */ if (bytes_to_recv == size - 1 && hdcp2_msg_data->msg_read_timeout > 0) { - if (intel_encoder_is_mst(connector->encoder)) - msg_end = ktime_add_ms(ktime_get_raw(), - hdcp2_msg_data->msg_read_timeout * - connector->port->parent->num_ports); - else - msg_end = ktime_add_ms(ktime_get_raw(), - hdcp2_msg_data->msg_read_timeout); + msg_end = ktime_add_ms(ktime_get_raw(), + hdcp2_msg_data->msg_read_timeout); } ret = drm_dp_dpcd_read(aux, offset, @@ -648,25 +640,69 @@ int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port, } static -int intel_dp_hdcp2_capable(struct intel_connector *connector, - bool *capable) +int _intel_dp_hdcp2_get_capability(struct drm_dp_aux *aux, + bool *capable) { - struct drm_dp_aux *aux; u8 rx_caps[3]; - int ret; - - aux = intel_dp_hdcp_get_aux(connector); + int ret, i; *capable = false; - ret = drm_dp_dpcd_read(aux, - DP_HDCP_2_2_REG_RX_CAPS_OFFSET, - rx_caps, HDCP_2_2_RXCAPS_LEN); - if (ret != HDCP_2_2_RXCAPS_LEN) - return ret >= 0 ? -EIO : ret; - if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL && - HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2])) - *capable = true; + /* + * Some HDCP monitors act really shady by not giving the correct hdcp + * capability on the first rx_caps read and usually take an extra read + * to give the capability. We read rx_caps three times before we + * declare a monitor not capable of HDCP 2.2. 
+ */ + for (i = 0; i < 3; i++) { + ret = drm_dp_dpcd_read(aux, + DP_HDCP_2_2_REG_RX_CAPS_OFFSET, + rx_caps, HDCP_2_2_RXCAPS_LEN); + if (ret != HDCP_2_2_RXCAPS_LEN) + return ret >= 0 ? -EIO : ret; + + if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL && + HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2])) { + *capable = true; + break; + } + } + + return 0; +} + +static +int intel_dp_hdcp2_get_capability(struct intel_connector *connector, + bool *capable) +{ + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); + struct drm_dp_aux *aux = &dig_port->dp.aux; + + return _intel_dp_hdcp2_get_capability(aux, capable); +} + +static +int intel_dp_hdcp_get_remote_capability(struct intel_connector *connector, + bool *hdcp_capable, + bool *hdcp2_capable) +{ + struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct drm_dp_aux *aux = &connector->port->aux; + u8 bcaps; + int ret; + + if (!intel_encoder_is_mst(connector->encoder)) + return -EINVAL; + + ret = _intel_dp_hdcp2_get_capability(aux, hdcp2_capable); + if (ret) + return ret; + + ret = intel_dp_hdcp_read_bcaps(aux, i915, &bcaps); + if (ret) + return ret; + + *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE; return 0; } @@ -682,12 +718,12 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = { .read_v_prime_part = intel_dp_hdcp_read_v_prime_part, .toggle_signalling = intel_dp_hdcp_toggle_signalling, .check_link = intel_dp_hdcp_check_link, - .hdcp_capable = intel_dp_hdcp_capable, + .hdcp_get_capability = intel_dp_hdcp_get_capability, .write_2_2_msg = intel_dp_hdcp2_write_msg, .read_2_2_msg = intel_dp_hdcp2_read_msg, .config_stream_type = intel_dp_hdcp2_config_stream_type, .check_2_2_link = intel_dp_hdcp2_check_link, - .hdcp_2_2_capable = intel_dp_hdcp2_capable, + .hdcp_2_2_get_capability = intel_dp_hdcp2_get_capability, .protocol = HDCP_PROTOCOL_DP, }; @@ -812,13 +848,14 @@ static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = { .toggle_signalling = intel_dp_hdcp_toggle_signalling, .stream_encryption = intel_dp_mst_hdcp_stream_encryption, .check_link = intel_dp_hdcp_check_link, - .hdcp_capable = intel_dp_hdcp_capable, + .hdcp_get_capability = intel_dp_hdcp_get_capability, .write_2_2_msg = intel_dp_hdcp2_write_msg, .read_2_2_msg = intel_dp_hdcp2_read_msg, .config_stream_type = intel_dp_hdcp2_config_stream_type, .stream_2_2_encryption = intel_dp_mst_hdcp2_stream_encryption, .check_2_2_link = intel_dp_mst_hdcp2_check_link, - .hdcp_2_2_capable = intel_dp_hdcp2_capable, + .hdcp_2_2_get_capability = intel_dp_hdcp2_get_capability, + .get_remote_hdcp_capability = intel_dp_hdcp_get_remote_capability, .protocol = HDCP_PROTOCOL_DP, }; diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 1abfafbbfa75..fb84ca98bb7a 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -162,6 +162,28 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEI return lttpr_count; } +int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + if (intel_dp_is_edp(intel_dp)) + return 0; + + /* + * Detecting LTTPRs must be avoided on platforms with an AUX timeout + * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1). 
+ */ + if (DISPLAY_VER(i915) >= 10 && !IS_GEMINILAKE(i915)) + if (drm_dp_dpcd_probe(&intel_dp->aux, + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV)) + return -EIO; + + if (drm_dp_read_dpcd_caps(&intel_dp->aux, dpcd)) + return -EIO; + + return 0; +} + /** * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode * @intel_dp: Intel DP struct @@ -192,12 +214,10 @@ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp) if (!intel_dp_is_edp(intel_dp) && (DISPLAY_VER(i915) >= 10 && !IS_GEMINILAKE(i915))) { u8 dpcd[DP_RECEIVER_CAP_SIZE]; + int err = intel_dp_read_dprx_caps(intel_dp, dpcd); - if (drm_dp_dpcd_probe(&intel_dp->aux, DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV)) - return -EIO; - - if (drm_dp_read_dpcd_caps(&intel_dp->aux, dpcd)) - return -EIO; + if (err != 0) + return err; lttpr_count = intel_dp_init_lttpr(intel_dp, dpcd); } @@ -1075,7 +1095,6 @@ static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { struct intel_connector *intel_connector = intel_dp->attached_connector; - struct drm_i915_private *i915 = dp_to_i915(intel_dp); if (!intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base)) { lt_dbg(intel_dp, DP_PHY_DPRX, "Link Training failed on disconnected sink.\n"); @@ -1093,7 +1112,7 @@ static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp, } /* Schedule a Hotplug Uevent to userspace to start modeset */ - queue_work(i915->unordered_wq, &intel_connector->modeset_retry_work); + intel_dp_queue_modeset_retry_work(intel_connector); } /* Perform the link training on all LTTPRs and the DPRX on a link. */ diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h index 2c8f2775891b..19836a8a4f90 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h @@ -11,6 +11,7 @@ struct intel_crtc_state; struct intel_dp; +int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_SIZE]); int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp); void intel_dp_get_adjust_train(struct intel_dp *intel_dp, diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 5fa25a5a36b5..db1254b036f1 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -42,6 +42,7 @@ #include "intel_dp.h" #include "intel_dp_hdcp.h" #include "intel_dp_mst.h" +#include "intel_dp_tunnel.h" #include "intel_dpio_phy.h" #include "intel_hdcp.h" #include "intel_hotplug.h" @@ -523,6 +524,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state); struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_dp *intel_dp = &intel_mst->primary->dp; const struct intel_connector *connector = @@ -619,7 +621,8 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, intel_psr_compute_config(intel_dp, pipe_config, conn_state); - return 0; + return intel_dp_tunnel_atomic_compute_stream_bw(state, intel_dp, connector, + pipe_config); } /* @@ -876,6 +879,14 @@ intel_dp_mst_atomic_check(struct drm_connector *connector, if (ret) return ret; + if (intel_connector_needs_modeset(state, connector)) { + ret 
= intel_dp_tunnel_atomic_check_state(state, + intel_connector->mst_port, + intel_connector); + if (ret) + return ret; + } + return drm_dp_atomic_release_time_slots(&state->base, &intel_connector->mst_port->mst_mgr, intel_connector->port); @@ -1197,6 +1208,7 @@ static bool intel_dp_mst_initial_fastset_check(struct intel_encoder *encoder, static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector) { struct intel_connector *intel_connector = to_intel_connector(connector); + struct drm_i915_private *i915 = to_i915(intel_connector->base.dev); struct intel_dp *intel_dp = intel_connector->mst_port; const struct drm_edid *drm_edid; int ret; @@ -1204,6 +1216,9 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector) if (drm_connector_is_unregistered(connector)) return intel_connector_update_modes(connector, NULL); + if (!intel_display_driver_check_access(i915)) + return drm_edid_connector_add_modes(connector); + drm_edid = drm_dp_mst_edid_read(connector, &intel_dp->mst_mgr, intel_connector->port); ret = intel_connector_update_modes(connector, drm_edid); @@ -1295,7 +1310,8 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector, max_link_clock = intel_dp_max_link_rate(intel_dp); max_lanes = intel_dp_max_lane_count(intel_dp); - max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); + max_rate = intel_dp_max_link_data_rate(intel_dp, + max_link_clock, max_lanes); mode_rate = intel_dp_link_required(mode->clock, min_bpp); ret = drm_modeset_lock(&mgr->base.lock, ctx); @@ -1542,6 +1558,8 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo intel_connector->port = port; drm_dp_mst_get_port_malloc(port); + intel_dp_init_modeset_retry_work(intel_connector); + intel_connector->dp.dsc_decompression_aux = drm_dp_mst_dsc_aux_for_port(port); intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector); intel_connector->dp.dsc_hblank_expansion_quirk = diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c new file mode 100644 index 000000000000..75d76f91ecbd --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c @@ -0,0 +1,811 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include "i915_drv.h" + +#include + +#include "intel_atomic.h" +#include "intel_display_limits.h" +#include "intel_display_types.h" +#include "intel_dp.h" +#include "intel_dp_link_training.h" +#include "intel_dp_mst.h" +#include "intel_dp_tunnel.h" +#include "intel_link_bw.h" + +struct intel_dp_tunnel_inherited_state { + struct drm_dp_tunnel_ref ref[I915_MAX_PIPES]; +}; + +/** + * intel_dp_tunnel_disconnect - Disconnect a DP tunnel from a port + * @intel_dp: DP port object the tunnel is connected to + * + * Disconnect a DP tunnel from @intel_dp, destroying any related state. This + * should be called after detecting a sink-disconnect event from the port. + */ +void intel_dp_tunnel_disconnect(struct intel_dp *intel_dp) +{ + drm_dp_tunnel_destroy(intel_dp->tunnel); + intel_dp->tunnel = NULL; +} + +/** + * intel_dp_tunnel_destroy - Destroy a DP tunnel + * @intel_dp: DP port object the tunnel is connected to + * + * Destroy a DP tunnel connected to @intel_dp, after disabling the BW + * allocation mode on the tunnel. This should be called while destroying the + * port. 
+ */ +void intel_dp_tunnel_destroy(struct intel_dp *intel_dp) +{ + if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp)) + drm_dp_tunnel_disable_bw_alloc(intel_dp->tunnel); + + intel_dp_tunnel_disconnect(intel_dp); +} + +static int kbytes_to_mbits(int kbytes) +{ + return DIV_ROUND_UP(kbytes * 8, 1000); +} + +static int get_current_link_bw(struct intel_dp *intel_dp, + bool *below_dprx_bw) +{ + int rate = intel_dp_max_common_rate(intel_dp); + int lane_count = intel_dp_max_common_lane_count(intel_dp); + int bw; + + bw = intel_dp_max_link_data_rate(intel_dp, rate, lane_count); + *below_dprx_bw = bw < drm_dp_max_dprx_data_rate(rate, lane_count); + + return bw; +} + +static int update_tunnel_state(struct intel_dp *intel_dp) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + bool old_bw_below_dprx; + bool new_bw_below_dprx; + int old_bw; + int new_bw; + int ret; + + old_bw = get_current_link_bw(intel_dp, &old_bw_below_dprx); + + ret = drm_dp_tunnel_update_state(intel_dp->tunnel); + if (ret < 0) { + drm_dbg_kms(&i915->drm, + "[DPTUN %s][ENCODER:%d:%s] State update failed (err %pe)\n", + drm_dp_tunnel_name(intel_dp->tunnel), + encoder->base.base.id, encoder->base.name, + ERR_PTR(ret)); + + return ret; + } + + if (ret == 0 || + !drm_dp_tunnel_bw_alloc_is_enabled(intel_dp->tunnel)) + return 0; + + intel_dp_update_sink_caps(intel_dp); + + new_bw = get_current_link_bw(intel_dp, &new_bw_below_dprx); + + /* Suppress the notification if the mode list can't change due to bw. */ + if (old_bw_below_dprx == new_bw_below_dprx && + !new_bw_below_dprx) + return 0; + + drm_dbg_kms(&i915->drm, + "[DPTUN %s][ENCODER:%d:%s] Notify users about BW change: %d -> %d\n", + drm_dp_tunnel_name(intel_dp->tunnel), + encoder->base.base.id, encoder->base.name, + kbytes_to_mbits(old_bw), kbytes_to_mbits(new_bw)); + + return 1; +} + +/* + * Allocate the BW for a tunnel on a DP connector/port if the connector/port + * was already active when detecting the tunnel. The allocated BW must be + * freed by the next atomic modeset, storing the BW in the + * intel_atomic_state::inherited_dp_tunnels, and calling + * intel_dp_tunnel_atomic_free_bw(). 
+ */ +static int allocate_initial_tunnel_bw_for_pipes(struct intel_dp *intel_dp, u8 pipe_mask) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + struct intel_crtc *crtc; + int tunnel_bw = 0; + int err; + + for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) { + const struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + int stream_bw = intel_dp_config_required_rate(crtc_state); + + tunnel_bw += stream_bw; + + drm_dbg_kms(&i915->drm, + "[DPTUN %s][ENCODER:%d:%s][CRTC:%d:%s] Initial BW for stream %d: %d/%d Mb/s\n", + drm_dp_tunnel_name(intel_dp->tunnel), + encoder->base.base.id, encoder->base.name, + crtc->base.base.id, crtc->base.name, + crtc->pipe, + kbytes_to_mbits(stream_bw), kbytes_to_mbits(tunnel_bw)); + } + + err = drm_dp_tunnel_alloc_bw(intel_dp->tunnel, tunnel_bw); + if (err) { + drm_dbg_kms(&i915->drm, + "[DPTUN %s][ENCODER:%d:%s] Initial BW allocation failed (err %pe)\n", + drm_dp_tunnel_name(intel_dp->tunnel), + encoder->base.base.id, encoder->base.name, + ERR_PTR(err)); + + return err; + } + + return update_tunnel_state(intel_dp); +} + +static int allocate_initial_tunnel_bw(struct intel_dp *intel_dp, + struct drm_modeset_acquire_ctx *ctx) +{ + u8 pipe_mask; + int err; + + err = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask); + if (err) + return err; + + return allocate_initial_tunnel_bw_for_pipes(intel_dp, pipe_mask); +} + +static int detect_new_tunnel(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + struct drm_dp_tunnel *tunnel; + int ret; + + tunnel = drm_dp_tunnel_detect(i915->display.dp_tunnel_mgr, + &intel_dp->aux); + if (IS_ERR(tunnel)) + return PTR_ERR(tunnel); + + intel_dp->tunnel = tunnel; + + ret = drm_dp_tunnel_enable_bw_alloc(intel_dp->tunnel); + if (ret) { + if (ret == -EOPNOTSUPP) + return 0; + + drm_dbg_kms(&i915->drm, + "[DPTUN %s][ENCODER:%d:%s] Failed to enable BW allocation mode (ret %pe)\n", + drm_dp_tunnel_name(intel_dp->tunnel), + encoder->base.base.id, encoder->base.name, + ERR_PTR(ret)); + + /* Keep the tunnel with BWA disabled */ + return 0; + } + + ret = allocate_initial_tunnel_bw(intel_dp, ctx); + if (ret < 0) + intel_dp_tunnel_destroy(intel_dp); + + return ret; +} + +/** + * intel_dp_tunnel_detect - Detect a DP tunnel on a port + * @intel_dp: DP port object + * @ctx: lock context acquired by the connector detection handler + * + * Detect a DP tunnel on the @intel_dp port, enabling the BW allocation mode + * on it if supported and allocating the BW required on an already active port. + * The BW allocated this way must be freed by the next atomic modeset calling + * intel_dp_tunnel_atomic_free_bw(). + * + * If @intel_dp has already a tunnel detected on it, update the tunnel's state + * wrt. its support for BW allocation mode and the available BW via the + * tunnel. If the tunnel's state change requires this - for instance the + * tunnel's group ID has changed - the tunnel will be dropped and recreated. + * + * Return 0 in case of success - after any tunnel detected and added to + * @intel_dp - 1 in case the BW on an already existing tunnel has changed in a + * way that requires notifying user space. 
+ */ +int intel_dp_tunnel_detect(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx) +{ + int ret; + + if (intel_dp_is_edp(intel_dp)) + return 0; + + if (intel_dp->tunnel) { + ret = update_tunnel_state(intel_dp); + if (ret >= 0) + return ret; + + /* Try to recreate the tunnel after an update error. */ + intel_dp_tunnel_destroy(intel_dp); + } + + return detect_new_tunnel(intel_dp, ctx); +} + +/** + * intel_dp_tunnel_bw_alloc_is_enabled - Query the BW allocation support on a tunnel + * @intel_dp: DP port object + * + * Query whether a DP tunnel is connected on @intel_dp and the tunnel supports + * the BW allocation mode. + * + * Returns %true if the BW allocation mode is supported on @intel_dp. + */ +bool intel_dp_tunnel_bw_alloc_is_enabled(struct intel_dp *intel_dp) +{ + return drm_dp_tunnel_bw_alloc_is_enabled(intel_dp->tunnel); +} + +/** + * intel_dp_tunnel_suspend - Suspend a DP tunnel connected on a port + * @intel_dp: DP port object + * + * Suspend a DP tunnel on @intel_dp with BW allocation mode enabled on it. + */ +void intel_dp_tunnel_suspend(struct intel_dp *intel_dp) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_connector *connector = intel_dp->attached_connector; + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + + if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp)) + return; + + drm_dbg_kms(&i915->drm, "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Suspend\n", + drm_dp_tunnel_name(intel_dp->tunnel), + connector->base.base.id, connector->base.name, + encoder->base.base.id, encoder->base.name); + + drm_dp_tunnel_disable_bw_alloc(intel_dp->tunnel); + + intel_dp->tunnel_suspended = true; +} + +/** + * intel_dp_tunnel_resume - Resume a DP tunnel connected on a port + * @intel_dp: DP port object + * @crtc_state: CRTC state + * @dpcd_updated: the DPCD DPRX capabilities got updated during resume + * + * Resume a DP tunnel on @intel_dp with BW allocation mode enabled on it. + */ +void intel_dp_tunnel_resume(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + bool dpcd_updated) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_connector *connector = intel_dp->attached_connector; + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + u8 dpcd[DP_RECEIVER_CAP_SIZE]; + u8 pipe_mask; + int err = 0; + + if (!intel_dp->tunnel_suspended) + return; + + intel_dp->tunnel_suspended = false; + + drm_dbg_kms(&i915->drm, "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Resume\n", + drm_dp_tunnel_name(intel_dp->tunnel), + connector->base.base.id, connector->base.name, + encoder->base.base.id, encoder->base.name); + + /* + * The TBT Connection Manager requires the GFX driver to read out + * the sink's DPRX caps to be able to service any BW requests later. + * During resume overriding the caps in @intel_dp cached before + * suspend must be avoided, so do here only a dummy read, unless the + * capabilities were updated already during resume. 
+	 */
+	if (!dpcd_updated) {
+		err = intel_dp_read_dprx_caps(intel_dp, dpcd);
+
+		if (err) {
+			drm_dp_tunnel_set_io_error(intel_dp->tunnel);
+			goto out_err;
+		}
+	}
+
+	err = drm_dp_tunnel_enable_bw_alloc(intel_dp->tunnel);
+	if (err)
+		goto out_err;
+
+	pipe_mask = 0;
+	if (crtc_state) {
+		struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+		/* TODO: Add support for MST */
+		pipe_mask |= BIT(crtc->pipe);
+	}
+
+	err = allocate_initial_tunnel_bw_for_pipes(intel_dp, pipe_mask);
+	if (err < 0)
+		goto out_err;
+
+	return;
+
+out_err:
+	drm_dbg_kms(&i915->drm,
+		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Tunnel can't be resumed, will drop and redetect it (err %pe)\n",
+		    drm_dp_tunnel_name(intel_dp->tunnel),
+		    connector->base.base.id, connector->base.name,
+		    encoder->base.base.id, encoder->base.name,
+		    ERR_PTR(err));
+}
+
+static struct drm_dp_tunnel *
+get_inherited_tunnel(struct intel_atomic_state *state, struct intel_crtc *crtc)
+{
+	if (!state->inherited_dp_tunnels)
+		return NULL;
+
+	return state->inherited_dp_tunnels->ref[crtc->pipe].tunnel;
+}
+
+static int
+add_inherited_tunnel(struct intel_atomic_state *state,
+		     struct drm_dp_tunnel *tunnel,
+		     struct intel_crtc *crtc)
+{
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
+	struct drm_dp_tunnel *old_tunnel;
+
+	old_tunnel = get_inherited_tunnel(state, crtc);
+	if (old_tunnel) {
+		drm_WARN_ON(&i915->drm, old_tunnel != tunnel);
+		return 0;
+	}
+
+	if (!state->inherited_dp_tunnels) {
+		state->inherited_dp_tunnels = kzalloc(sizeof(*state->inherited_dp_tunnels),
+						      GFP_KERNEL);
+		if (!state->inherited_dp_tunnels)
+			return -ENOMEM;
+	}
+
+	drm_dp_tunnel_ref_get(tunnel, &state->inherited_dp_tunnels->ref[crtc->pipe]);
+
+	return 0;
+}
+
+static int check_inherited_tunnel_state(struct intel_atomic_state *state,
+					struct intel_dp *intel_dp,
+					const struct intel_digital_connector_state *old_conn_state)
+{
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+	struct intel_connector *connector =
+		to_intel_connector(old_conn_state->base.connector);
+	struct intel_crtc *old_crtc;
+	const struct intel_crtc_state *old_crtc_state;
+
+	/*
+	 * If a BWA tunnel gets detected only after the corresponding
+	 * connector got enabled already without a BWA tunnel, or with a
+	 * different BWA tunnel (which was removed meanwhile), the old CRTC
+	 * state won't contain the state of the current tunnel. This tunnel
+	 * still has a reserved BW, which needs to be released; add the state
+	 * for such inherited tunnels separately, only to this atomic state.
+	 */
+	if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
+		return 0;
+
+	if (!old_conn_state->base.crtc)
+		return 0;
+
+	old_crtc = to_intel_crtc(old_conn_state->base.crtc);
+	old_crtc_state = intel_atomic_get_old_crtc_state(state, old_crtc);
+
+	if (!old_crtc_state->hw.active ||
+	    old_crtc_state->dp_tunnel_ref.tunnel == intel_dp->tunnel)
+		return 0;
+
+	drm_dbg_kms(&i915->drm,
+		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Adding state for inherited tunnel %p\n",
+		    drm_dp_tunnel_name(intel_dp->tunnel),
+		    connector->base.base.id, connector->base.name,
+		    encoder->base.base.id, encoder->base.name,
+		    old_crtc->base.base.id, old_crtc->base.name,
+		    intel_dp->tunnel);
+
+	return add_inherited_tunnel(state, intel_dp->tunnel, old_crtc);
+}
+
+/**
+ * intel_dp_tunnel_atomic_cleanup_inherited_state - Free any inherited DP tunnel state
+ * @state: Atomic state
+ *
+ * Free the inherited DP tunnel state in @state.
+ */ +void intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state *state) +{ + enum pipe pipe; + + if (!state->inherited_dp_tunnels) + return; + + for_each_pipe(to_i915(state->base.dev), pipe) + if (state->inherited_dp_tunnels->ref[pipe].tunnel) + drm_dp_tunnel_ref_put(&state->inherited_dp_tunnels->ref[pipe]); + + kfree(state->inherited_dp_tunnels); + state->inherited_dp_tunnels = NULL; +} + +static int intel_dp_tunnel_atomic_add_group_state(struct intel_atomic_state *state, + struct drm_dp_tunnel *tunnel) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + u32 pipe_mask; + int err; + + err = drm_dp_tunnel_atomic_get_group_streams_in_state(&state->base, + tunnel, &pipe_mask); + if (err) + return err; + + drm_WARN_ON(&i915->drm, pipe_mask & ~((1 << I915_MAX_PIPES) - 1)); + + return intel_modeset_pipes_in_mask_early(state, "DPTUN", pipe_mask); +} + +/** + * intel_dp_tunnel_atomic_add_state_for_crtc - Add CRTC specific DP tunnel state + * @state: Atomic state + * @crtc: CRTC to add the tunnel state for + * + * Add the DP tunnel state for @crtc if the CRTC (aka DP tunnel stream) is enabled + * via a DP tunnel. + * + * Return 0 in case of success, a negative error code otherwise. + */ +int intel_dp_tunnel_atomic_add_state_for_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + const struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct drm_dp_tunnel_state *tunnel_state; + struct drm_dp_tunnel *tunnel = new_crtc_state->dp_tunnel_ref.tunnel; + + if (!tunnel) + return 0; + + tunnel_state = drm_dp_tunnel_atomic_get_state(&state->base, tunnel); + if (IS_ERR(tunnel_state)) + return PTR_ERR(tunnel_state); + + return 0; +} + +static int check_group_state(struct intel_atomic_state *state, + struct intel_dp *intel_dp, + struct intel_connector *connector, + struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + if (!crtc_state->dp_tunnel_ref.tunnel) + return 0; + + drm_dbg_kms(&i915->drm, + "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Adding group state for tunnel %p\n", + drm_dp_tunnel_name(intel_dp->tunnel), + connector->base.base.id, connector->base.name, + encoder->base.base.id, encoder->base.name, + crtc->base.base.id, crtc->base.name, + crtc_state->dp_tunnel_ref.tunnel); + + return intel_dp_tunnel_atomic_add_group_state(state, crtc_state->dp_tunnel_ref.tunnel); +} + +/** + * intel_dp_tunnel_atomic_check_state - Check a connector's DP tunnel specific state + * @state: Atomic state + * @intel_dp: DP port object + * @connector: connector using @intel_dp + * + * Check and add the DP tunnel atomic state for @intel_dp/@connector to + * @state, if there is a DP tunnel detected on @intel_dp with BW allocation + * mode enabled on it, or if @intel_dp/@connector was previously enabled via a + * DP tunnel. + * + * Returns 0 in case of success, or a negative error code otherwise. 
+ */ +int intel_dp_tunnel_atomic_check_state(struct intel_atomic_state *state, + struct intel_dp *intel_dp, + struct intel_connector *connector) +{ + const struct intel_digital_connector_state *old_conn_state = + intel_atomic_get_old_connector_state(state, connector); + const struct intel_digital_connector_state *new_conn_state = + intel_atomic_get_new_connector_state(state, connector); + int err; + + if (old_conn_state->base.crtc) { + err = check_group_state(state, intel_dp, connector, + to_intel_crtc(old_conn_state->base.crtc)); + if (err) + return err; + } + + if (new_conn_state->base.crtc && + new_conn_state->base.crtc != old_conn_state->base.crtc) { + err = check_group_state(state, intel_dp, connector, + to_intel_crtc(new_conn_state->base.crtc)); + if (err) + return err; + } + + return check_inherited_tunnel_state(state, intel_dp, old_conn_state); +} + +/** + * intel_dp_tunnel_atomic_compute_stream_bw - Compute the BW required by a DP tunnel stream + * @state: Atomic state + * @intel_dp: DP object + * @connector: connector using @intel_dp + * @crtc_state: state of CRTC of the given DP tunnel stream + * + * Compute the required BW of CRTC (aka DP tunnel stream), storing this BW to + * the DP tunnel state containing the stream in @state. Before re-calculating a + * BW requirement in the crtc_state state the old BW requirement computed by this + * function must be cleared by calling intel_dp_tunnel_atomic_clear_stream_bw(). + * + * Returns 0 in case of success, a negative error code otherwise. + */ +int intel_dp_tunnel_atomic_compute_stream_bw(struct intel_atomic_state *state, + struct intel_dp *intel_dp, + const struct intel_connector *connector, + struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + int required_rate = intel_dp_config_required_rate(crtc_state); + int ret; + + if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp)) + return 0; + + drm_dbg_kms(&i915->drm, + "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Stream %d required BW %d Mb/s\n", + drm_dp_tunnel_name(intel_dp->tunnel), + connector->base.base.id, connector->base.name, + encoder->base.base.id, encoder->base.name, + crtc->base.base.id, crtc->base.name, + crtc->pipe, + kbytes_to_mbits(required_rate)); + + ret = drm_dp_tunnel_atomic_set_stream_bw(&state->base, intel_dp->tunnel, + crtc->pipe, required_rate); + if (ret < 0) + return ret; + + drm_dp_tunnel_ref_get(intel_dp->tunnel, + &crtc_state->dp_tunnel_ref); + + return 0; +} + +/** + * intel_dp_tunnel_atomic_clear_stream_bw - Clear any DP tunnel stream BW requirement + * @state: Atomic state + * @crtc_state: state of CRTC of the given DP tunnel stream + * + * Clear any DP tunnel stream BW requirement set by + * intel_dp_tunnel_atomic_compute_stream_bw(). + */ +void intel_dp_tunnel_atomic_clear_stream_bw(struct intel_atomic_state *state, + struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + if (!crtc_state->dp_tunnel_ref.tunnel) + return; + + drm_dp_tunnel_atomic_set_stream_bw(&state->base, + crtc_state->dp_tunnel_ref.tunnel, + crtc->pipe, 0); + drm_dp_tunnel_ref_put(&crtc_state->dp_tunnel_ref); +} + +/** + * intel_dp_tunnel_atomic_check_link - Check the DP tunnel atomic state + * @state: intel atomic state + * @limits: link BW limits + * + * Check the link configuration for all DP tunnels in @state. 
If the
+ * configuration is invalid, @limits will be updated if possible to
+ * reduce the total BW, after which the configuration for all CRTCs in
+ * @state must be recomputed with the updated @limits.
+ *
+ * Returns:
+ *   - 0 if the configuration is valid
+ *   - %-EAGAIN, if the configuration is invalid and @limits got updated
+ *     with fallback values with which the configuration of all CRTCs in
+ *     @state must be recomputed
+ *   - Other negative error, if the configuration is invalid without a
+ *     fallback possibility, or the check failed for another reason
+ */
+int intel_dp_tunnel_atomic_check_link(struct intel_atomic_state *state,
+				      struct intel_link_bw_limits *limits)
+{
+	u32 failed_stream_mask;
+	int err;
+
+	err = drm_dp_tunnel_atomic_check_stream_bws(&state->base,
+						    &failed_stream_mask);
+	if (err != -ENOSPC)
+		return err;
+
+	err = intel_link_bw_reduce_bpp(state, limits,
+				       failed_stream_mask, "DP tunnel link BW");
+
+	return err ? : -EAGAIN;
+}
+
+static void atomic_decrease_bw(struct intel_atomic_state *state)
+{
+	struct intel_crtc *crtc;
+	const struct intel_crtc_state *old_crtc_state;
+	const struct intel_crtc_state *new_crtc_state;
+	int i;
+
+	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+		const struct drm_dp_tunnel_state *new_tunnel_state;
+		struct drm_dp_tunnel *tunnel;
+		int old_bw;
+		int new_bw;
+
+		if (!intel_crtc_needs_modeset(new_crtc_state))
+			continue;
+
+		tunnel = get_inherited_tunnel(state, crtc);
+		if (!tunnel)
+			tunnel = old_crtc_state->dp_tunnel_ref.tunnel;
+
+		if (!tunnel)
+			continue;
+
+		old_bw = drm_dp_tunnel_get_allocated_bw(tunnel);
+
+		new_tunnel_state = drm_dp_tunnel_atomic_get_new_state(&state->base, tunnel);
+		new_bw = drm_dp_tunnel_atomic_get_required_bw(new_tunnel_state);
+
+		if (new_bw >= old_bw)
+			continue;
+
+		drm_dp_tunnel_alloc_bw(tunnel, new_bw);
+	}
+}
+
+static void queue_retry_work(struct intel_atomic_state *state,
+			     struct drm_dp_tunnel *tunnel,
+			     const struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
+	struct intel_encoder *encoder;
+
+	encoder = intel_get_crtc_new_encoder(state, crtc_state);
+
+	if (!intel_digital_port_connected(encoder))
+		return;
+
+	drm_dbg_kms(&i915->drm,
+		    "[DPTUN %s][ENCODER:%d:%s] BW allocation failed on a connected sink\n",
+		    drm_dp_tunnel_name(tunnel),
+		    encoder->base.base.id,
+		    encoder->base.name);
+
+	intel_dp_queue_modeset_retry_for_link(state, encoder, crtc_state);
+}
+
+static void atomic_increase_bw(struct intel_atomic_state *state)
+{
+	struct intel_crtc *crtc;
+	const struct intel_crtc_state *crtc_state;
+	int i;
+
+	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+		struct drm_dp_tunnel_state *tunnel_state;
+		struct drm_dp_tunnel *tunnel = crtc_state->dp_tunnel_ref.tunnel;
+		int bw;
+
+		if (!intel_crtc_needs_modeset(crtc_state))
+			continue;
+
+		if (!tunnel)
+			continue;
+
+		tunnel_state = drm_dp_tunnel_atomic_get_new_state(&state->base, tunnel);
+
+		bw = drm_dp_tunnel_atomic_get_required_bw(tunnel_state);
+
+		if (drm_dp_tunnel_alloc_bw(tunnel, bw) != 0)
+			queue_retry_work(state, tunnel, crtc_state);
+	}
+}
+
+/**
+ * intel_dp_tunnel_atomic_alloc_bw - Allocate the BW for all modeset tunnels
+ * @state: Atomic state
+ *
+ * Allocate the required BW for all tunnels in @state.
+ */ +void intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state) +{ + atomic_decrease_bw(state); + atomic_increase_bw(state); +} + +/** + * intel_dp_tunnel_mgr_init - Initialize the DP tunnel manager + * @i915: i915 device object + * + * Initialize the DP tunnel manager. The tunnel manager will support the + * detection/management of DP tunnels on all DP connectors, so the function + * must be called after all these connectors have been registered already. + * + * Return 0 in case of success, a negative error code otherwise. + */ +int intel_dp_tunnel_mgr_init(struct drm_i915_private *i915) +{ + struct drm_dp_tunnel_mgr *tunnel_mgr; + struct drm_connector_list_iter connector_list_iter; + struct intel_connector *connector; + int dp_connectors = 0; + + drm_connector_list_iter_begin(&i915->drm, &connector_list_iter); + for_each_intel_connector_iter(connector, &connector_list_iter) { + if (connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) + continue; + + dp_connectors++; + } + drm_connector_list_iter_end(&connector_list_iter); + + tunnel_mgr = drm_dp_tunnel_mgr_create(&i915->drm, dp_connectors); + if (IS_ERR(tunnel_mgr)) + return PTR_ERR(tunnel_mgr); + + i915->display.dp_tunnel_mgr = tunnel_mgr; + + return 0; +} + +/** + * intel_dp_tunnel_mgr_cleanup - Clean up the DP tunnel manager state + * @i915: i915 device object + * + * Clean up the DP tunnel manager state. + */ +void intel_dp_tunnel_mgr_cleanup(struct drm_i915_private *i915) +{ + drm_dp_tunnel_mgr_destroy(i915->display.dp_tunnel_mgr); + i915->display.dp_tunnel_mgr = NULL; +} diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.h b/drivers/gpu/drm/i915/display/intel_dp_tunnel.h new file mode 100644 index 000000000000..08b2cba84af2 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.h @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __INTEL_DP_TUNNEL_H__ +#define __INTEL_DP_TUNNEL_H__ + +#include +#include + +struct drm_i915_private; +struct drm_connector_state; +struct drm_modeset_acquire_ctx; + +struct intel_atomic_state; +struct intel_connector; +struct intel_crtc; +struct intel_crtc_state; +struct intel_dp; +struct intel_encoder; +struct intel_link_bw_limits; + +#if defined(CONFIG_DRM_I915_DP_TUNNEL) && defined(I915) + +int intel_dp_tunnel_detect(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx); +void intel_dp_tunnel_disconnect(struct intel_dp *intel_dp); +void intel_dp_tunnel_destroy(struct intel_dp *intel_dp); +void intel_dp_tunnel_resume(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + bool dpcd_updated); +void intel_dp_tunnel_suspend(struct intel_dp *intel_dp); + +bool intel_dp_tunnel_bw_alloc_is_enabled(struct intel_dp *intel_dp); + +void +intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state *state); + +int intel_dp_tunnel_atomic_compute_stream_bw(struct intel_atomic_state *state, + struct intel_dp *intel_dp, + const struct intel_connector *connector, + struct intel_crtc_state *crtc_state); +void intel_dp_tunnel_atomic_clear_stream_bw(struct intel_atomic_state *state, + struct intel_crtc_state *crtc_state); + +int intel_dp_tunnel_atomic_add_state_for_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc); +int intel_dp_tunnel_atomic_check_link(struct intel_atomic_state *state, + struct intel_link_bw_limits *limits); +int intel_dp_tunnel_atomic_check_state(struct intel_atomic_state *state, + struct intel_dp *intel_dp, + struct intel_connector 
*connector); + +void intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state); + +int intel_dp_tunnel_mgr_init(struct drm_i915_private *i915); +void intel_dp_tunnel_mgr_cleanup(struct drm_i915_private *i915); + +#else + +static inline int +intel_dp_tunnel_detect(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx) +{ + return -EOPNOTSUPP; +} + +static inline void intel_dp_tunnel_disconnect(struct intel_dp *intel_dp) {} +static inline void intel_dp_tunnel_destroy(struct intel_dp *intel_dp) {} +static inline void intel_dp_tunnel_resume(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + bool dpcd_updated) {} +static inline void intel_dp_tunnel_suspend(struct intel_dp *intel_dp) {} + +static inline bool intel_dp_tunnel_bw_alloc_is_enabled(struct intel_dp *intel_dp) +{ + return false; +} + +static inline void +intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state *state) {} + +static inline int +intel_dp_tunnel_atomic_compute_stream_bw(struct intel_atomic_state *state, + struct intel_dp *intel_dp, + const struct intel_connector *connector, + struct intel_crtc_state *crtc_state) +{ + return 0; +} + +static inline void +intel_dp_tunnel_atomic_clear_stream_bw(struct intel_atomic_state *state, + struct intel_crtc_state *crtc_state) {} + +static inline int +intel_dp_tunnel_atomic_add_state_for_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + return 0; +} + +static inline int +intel_dp_tunnel_atomic_check_link(struct intel_atomic_state *state, + struct intel_link_bw_limits *limits) +{ + return 0; +} + +static inline int +intel_dp_tunnel_atomic_check_state(struct intel_atomic_state *state, + struct intel_dp *intel_dp, + struct intel_connector *connector) +{ + return 0; +} + +static inline int +intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state) +{ + return 0; +} + +static inline int +intel_dp_tunnel_mgr_init(struct drm_i915_private *i915) +{ + return 0; +} + +static inline void intel_dp_tunnel_mgr_cleanup(struct drm_i915_private *i915) {} + +#endif /* CONFIG_DRM_I915_DP_TUNNEL */ + +#endif /* __INTEL_DP_TUNNEL_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index e7e0a4cf9f93..ff480f171f75 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -109,6 +109,8 @@ struct intel_dpll_mgr { void (*update_ref_clks)(struct drm_i915_private *i915); void (*dump_hw_state)(struct drm_i915_private *i915, const struct intel_dpll_hw_state *hw_state); + bool (*compare_hw_state)(const struct intel_dpll_hw_state *a, + const struct intel_dpll_hw_state *b); }; static void @@ -644,6 +646,15 @@ static void ibx_dump_hw_state(struct drm_i915_private *i915, hw_state->fp1); } +static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *a, + const struct intel_dpll_hw_state *b) +{ + return a->dpll == b->dpll && + a->dpll_md == b->dpll_md && + a->fp0 == b->fp0 && + a->fp1 == b->fp1; +} + static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = { .enable = ibx_pch_dpll_enable, .disable = ibx_pch_dpll_disable, @@ -662,6 +673,7 @@ static const struct intel_dpll_mgr pch_pll_mgr = { .get_dplls = ibx_get_dpll, .put_dplls = intel_put_dpll, .dump_hw_state = ibx_dump_hw_state, + .compare_hw_state = ibx_compare_hw_state, }; static void hsw_ddi_wrpll_enable(struct drm_i915_private *i915, @@ -1220,6 +1232,13 @@ static void hsw_dump_hw_state(struct drm_i915_private *i915, hw_state->wrpll, hw_state->spll); } 
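For context, the per-platform .compare_hw_state hooks added in this patch are plain field-by-field comparisons, dispatched through the intel_dpll_compare_hw_state() helper introduced further down. A minimal usage sketch follows; the verify_dpll_state() wrapper and the error message are hypothetical and only illustrate the intended call pattern, they are not part of the patch.

/*
 * Hypothetical example (not part of the patch): check whether the
 * software-tracked PLL state still matches a fresh hardware readout,
 * and dump both states via the existing helper when they diverge.
 */
static void verify_dpll_state(struct drm_i915_private *i915,
			      const struct intel_dpll_hw_state *sw_state,
			      const struct intel_dpll_hw_state *hw_state)
{
	if (intel_dpll_compare_hw_state(i915, sw_state, hw_state))
		return;

	drm_err(&i915->drm, "DPLL hw state mismatch\n");
	intel_dpll_dump_hw_state(i915, sw_state);
	intel_dpll_dump_hw_state(i915, hw_state);
}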
+static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *a, + const struct intel_dpll_hw_state *b) +{ + return a->wrpll == b->wrpll && + a->spll == b->spll; +} + static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = { .enable = hsw_ddi_wrpll_enable, .disable = hsw_ddi_wrpll_disable, @@ -1278,6 +1297,7 @@ static const struct intel_dpll_mgr hsw_pll_mgr = { .put_dplls = intel_put_dpll, .update_ref_clks = hsw_update_dpll_ref_clks, .dump_hw_state = hsw_dump_hw_state, + .compare_hw_state = hsw_compare_hw_state, }; struct skl_dpll_regs { @@ -1929,6 +1949,14 @@ static void skl_dump_hw_state(struct drm_i915_private *i915, hw_state->cfgcr2); } +static bool skl_compare_hw_state(const struct intel_dpll_hw_state *a, + const struct intel_dpll_hw_state *b) +{ + return a->ctrl1 == b->ctrl1 && + a->cfgcr1 == b->cfgcr1 && + a->cfgcr2 == b->cfgcr2; +} + static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = { .enable = skl_ddi_pll_enable, .disable = skl_ddi_pll_disable, @@ -1959,6 +1987,7 @@ static const struct intel_dpll_mgr skl_pll_mgr = { .put_dplls = intel_put_dpll, .update_ref_clks = skl_update_dpll_ref_clks, .dump_hw_state = skl_dump_hw_state, + .compare_hw_state = skl_compare_hw_state, }; static void bxt_ddi_pll_enable(struct drm_i915_private *i915, @@ -2392,6 +2421,21 @@ static void bxt_dump_hw_state(struct drm_i915_private *i915, hw_state->pcsdw12); } +static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *a, + const struct intel_dpll_hw_state *b) +{ + return a->ebb0 == b->ebb0 && + a->ebb4 == b->ebb4 && + a->pll0 == b->pll0 && + a->pll1 == b->pll1 && + a->pll2 == b->pll2 && + a->pll3 == b->pll3 && + a->pll6 == b->pll6 && + a->pll8 == b->pll8 && + a->pll10 == b->pll10 && + a->pcsdw12 == b->pcsdw12; +} + static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = { .enable = bxt_ddi_pll_enable, .disable = bxt_ddi_pll_disable, @@ -2413,6 +2457,7 @@ static const struct intel_dpll_mgr bxt_pll_mgr = { .put_dplls = intel_put_dpll, .update_ref_clks = bxt_update_dpll_ref_clks, .dump_hw_state = bxt_dump_hw_state, + .compare_hw_state = bxt_compare_hw_state, }; static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv, @@ -4005,6 +4050,25 @@ static void icl_dump_hw_state(struct drm_i915_private *i915, hw_state->mg_pll_tdc_coldst_bias); } +static bool icl_compare_hw_state(const struct intel_dpll_hw_state *a, + const struct intel_dpll_hw_state *b) +{ + /* FIXME split combo vs. 
mg more thoroughly */ + return a->cfgcr0 == b->cfgcr0 && + a->cfgcr1 == b->cfgcr1 && + a->div0 == b->div0 && + a->mg_refclkin_ctl == b->mg_refclkin_ctl && + a->mg_clktop2_coreclkctl1 == b->mg_clktop2_coreclkctl1 && + a->mg_clktop2_hsclkctl == b->mg_clktop2_hsclkctl && + a->mg_pll_div0 == b->mg_pll_div0 && + a->mg_pll_div1 == b->mg_pll_div1 && + a->mg_pll_lf == b->mg_pll_lf && + a->mg_pll_frac_lock == b->mg_pll_frac_lock && + a->mg_pll_ssc == b->mg_pll_ssc && + a->mg_pll_bias == b->mg_pll_bias && + a->mg_pll_tdc_coldst_bias == b->mg_pll_tdc_coldst_bias; +} + static const struct intel_shared_dpll_funcs combo_pll_funcs = { .enable = combo_pll_enable, .disable = combo_pll_disable, @@ -4046,6 +4110,7 @@ static const struct intel_dpll_mgr icl_pll_mgr = { .update_active_dpll = icl_update_active_dpll, .update_ref_clks = icl_update_dpll_ref_clks, .dump_hw_state = icl_dump_hw_state, + .compare_hw_state = icl_compare_hw_state, }; static const struct dpll_info ehl_plls[] = { @@ -4063,6 +4128,7 @@ static const struct intel_dpll_mgr ehl_pll_mgr = { .put_dplls = icl_put_dplls, .update_ref_clks = icl_update_dpll_ref_clks, .dump_hw_state = icl_dump_hw_state, + .compare_hw_state = icl_compare_hw_state, }; static const struct intel_shared_dpll_funcs dkl_pll_funcs = { @@ -4094,6 +4160,7 @@ static const struct intel_dpll_mgr tgl_pll_mgr = { .update_active_dpll = icl_update_active_dpll, .update_ref_clks = icl_update_dpll_ref_clks, .dump_hw_state = icl_dump_hw_state, + .compare_hw_state = icl_compare_hw_state, }; static const struct dpll_info rkl_plls[] = { @@ -4110,6 +4177,7 @@ static const struct intel_dpll_mgr rkl_pll_mgr = { .put_dplls = icl_put_dplls, .update_ref_clks = icl_update_dpll_ref_clks, .dump_hw_state = icl_dump_hw_state, + .compare_hw_state = icl_compare_hw_state, }; static const struct dpll_info dg1_plls[] = { @@ -4127,6 +4195,7 @@ static const struct intel_dpll_mgr dg1_pll_mgr = { .put_dplls = icl_put_dplls, .update_ref_clks = icl_update_dpll_ref_clks, .dump_hw_state = icl_dump_hw_state, + .compare_hw_state = icl_compare_hw_state, }; static const struct dpll_info adls_plls[] = { @@ -4144,6 +4213,7 @@ static const struct intel_dpll_mgr adls_pll_mgr = { .put_dplls = icl_put_dplls, .update_ref_clks = icl_update_dpll_ref_clks, .dump_hw_state = icl_dump_hw_state, + .compare_hw_state = icl_compare_hw_state, }; static const struct dpll_info adlp_plls[] = { @@ -4166,6 +4236,7 @@ static const struct intel_dpll_mgr adlp_pll_mgr = { .update_active_dpll = icl_update_active_dpll, .update_ref_clks = icl_update_dpll_ref_clks, .dump_hw_state = icl_dump_hw_state, + .compare_hw_state = icl_compare_hw_state, }; /** @@ -4458,13 +4529,31 @@ void intel_dpll_dump_hw_state(struct drm_i915_private *i915, /* fallback for platforms that don't use the shared dpll * infrastructure */ - drm_dbg_kms(&i915->drm, - "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, " - "fp0: 0x%x, fp1: 0x%x\n", - hw_state->dpll, - hw_state->dpll_md, - hw_state->fp0, - hw_state->fp1); + ibx_dump_hw_state(i915, hw_state); + } +} + +/** + * intel_dpll_compare_hw_state - compare the two states + * @i915: i915 drm device + * @a: first DPLL hw state + * @b: second DPLL hw state + * + * Compare DPLL hw states @a and @b. 
+ *
+ * Returns: true if the states are equal, false if they differ
+ */
+bool intel_dpll_compare_hw_state(struct drm_i915_private *i915,
+				 const struct intel_dpll_hw_state *a,
+				 const struct intel_dpll_hw_state *b)
+{
+	if (i915->display.dpll.mgr) {
+		return i915->display.dpll.mgr->compare_hw_state(a, b);
+	} else {
+		/* fallback for platforms that don't use the shared dpll
+		 * infrastructure
+		 */
+		return ibx_compare_hw_state(a, b);
 	}
 }
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index 616afe861b46..cc0e1386309d 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -378,6 +378,9 @@ void intel_dpll_sanitize_state(struct drm_i915_private *i915);
 
 void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
 			      const struct intel_dpll_hw_state *hw_state);
+bool intel_dpll_compare_hw_state(struct drm_i915_private *i915,
+				 const struct intel_dpll_hw_state *a,
+				 const struct intel_dpll_hw_state *b);
 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port);
 bool intel_dpll_is_combophy(enum intel_dpll_id id);
 
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
index 6282ec0fc9b4..169ef38ff188 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.c
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -299,6 +299,7 @@ void intel_drrs_crtc_init(struct intel_crtc *crtc)
 static int intel_drrs_debugfs_status_show(struct seq_file *m, void *unused)
 {
 	struct intel_crtc *crtc = m->private;
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 	const struct intel_crtc_state *crtc_state;
 	int ret;
 
@@ -310,6 +311,11 @@ static int intel_drrs_debugfs_status_show(struct seq_file *m, void *unused)
 
 	mutex_lock(&crtc->drrs.mutex);
 
+	seq_printf(m, "DRRS capable: %s\n",
+		   str_yes_no(crtc_state->has_drrs ||
+			      HAS_DOUBLE_BUFFERED_M_N(i915) ||
+			      intel_cpu_transcoder_has_m2_n2(i915, crtc_state->cpu_transcoder)));
+
 	seq_printf(m, "DRRS enabled: %s\n",
 		   str_yes_no(crtc_state->has_drrs));
 
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index a6c7122fd671..d62e050185e7 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -325,7 +325,7 @@ static int intel_dsb_dewake_scanline(const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
-	unsigned int latency = skl_watermark_max_latency(i915);
+	unsigned int latency = skl_watermark_max_latency(i915, 0);
 	int vblank_start;
 
 	if (crtc_state->vrr.enable) {
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h
index 083390e5e442..e99c94edfaae 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi.h
@@ -57,9 +57,6 @@ struct intel_dsi {
 		u16 phys;	/* ICL DSI */
 	};
 
-	/* if true, use HS mode, otherwise LP */
-	bool hs;
-
 	/* virtual channel */
 	int channel;
 
@@ -93,7 +90,6 @@ struct intel_dsi {
 	bool bgr_enabled;
 
 	u8 pixel_overlap;
-	u32 port_bits;
 	u32 bw_timer;
 	u32 dphy_reg;
 
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 8ca9ae4798a8..c076da75b066 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -30,6 +30,7 @@
 
 #include 
 #include 
+#include 
 
 #include "i915_drv.h"
 #include "i915_reg.h"
@@ -338,8 +339,12 @@ 
intel_dvo_detect(struct drm_connector *_connector, bool force) static int intel_dvo_get_modes(struct drm_connector *_connector) { struct intel_connector *connector = to_intel_connector(_connector); + struct drm_i915_private *i915 = to_i915(connector->base.dev); int num_modes; + if (!intel_display_driver_check_access(i915)) + return drm_edid_connector_add_modes(&connector->base); + /* * We should probably have an i2c driver get_modes function for those * devices which will have a fixed set of modes determined by the chip diff --git a/drivers/gpu/drm/i915/display/intel_dvo_dev.h b/drivers/gpu/drm/i915/display/intel_dvo_dev.h index f7e98e1c6470..af7b04539b93 100644 --- a/drivers/gpu/drm/i915/display/intel_dvo_dev.h +++ b/drivers/gpu/drm/i915/display/intel_dvo_dev.h @@ -53,12 +53,6 @@ struct intel_dvo_dev_ops { bool (*init)(struct intel_dvo_device *dvo, struct i2c_adapter *i2cbus); - /* - * Called to allow the output a chance to create properties after the - * RandR objects have been created. - */ - void (*create_resources)(struct intel_dvo_device *dvo); - /* * Turn on/off output. * @@ -79,16 +73,6 @@ struct intel_dvo_dev_ops { enum drm_mode_status (*mode_valid)(struct intel_dvo_device *dvo, struct drm_display_mode *mode); - /* - * Callback for preparing mode changes on an output - */ - void (*prepare)(struct intel_dvo_device *dvo); - - /* - * Callback for committing mode changes on an output - */ - void (*commit)(struct intel_dvo_device *dvo); - /* * Callback for setting up a video mode after fixups have been made. * @@ -111,15 +95,6 @@ struct intel_dvo_dev_ops { */ bool (*get_hw_state)(struct intel_dvo_device *dev); - /** - * Query the device for the modes it provides. - * - * This function may also update MonInfo, mm_width, and mm_height. - * - * \return singly-linked list of modes or NULL if no modes found. 
- */ - struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo); - /** * Clean up driver-specific bits of the output */ diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c index 0c0144eaa8fa..3ea6470d6d92 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fb.c @@ -1849,9 +1849,10 @@ static int intel_plane_check_stride(const struct intel_plane_state *plane_state) fb->modifier, rotation); if (stride > max_stride) { - DRM_DEBUG_KMS("[FB:%d] stride (%d) exceeds [PLANE:%d:%s] max stride (%d)\n", - fb->base.id, stride, - plane->base.base.id, plane->base.name, max_stride); + drm_dbg_kms(plane->base.dev, + "[FB:%d] stride (%d) exceeds [PLANE:%d:%s] max stride (%d)\n", + fb->base.id, stride, + plane->base.base.id, plane->base.name, max_stride); return -EINVAL; } diff --git a/drivers/gpu/drm/i915/display/intel_global_state.h b/drivers/gpu/drm/i915/display/intel_global_state.h index 5c8545d7a76a..6506a8e32972 100644 --- a/drivers/gpu/drm/i915/display/intel_global_state.h +++ b/drivers/gpu/drm/i915/display/intel_global_state.h @@ -37,11 +37,11 @@ struct intel_global_obj { (__i)++) \ for_each_if(obj) -#define for_each_old_global_obj_in_state(__state, obj, new_obj_state, __i) \ +#define for_each_old_global_obj_in_state(__state, obj, old_obj_state, __i) \ for ((__i) = 0; \ (__i) < (__state)->num_global_objs && \ ((obj) = (__state)->global_objs[__i].ptr, \ - (new_obj_state) = (__state)->global_objs[__i].old_state, 1); \ + (old_obj_state) = (__state)->global_objs[__i].old_state, 1); \ (__i)++) \ for_each_if(obj) diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index c3e692e7f790..9edac27bab26 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -30,7 +30,7 @@ #define KEY_LOAD_TRIES 5 #define HDCP2_LC_RETRY_CNT 3 -static int intel_conn_to_vcpi(struct drm_atomic_state *state, +static int intel_conn_to_vcpi(struct intel_atomic_state *state, struct intel_connector *connector) { struct drm_dp_mst_topology_mgr *mgr; @@ -43,7 +43,7 @@ static int intel_conn_to_vcpi(struct drm_atomic_state *state, return 0; mgr = connector->port->mgr; - drm_modeset_lock(&mgr->base.lock, state->acquire_ctx); + drm_modeset_lock(&mgr->base.lock, state->base.acquire_ctx); mst_state = to_drm_dp_mst_topology_state(mgr->base.state); payload = drm_atomic_get_mst_payload_state(mst_state, connector->port); if (drm_WARN_ON(mgr->dev, !payload)) @@ -68,19 +68,51 @@ out: * DP MST topology. Though it is not compulsory, security fw should change its * policy to mark different content_types for different streams. 
*/ -static void -intel_hdcp_required_content_stream(struct intel_digital_port *dig_port) +static int +intel_hdcp_required_content_stream(struct intel_atomic_state *state, + struct intel_digital_port *dig_port) { + struct drm_connector_list_iter conn_iter; + struct intel_digital_port *conn_dig_port; + struct intel_connector *connector; + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct hdcp_port_data *data = &dig_port->hdcp_port_data; bool enforce_type0 = false; int k; if (dig_port->hdcp_auth_status) - return; + return 0; + + data->k = 0; if (!dig_port->hdcp_mst_type1_capable) enforce_type0 = true; + drm_connector_list_iter_begin(&i915->drm, &conn_iter); + for_each_intel_connector_iter(connector, &conn_iter) { + if (connector->base.status == connector_status_disconnected) + continue; + + if (!intel_encoder_is_mst(intel_attached_encoder(connector))) + continue; + + conn_dig_port = intel_attached_dig_port(connector); + if (conn_dig_port != dig_port) + continue; + + data->streams[data->k].stream_id = + intel_conn_to_vcpi(state, connector); + data->k++; + + /* if there is only one active stream */ + if (dig_port->dp.active_mst_links <= 1) + break; + } + drm_connector_list_iter_end(&conn_iter); + + if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0)) + return -EINVAL; + /* * Apply common protection level across all streams in DP MST Topology. * Use highest supported content type for all streams in DP MST Topology. @@ -88,19 +120,25 @@ intel_hdcp_required_content_stream(struct intel_digital_port *dig_port) for (k = 0; k < data->k; k++) data->streams[k].stream_type = enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1; + + return 0; } -static void intel_hdcp_prepare_streams(struct intel_connector *connector) +static int intel_hdcp_prepare_streams(struct intel_atomic_state *state, + struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct intel_hdcp *hdcp = &connector->hdcp; - if (!intel_encoder_is_mst(intel_attached_encoder(connector))) { - data->streams[0].stream_type = hdcp->content_type; - } else { - intel_hdcp_required_content_stream(dig_port); - } + if (intel_encoder_is_mst(intel_attached_encoder(connector))) + return intel_hdcp_required_content_stream(state, dig_port); + + data->k = 1; + data->streams[0].stream_id = 0; + data->streams[0].stream_type = hdcp->content_type; + + return 0; } static @@ -140,7 +178,7 @@ int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port, } /* Is HDCP1.4 capable on Platform and Sink */ -bool intel_hdcp_capable(struct intel_connector *connector) +bool intel_hdcp_get_capability(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); const struct intel_hdcp_shim *shim = connector->hdcp.shim; @@ -150,8 +188,8 @@ bool intel_hdcp_capable(struct intel_connector *connector) if (!shim) return capable; - if (shim->hdcp_capable) { - shim->hdcp_capable(dig_port, &capable); + if (shim->hdcp_get_capability) { + shim->hdcp_get_capability(dig_port, &capable); } else { if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv)) capable = true; @@ -160,12 +198,14 @@ bool intel_hdcp_capable(struct intel_connector *connector) return capable; } -/* Is HDCP2.2 capable on Platform and Sink */ -bool intel_hdcp2_capable(struct intel_connector *connector) +/* + * Check if the source has all the building blocks ready to make + * HDCP 2.2 work 
+ */ +static bool intel_hdcp2_prerequisite(struct intel_connector *connector) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; - bool capable = false; /* I915 support for HDCP2.2 */ if (!hdcp->hdcp2_supported) @@ -185,12 +225,40 @@ bool intel_hdcp2_capable(struct intel_connector *connector) } mutex_unlock(&i915->display.hdcp.hdcp_mutex); + return true; +} + +/* Is HDCP2.2 capable on Platform and Sink */ +bool intel_hdcp2_get_capability(struct intel_connector *connector) +{ + struct intel_hdcp *hdcp = &connector->hdcp; + bool capable = false; + + if (!intel_hdcp2_prerequisite(connector)) + return false; + /* Sink's capability for HDCP2.2 */ - hdcp->shim->hdcp_2_2_capable(connector, &capable); + hdcp->shim->hdcp_2_2_get_capability(connector, &capable); return capable; } +void intel_hdcp_get_remote_capability(struct intel_connector *connector, + bool *hdcp_capable, + bool *hdcp2_capable) +{ + struct intel_hdcp *hdcp = &connector->hdcp; + + if (!hdcp->shim->get_remote_hdcp_capability) + return; + + hdcp->shim->get_remote_hdcp_capability(connector, hdcp_capable, + hdcp2_capable); + + if (!intel_hdcp2_prerequisite(connector)) + *hdcp2_capable = false; +} + static bool intel_hdcp_in_use(struct drm_i915_private *i915, enum transcoder cpu_transcoder, enum port port) { @@ -726,8 +794,8 @@ static int intel_hdcp_auth(struct intel_connector *connector) * whether the display supports HDCP before we write An. For HDMI * displays, this is not necessary. */ - if (shim->hdcp_capable) { - ret = shim->hdcp_capable(dig_port, &hdcp_capable); + if (shim->hdcp_get_capability) { + ret = shim->hdcp_get_capability(dig_port, &hdcp_capable); if (ret) return ret; if (!hdcp_capable) { @@ -1058,15 +1126,9 @@ static int intel_hdcp_check_link(struct intel_connector *connector) goto out; } - ret = intel_hdcp1_enable(connector); - if (ret) { - drm_err(&i915->drm, "Failed to enable hdcp (%d)\n", ret); - intel_hdcp_update_value(connector, - DRM_MODE_CONTENT_PROTECTION_DESIRED, - true); - goto out; - } - + intel_hdcp_update_value(connector, + DRM_MODE_CONTENT_PROTECTION_DESIRED, + true); out: mutex_unlock(&dig_port->hdcp_mutex); mutex_unlock(&hdcp->mutex); @@ -1871,7 +1933,8 @@ hdcp2_propagate_stream_management_info(struct intel_connector *connector) return ret; } -static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector) +static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state, + struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); @@ -1880,7 +1943,13 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector) for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) { ret = hdcp2_authenticate_sink(connector); if (!ret) { - intel_hdcp_prepare_streams(connector); + ret = intel_hdcp_prepare_streams(state, connector); + if (ret) { + drm_dbg_kms(&i915->drm, + "Prepare stream failed.(%d)\n", + ret); + break; + } ret = hdcp2_propagate_stream_management_info(connector); if (ret) { @@ -1925,7 +1994,8 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector) return ret; } -static int _intel_hdcp2_enable(struct intel_connector *connector) +static int _intel_hdcp2_enable(struct intel_atomic_state *state, + struct intel_connector *connector) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; @@ -1935,7 +2005,7 @@ static int 
_intel_hdcp2_enable(struct intel_connector *connector) connector->base.base.id, connector->base.name, hdcp->content_type); - ret = hdcp2_authenticate_and_encrypt(connector); + ret = hdcp2_authenticate_and_encrypt(state, connector); if (ret) { drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n", hdcp->content_type, ret); @@ -2038,17 +2108,6 @@ static int intel_hdcp2_check_link(struct intel_connector *connector) drm_dbg_kms(&i915->drm, "HDCP2.2 Downstream topology change\n"); - ret = hdcp2_authenticate_repeater_topology(connector); - if (!ret) { - intel_hdcp_update_value(connector, - DRM_MODE_CONTENT_PROTECTION_ENABLED, - true); - goto out; - } - drm_dbg_kms(&i915->drm, - "[CONNECTOR:%d:%s] Repeater topology auth failed.(%d)\n", - connector->base.base.id, connector->base.name, - ret); } else { drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP2.2 link failed, retrying auth\n", @@ -2065,18 +2124,8 @@ static int intel_hdcp2_check_link(struct intel_connector *connector) goto out; } - ret = _intel_hdcp2_enable(connector); - if (ret) { - drm_dbg_kms(&i915->drm, - "[CONNECTOR:%d:%s] Failed to enable hdcp2.2 (%d)\n", - connector->base.base.id, connector->base.name, - ret); - intel_hdcp_update_value(connector, - DRM_MODE_CONTENT_PROTECTION_DESIRED, - true); - goto out; - } - + intel_hdcp_update_value(connector, + DRM_MODE_CONTENT_PROTECTION_DESIRED, true); out: mutex_unlock(&dig_port->hdcp_mutex); mutex_unlock(&hdcp->mutex); @@ -2284,52 +2333,6 @@ int intel_hdcp_init(struct intel_connector *connector, return 0; } -static int -intel_hdcp_set_streams(struct intel_digital_port *dig_port, - struct intel_atomic_state *state) -{ - struct drm_connector_list_iter conn_iter; - struct intel_digital_port *conn_dig_port; - struct intel_connector *connector; - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - struct hdcp_port_data *data = &dig_port->hdcp_port_data; - - if (!intel_encoder_is_mst(&dig_port->base)) { - data->k = 1; - data->streams[0].stream_id = 0; - return 0; - } - - data->k = 0; - - drm_connector_list_iter_begin(&i915->drm, &conn_iter); - for_each_intel_connector_iter(connector, &conn_iter) { - if (connector->base.status == connector_status_disconnected) - continue; - - if (!intel_encoder_is_mst(intel_attached_encoder(connector))) - continue; - - conn_dig_port = intel_attached_dig_port(connector); - if (conn_dig_port != dig_port) - continue; - - data->streams[data->k].stream_id = - intel_conn_to_vcpi(&state->base, connector); - data->k++; - - /* if there is only one active stream */ - if (dig_port->dp.active_mst_links <= 1) - break; - } - drm_connector_list_iter_end(&conn_iter); - - if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0)) - return -EINVAL; - - return 0; -} - static int _intel_hdcp_enable(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, @@ -2374,25 +2377,18 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state, * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup * is capable of HDCP2.2, it is preferred to use HDCP2.2. 
*/ - if (intel_hdcp2_capable(connector)) { - ret = intel_hdcp_set_streams(dig_port, state); - if (!ret) { - ret = _intel_hdcp2_enable(connector); - if (!ret) - check_link_interval = - DRM_HDCP2_CHECK_PERIOD_MS; - } else { - drm_dbg_kms(&i915->drm, - "Set content streams failed: (%d)\n", - ret); - } + if (intel_hdcp2_get_capability(connector)) { + ret = _intel_hdcp2_enable(state, connector); + if (!ret) + check_link_interval = + DRM_HDCP2_CHECK_PERIOD_MS; } /* * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will * be attempted. */ - if (ret && intel_hdcp_capable(connector) && + if (ret && intel_hdcp_get_capability(connector) && hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) { ret = intel_hdcp1_enable(connector); } diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h index a9c784fd9ba5..477f2d2bb120 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.h +++ b/drivers/gpu/drm/i915/display/intel_hdcp.h @@ -38,8 +38,11 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); bool is_hdcp_supported(struct drm_i915_private *i915, enum port port); -bool intel_hdcp_capable(struct intel_connector *connector); -bool intel_hdcp2_capable(struct intel_connector *connector); +bool intel_hdcp_get_capability(struct intel_connector *connector); +bool intel_hdcp2_get_capability(struct intel_connector *connector); +void intel_hdcp_get_remote_capability(struct intel_connector *connector, + bool *hdcp_capable, + bool *hdcp2_capable); void intel_hdcp_component_init(struct drm_i915_private *i915); void intel_hdcp_component_fini(struct drm_i915_private *i915); void intel_hdcp_cleanup(struct intel_connector *connector); diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 7020e5806109..90d2236fede3 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -1732,8 +1732,8 @@ int intel_hdmi_hdcp2_check_link(struct intel_digital_port *dig_port, } static -int intel_hdmi_hdcp2_capable(struct intel_connector *connector, - bool *capable) +int intel_hdmi_hdcp2_get_capability(struct intel_connector *connector, + bool *capable) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); u8 hdcp2_version; @@ -1762,7 +1762,7 @@ static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = { .write_2_2_msg = intel_hdmi_hdcp2_write_msg, .read_2_2_msg = intel_hdmi_hdcp2_read_msg, .check_2_2_link = intel_hdmi_hdcp2_check_link, - .hdcp_2_2_capable = intel_hdmi_hdcp2_capable, + .hdcp_2_2_get_capability = intel_hdmi_hdcp2_get_capability, .protocol = HDCP_PROTOCOL_HDMI, }; diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.c b/drivers/gpu/drm/i915/display/intel_link_bw.c index 9c6d35a405a1..dfd7d5e23f3f 100644 --- a/drivers/gpu/drm/i915/display/intel_link_bw.c +++ b/drivers/gpu/drm/i915/display/intel_link_bw.c @@ -6,26 +6,41 @@ #include "i915_drv.h" #include "intel_atomic.h" +#include "intel_crtc.h" #include "intel_display_types.h" #include "intel_dp_mst.h" +#include "intel_dp_tunnel.h" #include "intel_fdi.h" #include "intel_link_bw.h" /** * intel_link_bw_init_limits - initialize BW limits - * @i915: device instance + * @state: Atomic state * @limits: link BW limits * * Initialize @limits. 
*/ -void intel_link_bw_init_limits(struct drm_i915_private *i915, struct intel_link_bw_limits *limits) +void intel_link_bw_init_limits(struct intel_atomic_state *state, + struct intel_link_bw_limits *limits) { + struct drm_i915_private *i915 = to_i915(state->base.dev); enum pipe pipe; limits->force_fec_pipes = 0; limits->bpp_limit_reached_pipes = 0; - for_each_pipe(i915, pipe) - limits->max_bpp_x16[pipe] = INT_MAX; + for_each_pipe(i915, pipe) { + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, + intel_crtc_for_pipe(i915, pipe)); + + if (state->base.duplicated && crtc_state) { + limits->max_bpp_x16[pipe] = crtc_state->max_link_bpp_x16; + if (crtc_state->fec_enable) + limits->force_fec_pipes |= BIT(pipe); + } else { + limits->max_bpp_x16[pipe] = INT_MAX; + } + } } /** @@ -149,6 +164,10 @@ static int check_all_link_config(struct intel_atomic_state *state, if (ret) return ret; + ret = intel_dp_tunnel_atomic_check_link(state, limits); + if (ret) + return ret; + ret = intel_fdi_atomic_check_link(state, limits); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.h b/drivers/gpu/drm/i915/display/intel_link_bw.h index 2cf57307cc24..6b0ccfff59da 100644 --- a/drivers/gpu/drm/i915/display/intel_link_bw.h +++ b/drivers/gpu/drm/i915/display/intel_link_bw.h @@ -22,7 +22,7 @@ struct intel_link_bw_limits { int max_bpp_x16[I915_MAX_PIPES]; }; -void intel_link_bw_init_limits(struct drm_i915_private *i915, +void intel_link_bw_init_limits(struct intel_atomic_state *state, struct intel_link_bw_limits *limits); int intel_link_bw_reduce_bpp(struct intel_atomic_state *state, struct intel_link_bw_limits *limits, diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c index f242bb320610..fcbb083318a7 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.c +++ b/drivers/gpu/drm/i915/display/intel_opregion.c @@ -887,7 +887,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv) return ret; } - if (intel_bios_is_valid_vbt(fw->data, fw->size)) { + if (intel_bios_is_valid_vbt(dev_priv, fw->data, fw->size)) { opregion->vbt_firmware = kmemdup(fw->data, fw->size, GFP_KERNEL); if (opregion->vbt_firmware) { drm_dbg_kms(&dev_priv->drm, @@ -1034,7 +1034,7 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) vbt = opregion->rvda; vbt_size = opregion->asle->rvds; - if (intel_bios_is_valid_vbt(vbt, vbt_size)) { + if (intel_bios_is_valid_vbt(dev_priv, vbt, vbt_size)) { drm_dbg_kms(&dev_priv->drm, "Found valid VBT in ACPI OpRegion (RVDA)\n"); opregion->vbt = vbt; @@ -1059,7 +1059,7 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) vbt_size = (mboxes & MBOX_ASLE_EXT) ? 
OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE; vbt_size -= OPREGION_VBT_OFFSET; - if (intel_bios_is_valid_vbt(vbt, vbt_size)) { + if (intel_bios_is_valid_vbt(dev_priv, vbt, vbt_size)) { drm_dbg_kms(&dev_priv->drm, "Found valid VBT in ACPI OpRegion (Mailbox #4)\n"); opregion->vbt = vbt; diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 093106c1e101..5f9e748adc89 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -252,6 +252,7 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val) static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch) { + struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev); struct i2c_msg msgs[] = { { .addr = intel_sdvo->slave_addr, @@ -271,7 +272,7 @@ static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch) if ((ret = i2c_transfer(intel_sdvo->i2c, msgs, 2)) == 2) return true; - DRM_DEBUG_KMS("i2c transfer returned %d\n", ret); + drm_dbg_kms(&i915->drm, "i2c transfer returned %d\n", ret); return false; } @@ -437,7 +438,8 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, drm_WARN_ON(&dev_priv->drm, pos >= sizeof(buffer) - 1); #undef BUF_PRINT - DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), cmd, buffer); + drm_dbg_kms(&dev_priv->drm, "%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), + cmd, buffer); } static const char * const cmd_status_names[] = { @@ -462,6 +464,7 @@ static bool __intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, const void *args, int args_len, bool unlocked) { + struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev); u8 *buf, status; struct i2c_msg *msgs; int i, ret = true; @@ -511,13 +514,13 @@ static bool __intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, else ret = __i2c_transfer(intel_sdvo->i2c, msgs, i+3); if (ret < 0) { - DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); + drm_dbg_kms(&i915->drm, "I2c transfer returned %d\n", ret); ret = false; goto out; } if (ret != i+3) { /* failure in I2C transfer */ - DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3); + drm_dbg_kms(&i915->drm, "I2c transfer returned %d/%d\n", ret, i+3); ret = false; } @@ -604,12 +607,13 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, drm_WARN_ON(&dev_priv->drm, pos >= sizeof(buffer) - 1); #undef BUF_PRINT - DRM_DEBUG_KMS("%s: R: %s\n", SDVO_NAME(intel_sdvo), buffer); + drm_dbg_kms(&dev_priv->drm, "%s: R: %s\n", + SDVO_NAME(intel_sdvo), buffer); return true; log_fail: - DRM_DEBUG_KMS("%s: R: ... failed %s\n", - SDVO_NAME(intel_sdvo), buffer); + drm_dbg_kms(&dev_priv->drm, "%s: R: ... 
failed %s\n", + SDVO_NAME(intel_sdvo), buffer); return false; } @@ -758,7 +762,7 @@ static bool intel_sdvo_get_timing(struct intel_sdvo *intel_sdvo, u8 cmd, } static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo, - struct intel_sdvo_dtd *dtd) + struct intel_sdvo_dtd *dtd) { return intel_sdvo_set_timing(intel_sdvo, SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd); @@ -926,8 +930,8 @@ static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo) BUILD_BUG_ON(sizeof(encode) != 2); return intel_sdvo_get_value(intel_sdvo, - SDVO_CMD_GET_SUPP_ENCODE, - &encode, sizeof(encode)); + SDVO_CMD_GET_SUPP_ENCODE, + &encode, sizeof(encode)); } static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo, @@ -1004,6 +1008,7 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo, unsigned int if_index, u8 tx_rate, const u8 *data, unsigned int length) { + struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev); u8 set_buf_index[2] = { if_index, 0 }; u8 hbuf_size, tmp[8]; int i; @@ -1016,8 +1021,9 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo, if (!intel_sdvo_get_hbuf_size(intel_sdvo, &hbuf_size)) return false; - DRM_DEBUG_KMS("writing sdvo hbuf: %i, length %u, hbuf_size: %i\n", - if_index, length, hbuf_size); + drm_dbg_kms(&i915->drm, + "writing sdvo hbuf: %i, length %u, hbuf_size: %i\n", + if_index, length, hbuf_size); if (hbuf_size < length) return false; @@ -1042,6 +1048,7 @@ static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo, unsigned int if_index, u8 *data, unsigned int length) { + struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev); u8 set_buf_index[2] = { if_index, 0 }; u8 hbuf_size, tx_rate, av_split; int i; @@ -1071,8 +1078,9 @@ static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo, if (!intel_sdvo_get_hbuf_size(intel_sdvo, &hbuf_size)) return false; - DRM_DEBUG_KMS("reading sdvo hbuf: %i, length %u, hbuf_size: %i\n", - if_index, length, hbuf_size); + drm_dbg_kms(&i915->drm, + "reading sdvo hbuf: %i, length %u, hbuf_size: %i\n", + if_index, length, hbuf_size); hbuf_size = min_t(unsigned int, length, hbuf_size); @@ -1151,6 +1159,7 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo, static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo, struct intel_crtc_state *crtc_state) { + struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev); u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)]; union hdmi_infoframe *frame = &crtc_state->infoframes.avi; ssize_t len; @@ -1162,7 +1171,7 @@ static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo, len = intel_sdvo_read_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF, sdvo_data, sizeof(sdvo_data)); if (len < 0) { - DRM_DEBUG_KMS("failed to read AVI infoframe\n"); + drm_dbg_kms(&i915->drm, "failed to read AVI infoframe\n"); return; } else if (len == 0) { return; @@ -1173,13 +1182,14 @@ static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo, ret = hdmi_infoframe_unpack(frame, sdvo_data, len); if (ret) { - DRM_DEBUG_KMS("Failed to unpack AVI infoframe\n"); + drm_dbg_kms(&i915->drm, "Failed to unpack AVI infoframe\n"); return; } if (frame->any.type != HDMI_INFOFRAME_TYPE_AVI) - DRM_DEBUG_KMS("Found the wrong infoframe type 0x%x (expected 0x%02x)\n", - frame->any.type, HDMI_INFOFRAME_TYPE_AVI); + drm_dbg_kms(&i915->drm, + "Found the wrong infoframe type 0x%x (expected 0x%02x)\n", + frame->any.type, HDMI_INFOFRAME_TYPE_AVI); } static void intel_sdvo_get_eld(struct intel_sdvo 
*intel_sdvo, @@ -1348,6 +1358,7 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { + struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_sdvo *intel_sdvo = to_sdvo(encoder); struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(conn_state->connector); @@ -1360,7 +1371,7 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder, return -EINVAL; } - DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n"); + drm_dbg_kms(&i915->drm, "forcing bpc to 8 for SDVO\n"); /* FIXME: Don't increase pipe_bpp */ pipe_config->pipe_bpp = 8*3; pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB; @@ -1439,7 +1450,7 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder, if (!intel_sdvo_compute_avi_infoframe(intel_sdvo, pipe_config, conn_state)) { - DRM_DEBUG_KMS("bad AVI infoframe\n"); + drm_dbg_kms(&i915->drm, "bad AVI infoframe\n"); return -EINVAL; } @@ -1916,8 +1927,8 @@ static void intel_enable_sdvo(struct intel_atomic_state *state, */ if (success && !input1) { drm_dbg_kms(&dev_priv->drm, - "First %s output reported failure to " - "sync\n", SDVO_NAME(intel_sdvo)); + "First %s output reported failure to sync\n", + SDVO_NAME(intel_sdvo)); } if (0) @@ -1976,37 +1987,38 @@ intel_sdvo_mode_valid(struct drm_connector *connector, static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps) { + struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev); BUILD_BUG_ON(sizeof(*caps) != 8); if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DEVICE_CAPS, caps, sizeof(*caps))) return false; - DRM_DEBUG_KMS("SDVO capabilities:\n" - " vendor_id: %d\n" - " device_id: %d\n" - " device_rev_id: %d\n" - " sdvo_version_major: %d\n" - " sdvo_version_minor: %d\n" - " sdvo_num_inputs: %d\n" - " smooth_scaling: %d\n" - " sharp_scaling: %d\n" - " up_scaling: %d\n" - " down_scaling: %d\n" - " stall_support: %d\n" - " output_flags: %d\n", - caps->vendor_id, - caps->device_id, - caps->device_rev_id, - caps->sdvo_version_major, - caps->sdvo_version_minor, - caps->sdvo_num_inputs, - caps->smooth_scaling, - caps->sharp_scaling, - caps->up_scaling, - caps->down_scaling, - caps->stall_support, - caps->output_flags); + drm_dbg_kms(&i915->drm, "SDVO capabilities:\n" + " vendor_id: %d\n" + " device_id: %d\n" + " device_rev_id: %d\n" + " sdvo_version_major: %d\n" + " sdvo_version_minor: %d\n" + " sdvo_num_inputs: %d\n" + " smooth_scaling: %d\n" + " sharp_scaling: %d\n" + " up_scaling: %d\n" + " down_scaling: %d\n" + " stall_support: %d\n" + " output_flags: %d\n", + caps->vendor_id, + caps->device_id, + caps->device_rev_id, + caps->sdvo_version_major, + caps->sdvo_version_minor, + caps->sdvo_num_inputs, + caps->smooth_scaling, + caps->sharp_scaling, + caps->up_scaling, + caps->down_scaling, + caps->stall_support, + caps->output_flags); return true; } @@ -2038,7 +2050,7 @@ static u16 intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo) return 0; if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, - &hotplug, sizeof(hotplug))) + &hotplug, sizeof(hotplug))) return 0; return hotplug; @@ -2121,8 +2133,9 @@ intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo, bool monitor_is_digital = drm_edid_is_digital(drm_edid); bool connector_is_digital = !!IS_DIGITAL(sdvo); - DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? 
%d\n", - connector_is_digital, monitor_is_digital); + drm_dbg_kms(sdvo->base.base.dev, + "connector_is_digital? %d, monitor_is_digital? %d\n", + connector_is_digital, monitor_is_digital); return connector_is_digital == monitor_is_digital; } @@ -2135,8 +2148,8 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) enum drm_connector_status ret; u16 response; - DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", - connector->base.id, connector->name); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n", + connector->base.id, connector->name); if (!intel_display_device_enabled(i915)) return connector_status_disconnected; @@ -2153,9 +2166,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) &response, 2)) return connector_status_unknown; - DRM_DEBUG_KMS("SDVO response %d %d [%x]\n", - response & 0xff, response >> 8, - intel_sdvo_connector->output_flag); + drm_dbg_kms(&i915->drm, "SDVO response %d %d [%x]\n", + response & 0xff, response >> 8, + intel_sdvo_connector->output_flag); if (response == 0) return connector_status_disconnected; @@ -2189,11 +2202,15 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) static int intel_sdvo_get_ddc_modes(struct drm_connector *connector) { + struct drm_i915_private *i915 = to_i915(connector->dev); int num_modes = 0; const struct drm_edid *drm_edid; - DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", - connector->base.id, connector->name); + drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s]\n", + connector->base.id, connector->name); + + if (!intel_display_driver_check_access(i915)) + return drm_edid_connector_add_modes(connector); /* set the bus switch and get the modes */ drm_edid = intel_sdvo_get_edid(connector); @@ -2287,6 +2304,7 @@ static const struct drm_display_mode sdvo_tv_modes[] = { static int intel_sdvo_get_tv_modes(struct drm_connector *connector) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector)); + struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev); struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); const struct drm_connector_state *conn_state = connector->state; @@ -2295,8 +2313,11 @@ static int intel_sdvo_get_tv_modes(struct drm_connector *connector) int num_modes = 0; int i; - DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", - connector->base.id, connector->name); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n", + connector->base.id, connector->name); + + if (!intel_display_driver_check_access(i915)) + return 0; /* * Read the list of supported input resolutions for the selected TV @@ -2783,10 +2804,11 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, u16 type) struct drm_encoder *encoder = &intel_sdvo->base.base; struct drm_connector *connector; struct intel_encoder *intel_encoder = to_intel_encoder(encoder); + struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev); struct intel_connector *intel_connector; struct intel_sdvo_connector *intel_sdvo_connector; - DRM_DEBUG_KMS("initialising DVI type 0x%x\n", type); + drm_dbg_kms(&i915->drm, "initialising DVI type 0x%x\n", type); intel_sdvo_connector = intel_sdvo_connector_alloc(); if (!intel_sdvo_connector) @@ -2797,7 +2819,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, u16 type) intel_connector = &intel_sdvo_connector->base; connector = &intel_connector->base; if (intel_sdvo_get_hotplug_support(intel_sdvo) & - intel_sdvo_connector->output_flag) { + intel_sdvo_connector->output_flag) { intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag; /* * Some SDVO devices have one-shot hotplug 
interrupts. @@ -2832,12 +2854,13 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, u16 type) static bool intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, u16 type) { + struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev); struct drm_encoder *encoder = &intel_sdvo->base.base; struct drm_connector *connector; struct intel_connector *intel_connector; struct intel_sdvo_connector *intel_sdvo_connector; - DRM_DEBUG_KMS("initialising TV type 0x%x\n", type); + drm_dbg_kms(&i915->drm, "initialising TV type 0x%x\n", type); intel_sdvo_connector = intel_sdvo_connector_alloc(); if (!intel_sdvo_connector) @@ -2871,12 +2894,13 @@ err: static bool intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, u16 type) { + struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev); struct drm_encoder *encoder = &intel_sdvo->base.base; struct drm_connector *connector; struct intel_connector *intel_connector; struct intel_sdvo_connector *intel_sdvo_connector; - DRM_DEBUG_KMS("initialising analog type 0x%x\n", type); + drm_dbg_kms(&i915->drm, "initialising analog type 0x%x\n", type); intel_sdvo_connector = intel_sdvo_connector_alloc(); if (!intel_sdvo_connector) @@ -2908,7 +2932,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, u16 type) struct intel_connector *intel_connector; struct intel_sdvo_connector *intel_sdvo_connector; - DRM_DEBUG_KMS("initialising LVDS type 0x%x\n", type); + drm_dbg_kms(&i915->drm, "initialising LVDS type 0x%x\n", type); intel_sdvo_connector = intel_sdvo_connector_alloc(); if (!intel_sdvo_connector) @@ -2992,6 +3016,7 @@ static bool intel_sdvo_output_init(struct intel_sdvo *sdvo, u16 type) static bool intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo) { + struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev); static const u16 probe_order[] = { SDVO_OUTPUT_TMDS0, SDVO_OUTPUT_TMDS1, @@ -3010,8 +3035,9 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo) flags = intel_sdvo_filter_output_flags(intel_sdvo->caps.output_flags); if (flags == 0) { - DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%04x)\n", - SDVO_NAME(intel_sdvo), intel_sdvo->caps.output_flags); + drm_dbg_kms(&i915->drm, + "%s: Unknown SDVO output type (0x%04x)\n", + SDVO_NAME(intel_sdvo), intel_sdvo->caps.output_flags); return false; } @@ -3073,8 +3099,8 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, intel_sdvo_connector->tv_format = - drm_property_create(dev, DRM_MODE_PROP_ENUM, - "mode", intel_sdvo_connector->format_supported_num); + drm_property_create(dev, DRM_MODE_PROP_ENUM, + "mode", intel_sdvo_connector->format_supported_num); if (!intel_sdvo_connector->tv_format) return false; @@ -3100,8 +3126,8 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, state_assignment = response; \ drm_object_attach_property(&connector->base, \ intel_sdvo_connector->name, 0); \ - DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \ - data_value[0], data_value[1], response); \ + drm_dbg_kms(dev, #name ": max %d, default %d, current %d\n", \ + data_value[0], data_value[1], response); \ } \ } while (0) @@ -3112,6 +3138,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector, struct intel_sdvo_enhancements_reply enhancements) { + struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev); struct drm_device *dev = intel_sdvo->base.base.dev; struct drm_connector *connector = &intel_sdvo_connector->base.base; struct drm_connector_state *conn_state = 
connector->state; @@ -3148,10 +3175,9 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, return false; drm_object_attach_property(&connector->base, - intel_sdvo_connector->right, 0); - DRM_DEBUG_KMS("h_overscan: max %d, " - "default %d, current %d\n", - data_value[0], data_value[1], response); + intel_sdvo_connector->right, 0); + drm_dbg_kms(&i915->drm, "h_overscan: max %d, default %d, current %d\n", + data_value[0], data_value[1], response); } if (enhancements.overscan_v) { @@ -3170,7 +3196,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, intel_sdvo_connector->max_vscan = data_value[0]; intel_sdvo_connector->top = drm_property_create_range(dev, 0, - "top_margin", 0, data_value[0]); + "top_margin", 0, data_value[0]); if (!intel_sdvo_connector->top) return false; @@ -3179,15 +3205,14 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, intel_sdvo_connector->bottom = drm_property_create_range(dev, 0, - "bottom_margin", 0, data_value[0]); + "bottom_margin", 0, data_value[0]); if (!intel_sdvo_connector->bottom) return false; drm_object_attach_property(&connector->base, - intel_sdvo_connector->bottom, 0); - DRM_DEBUG_KMS("v_overscan: max %d, " - "default %d, current %d\n", - data_value[0], data_value[1], response); + intel_sdvo_connector->bottom, 0); + drm_dbg_kms(&i915->drm, "v_overscan: max %d, default %d, current %d\n", + data_value[0], data_value[1], response); } ENHANCEMENT(&sdvo_state->tv, hpos, HPOS); @@ -3215,7 +3240,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, drm_object_attach_property(&connector->base, intel_sdvo_connector->dot_crawl, 0); - DRM_DEBUG_KMS("dot crawl: current %d\n", response); + drm_dbg_kms(&i915->drm, "dot crawl: current %d\n", response); } return true; @@ -3240,6 +3265,7 @@ intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo, static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector) { + struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev); union { struct intel_sdvo_enhancements_reply reply; u16 response; @@ -3251,7 +3277,7 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, &enhancements, sizeof(enhancements)) || enhancements.response == 0) { - DRM_DEBUG_KMS("No enhancement is supported\n"); + drm_dbg_kms(&i915->drm, "No enhancement is supported\n"); return true; } @@ -3471,23 +3497,23 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv, goto err_output; drm_dbg_kms(&dev_priv->drm, "%s device VID/DID: %02X:%02X.%02X, " - "clock range %dMHz - %dMHz, " - "num inputs: %d, " - "output 1: %c, output 2: %c\n", - SDVO_NAME(intel_sdvo), - intel_sdvo->caps.vendor_id, intel_sdvo->caps.device_id, - intel_sdvo->caps.device_rev_id, - intel_sdvo->pixel_clock_min / 1000, - intel_sdvo->pixel_clock_max / 1000, - intel_sdvo->caps.sdvo_num_inputs, - /* check currently supported outputs */ - intel_sdvo->caps.output_flags & - (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0 | - SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_SVID0 | - SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB0) ? 'Y' : 'N', - intel_sdvo->caps.output_flags & - (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1 | - SDVO_OUTPUT_LVDS1) ? 
'Y' : 'N'); + "clock range %dMHz - %dMHz, " + "num inputs: %d, " + "output 1: %c, output 2: %c\n", + SDVO_NAME(intel_sdvo), + intel_sdvo->caps.vendor_id, intel_sdvo->caps.device_id, + intel_sdvo->caps.device_rev_id, + intel_sdvo->pixel_clock_min / 1000, + intel_sdvo->pixel_clock_max / 1000, + intel_sdvo->caps.sdvo_num_inputs, + /* check currently supported outputs */ + intel_sdvo->caps.output_flags & + (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0 | + SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_SVID0 | + SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB0) ? 'Y' : 'N', + intel_sdvo->caps.output_flags & + (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1 | + SDVO_OUTPUT_LVDS1) ? 'Y' : 'N'); return true; err_output: diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index 392d93e97bf8..e941e2e4fd14 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -948,6 +948,11 @@ static u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, if (DISPLAY_VER(dev_priv) == 13) plane_ctl |= adlp_plane_ctl_arb_slots(plane_state); + if (GRAPHICS_VER(dev_priv) >= 20 && + fb->modifier == I915_FORMAT_MOD_4_TILED) { + plane_ctl |= PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; + } + return plane_ctl; } diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c index 614f319d754e..c6b9be80d83c 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark.c +++ b/drivers/gpu/drm/i915/display/skl_watermark.c @@ -23,6 +23,12 @@ #include "skl_watermark.h" #include "skl_watermark_regs.h" +/* It is expected that DSB can do posted writes to every register in + * the pipe and planes within 100us. For the flip queue use case, the + * recommended DSB execution time is 100us + one SAGV block time. + */ +#define DSB_EXE_TIME 100 + static void skl_sagv_disable(struct drm_i915_private *i915); /* Stores plane specific WM parameters */ @@ -2904,12 +2910,51 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state, return 0; } +/* + * If Fixed Refresh Rate: + * Program DEEP PKG_C_LATENCY Pkg C with highest valid latency from + * watermark level 1 and above. If watermark level 1 is + * invalid, program it with all 1's. + * Program PKG_C_LATENCY Added Wake Time = DSB execution time + * If Variable Refresh Rate: + * Program DEEP PKG_C_LATENCY Pkg C with all 1's.
+ * Program PKG_C_LATENCY Added Wake Time = 0 + */ +static void +skl_program_dpkgc_latency(struct drm_i915_private *i915, bool vrr_enabled) +{ + u32 max_latency = 0; + u32 clear = 0, val = 0; + u32 added_wake_time = 0; + + if (DISPLAY_VER(i915) < 20) + return; + + if (vrr_enabled) { + max_latency = LNL_PKG_C_LATENCY_MASK; + added_wake_time = 0; + } else { + max_latency = skl_watermark_max_latency(i915, 1); + if (max_latency == 0) + max_latency = LNL_PKG_C_LATENCY_MASK; + added_wake_time = DSB_EXE_TIME + + i915->display.sagv.block_time_us; + } + + clear |= LNL_ADDED_WAKE_TIME_MASK | LNL_PKG_C_LATENCY_MASK; + val |= REG_FIELD_PREP(LNL_PKG_C_LATENCY_MASK, max_latency); + val |= REG_FIELD_PREP(LNL_ADDED_WAKE_TIME_MASK, added_wake_time); + + intel_uncore_rmw(&i915->uncore, LNL_PKG_C_LATENCY, clear, val); +} + static int skl_compute_wm(struct intel_atomic_state *state) { struct intel_crtc *crtc; struct intel_crtc_state __maybe_unused *new_crtc_state; int ret, i; + bool vrr_enabled = false; for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { ret = skl_build_pipe_wm(state, crtc); @@ -2934,8 +2979,13 @@ skl_compute_wm(struct intel_atomic_state *state) ret = skl_wm_add_affected_planes(state, crtc); if (ret) return ret; + + if (new_crtc_state->vrr.enable) + vrr_enabled = true; } + skl_program_dpkgc_latency(to_i915(state->base.dev), vrr_enabled); + skl_print_wm_changes(state); return 0; @@ -3731,11 +3781,11 @@ void skl_watermark_debugfs_register(struct drm_i915_private *i915) &intel_sagv_status_fops); } -unsigned int skl_watermark_max_latency(struct drm_i915_private *i915) +unsigned int skl_watermark_max_latency(struct drm_i915_private *i915, int initial_wm_level) { int level; - for (level = i915->display.wm.num_levels - 1; level >= 0; level--) { + for (level = i915->display.wm.num_levels - 1; level >= initial_wm_level; level--) { unsigned int latency = skl_wm_latency(i915, level, NULL); if (latency) diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h index fb0da36fd3ec..e3d1d74a7b17 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark.h +++ b/drivers/gpu/drm/i915/display/skl_watermark.h @@ -46,8 +46,8 @@ void skl_watermark_ipc_update(struct drm_i915_private *i915); bool skl_watermark_ipc_enabled(struct drm_i915_private *i915); void skl_watermark_debugfs_register(struct drm_i915_private *i915); -unsigned int skl_watermark_max_latency(struct drm_i915_private *i915); - +unsigned int skl_watermark_max_latency(struct drm_i915_private *i915, + int initial_wm_level); void skl_wm_init(struct drm_i915_private *i915); struct intel_dbuf_state { diff --git a/drivers/gpu/drm/i915/display/skl_watermark_regs.h b/drivers/gpu/drm/i915/display/skl_watermark_regs.h index 628c5920ad49..20b30c9a6613 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark_regs.h +++ b/drivers/gpu/drm/i915/display/skl_watermark_regs.h @@ -157,4 +157,8 @@ #define MTL_LATENCY_SAGV _MMIO(0x4578c) #define MTL_LATENCY_QCLK_SAGV REG_GENMASK(12, 0) +#define LNL_PKG_C_LATENCY _MMIO(0x46460) +#define LNL_ADDED_WAKE_TIME_MASK REG_GENMASK(28, 16) +#define LNL_PKG_C_LATENCY_MASK REG_GENMASK(12, 0) + #endif /* __SKL_WATERMARK_REGS_H__ */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index 27dcfd8a34bb..e6f177183c0f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -162,10 +162,10 @@ i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj, unsigned int flags = obj->flags; 
unsigned int i; - places[0].flags |= TTM_PL_FLAG_DESIRED; i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] : obj->mm.region, &places[0], obj->bo_offset, obj->base.size, flags); + places[0].flags |= TTM_PL_FLAG_DESIRED; /* Cache this on object? */ for (i = 0; i < num_allowed; ++i) { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 0e21ce9d3e5a..61abfb505766 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -349,6 +349,9 @@ i915_gem_userptr_release(struct drm_i915_gem_object *obj) { GEM_WARN_ON(obj->userptr.page_ref); + if (!obj->userptr.notifier.mm) + return; + mmu_interval_notifier_remove(&obj->userptr.notifier); obj->userptr.notifier.mm = NULL; } diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c index 5f8d86e25993..8d4bb95f8424 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c @@ -96,8 +96,8 @@ static void heartbeat_commit(struct i915_request *rq, static void show_heartbeat(const struct i915_request *rq, struct intel_engine_cs *engine) { - struct drm_printer p = drm_dbg_printer(&rq->i915->drm, DRM_UT_DRIVER, - "heartbeat"); + struct drm_printer p = + drm_dbg_printer(&engine->i915->drm, DRM_UT_DRIVER, "heartbeat"); if (!rq) { intel_engine_dump(engine, &p, diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index 813cc888e6fa..be70c46604b4 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -206,8 +206,6 @@ struct intel_guc { u32 ads_golden_ctxt_size; /** @ads_capture_size: size of register lists in the ADS used for error capture */ u32 ads_capture_size; - /** @ads_engine_usage_size: size of engine usage in the ADS */ - u32 ads_engine_usage_size; /** @lrc_desc_pool_v69: object allocated to hold the GuC LRC descriptor pool */ struct i915_vma *lrc_desc_pool_v69; diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h index 4eff44194439..fa6503900c84 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.h +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h @@ -152,17 +152,6 @@ struct intel_vgpu_cursor_plane_format { u32 y_hot; /* in pixels */ }; -struct intel_vgpu_pipe_format { - struct intel_vgpu_primary_plane_format primary; - struct intel_vgpu_sprite_plane_format sprite; - struct intel_vgpu_cursor_plane_format cursor; - enum DDI_PORT ddi_port; /* the DDI port that pipe is connected to */ -}; - -struct intel_vgpu_fb_format { - struct intel_vgpu_pipe_format pipes[I915_MAX_PIPES]; -}; - int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, struct intel_vgpu_primary_plane_format *plane); int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu, diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index 4cb183e06e95..cb50700e6cc9 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -93,8 +93,6 @@ struct intel_gvt_gtt_gma_ops { struct intel_gvt_gtt { const struct intel_gvt_gtt_pte_ops *pte_ops; const struct intel_gvt_gtt_gma_ops *gma_ops; - int (*mm_alloc_page_table)(struct intel_vgpu_mm *mm); - void (*mm_free_page_table)(struct intel_vgpu_mm *mm); struct list_head oos_page_use_list_head; struct list_head oos_page_free_list_head; struct mutex ppgtt_mm_lock; @@ -210,7 +208,6 @@ struct intel_vgpu_scratch_pt { struct intel_vgpu_gtt { struct intel_vgpu_mm *ggtt_mm; - 
unsigned long active_ppgtt_mm_bitmap; struct list_head ppgtt_mm_list_head; struct radix_tree_root spt_tree; struct list_head oos_page_list_head; diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index c57aba09091f..2c95aeef4e41 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -89,7 +89,6 @@ struct intel_vgpu_gm { /* Fences owned by a vGPU */ struct intel_vgpu_fence { struct i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES]; - u32 base; u32 size; }; @@ -119,7 +118,6 @@ struct intel_vgpu_irq { }; struct intel_vgpu_opregion { - bool mapped; void *va; u32 gfn[INTEL_GVT_OPREGION_PAGES]; }; @@ -223,7 +221,6 @@ struct intel_vgpu { struct vfio_region *region; int num_regions; - struct eventfd_ctx *intx_trigger; struct eventfd_ctx *msi_trigger; /* @@ -256,7 +253,6 @@ struct intel_gvt_fence { /* Special MMIO blocks. */ struct gvt_mmio_block { - unsigned int device; i915_reg_t offset; unsigned int size; gvt_mmio_func read; @@ -444,7 +440,6 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt); #define vgpu_hidden_gmadr_end(vgpu) \ (vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1) -#define vgpu_fence_base(vgpu) (vgpu->fence.base) #define vgpu_fence_sz(vgpu) (vgpu->fence.size) /* ring context size i.e. the first 0x50 dwords*/ diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c index c8e7dfc9f791..336d079c4207 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.c +++ b/drivers/gpu/drm/i915/gvt/interrupt.c @@ -40,7 +40,6 @@ struct intel_gvt_irq_info { char *name; i915_reg_t reg_base; enum intel_gvt_event_type bit_to_event[INTEL_GVT_IRQ_BITWIDTH]; - unsigned long warned; int group; DECLARE_BITMAP(downstream_irq_bitmap, INTEL_GVT_IRQ_BITWIDTH); bool has_upstream_irq; diff --git a/drivers/gpu/drm/i915/gvt/interrupt.h b/drivers/gpu/drm/i915/gvt/interrupt.h index e60ad476fe60..cd214be98668 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.h +++ b/drivers/gpu/drm/i915/gvt/interrupt.h @@ -177,7 +177,6 @@ enum intel_gvt_irq_type { /* per-event information */ struct intel_gvt_event_info { int bit; /* map to register bit */ - int policy; /* forwarding policy */ struct intel_gvt_irq_info *info; /* register info */ gvt_event_virt_handler_t v_handler; /* for v_event */ }; @@ -188,7 +187,6 @@ struct intel_gvt_irq { struct intel_gvt_irq_info *info[INTEL_GVT_IRQ_INFO_MAX]; DECLARE_BITMAP(irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX); struct intel_gvt_event_info events[INTEL_GVT_EVENT_MAX]; - DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX); struct intel_gvt_irq_map *irq_map; }; diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index bba154e38705..32ebacb078e8 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h @@ -62,10 +62,8 @@ typedef int (*gvt_mmio_func)(struct intel_vgpu *, unsigned int, void *, struct intel_gvt_mmio_info { u32 offset; u64 ro_mask; - u32 device; gvt_mmio_func read; gvt_mmio_func write; - u32 addr_range; struct hlist_node node; }; diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index 1f391b3da2cc..cd94993278b6 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h @@ -104,10 +104,8 @@ struct intel_vgpu_workload { /* execlist context information */ struct execlist_ctx_descriptor_format ctx_desc; - struct execlist_ring_context *ring_context; unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len; unsigned long guest_rb_head; - bool restore_inhibit; struct 
intel_vgpu_elsp_dwords elsp_dwords; bool emulate_schedule_in; atomic_t shadow_ctx_active; diff --git a/drivers/gpu/drm/i915/i915_drm_client.h b/drivers/gpu/drm/i915/i915_drm_client.h index a439dd789936..2e7a50d16a88 100644 --- a/drivers/gpu/drm/i915/i915_drm_client.h +++ b/drivers/gpu/drm/i915/i915_drm_client.h @@ -24,8 +24,6 @@ struct drm_printer; struct i915_drm_client { struct kref kref; - unsigned int id; - spinlock_t ctx_lock; /* For add/remove from ctx_list. */ struct list_head ctx_list; /* List of contexts belonging to client. */ diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h index 46445248d193..39fb6ce4a7ef 100644 --- a/drivers/gpu/drm/i915/i915_perf_types.h +++ b/drivers/gpu/drm/i915/i915_perf_types.h @@ -288,7 +288,6 @@ struct i915_perf_stream { struct i915_vma *vma; u8 *vaddr; u32 last_ctx_id; - int size_exponent; /** * @oa_buffer.ptr_lock: Locks reads and writes to all diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index f59081066a19..519e096c607c 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -52,7 +52,6 @@ struct execute_cb { struct irq_work work; struct i915_sw_fence *fence; - struct i915_request *signal; }; static struct kmem_cache *slab_requests; diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h index 64472b7f0e77..559de74d0b11 100644 --- a/drivers/gpu/drm/i915/i915_vma_types.h +++ b/drivers/gpu/drm/i915/i915_vma_types.h @@ -290,7 +290,6 @@ struct i915_vma { struct list_head obj_link; /* Link in the object's VMA list */ struct rb_node obj_node; - struct hlist_node obj_hash; /** This vma's place in the eviction list */ struct list_head evict_link; diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h index 40810cfb3fd9..8c927e303c4a 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.h +++ b/drivers/gpu/drm/i915/intel_memory_region.h @@ -50,8 +50,6 @@ enum intel_region_id { for_each_if((mr) = (i915)->mm.regions[id]) struct intel_memory_region_ops { - unsigned int flags; - int (*init)(struct intel_memory_region *mem); int (*release)(struct intel_memory_region *mem); diff --git a/drivers/gpu/drm/imx/dcss/dcss-dev.c b/drivers/gpu/drm/imx/dcss/dcss-dev.c index 597e9b7bd4bf..7fd0c4c14205 100644 --- a/drivers/gpu/drm/imx/dcss/dcss-dev.c +++ b/drivers/gpu/drm/imx/dcss/dcss-dev.c @@ -167,7 +167,6 @@ struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output) struct resource *res; struct dcss_dev *dcss; const struct dcss_type_data *devtype; - resource_size_t res_len; devtype = of_device_get_match_data(dev); if (!devtype) { @@ -181,8 +180,7 @@ struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output) return ERR_PTR(-EINVAL); } - res_len = res->end - res->start; - if (!devm_request_mem_region(dev, res->start, res_len, "dcss")) { + if (!devm_request_mem_region(dev, res->start, resource_size(res), "dcss")) { dev_err(dev, "cannot request memory region\n"); return ERR_PTR(-EBUSY); } diff --git a/drivers/gpu/drm/mediatek/mtk_disp_drv.h b/drivers/gpu/drm/mediatek/mtk_disp_drv.h index 74fa56339383..90e64467ea8f 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_disp_drv.h @@ -73,6 +73,8 @@ void mtk_merge_advance_config(struct device *dev, unsigned int l_w, unsigned int struct cmdq_pkt *cmdq_pkt); void mtk_merge_start_cmdq(struct device *dev, struct cmdq_pkt *cmdq_pkt); void mtk_merge_stop_cmdq(struct device *dev, 
struct cmdq_pkt *cmdq_pkt); +enum drm_mode_status mtk_merge_mode_valid(struct device *dev, + const struct drm_display_mode *mode); void mtk_ovl_bgclr_in_on(struct device *dev); void mtk_ovl_bgclr_in_off(struct device *dev); @@ -131,6 +133,8 @@ unsigned int mtk_ovl_adaptor_layer_nr(struct device *dev); struct device *mtk_ovl_adaptor_dma_dev_get(struct device *dev); const u32 *mtk_ovl_adaptor_get_formats(struct device *dev); size_t mtk_ovl_adaptor_get_num_formats(struct device *dev); +enum drm_mode_status mtk_ovl_adaptor_mode_valid(struct device *dev, + const struct drm_display_mode *mode); void mtk_rdma_bypass_shadow(struct device *dev); int mtk_rdma_clk_enable(struct device *dev); diff --git a/drivers/gpu/drm/mediatek/mtk_disp_merge.c b/drivers/gpu/drm/mediatek/mtk_disp_merge.c index 22f768d923d5..32a29924bd54 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_merge.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_merge.c @@ -222,6 +222,71 @@ void mtk_merge_clk_disable(struct device *dev) clk_disable_unprepare(priv->clk); } +enum drm_mode_status mtk_merge_mode_valid(struct device *dev, + const struct drm_display_mode *mode) +{ + struct mtk_disp_merge *priv = dev_get_drvdata(dev); + unsigned long rate; + + rate = clk_get_rate(priv->clk); + + /* Convert to KHz and round the number */ + rate = (rate + 500) / 1000; + + if (rate && mode->clock > rate) { + dev_dbg(dev, "invalid clock: %d (>%lu)\n", mode->clock, rate); + return MODE_CLOCK_HIGH; + } + + /* + * Measure the bandwidth requirement of hardware prefetch (per frame) + * + * let N = prefetch buffer size in lines + * (ex. N=3, then prefetch buffer size = 3 lines) + * + * prefetch size = htotal * N (pixels) + * time per line = 1 / fps / vtotal (seconds) + * duration = vbp * time per line + * = vbp / fps / vtotal + * + * data rate = prefetch size / duration + * = htotal * N / (vbp / fps / vtotal) + * = htotal * vtotal * fps * N / vbp + * = clk * N / vbp (pixels per second) + * + * Say 4K60 (CEA-861) is the maximum mode supported by the SoC + * data rate = 594000K * N / 72 = 8250 (standard) + * (remove K * N due to the same unit) + * + * For 2560x1440@144 (clk=583600K, vbp=17): + * data rate = 583600 / 17 ~= 34329 > 8250 (NG) + * + * For 2560x1440@120 (clk=497760K, vbp=77): + * data rate = 497760 / 77 ~= 6464 < 8250 (OK) + * + * A non-standard 4K60 timing (clk=521280K, vbp=54) + * data rate = 521280 / 54 ~= 9653 > 8250 (NG) + * + * Bandwidth requirement of hardware prefetch increases significantly + * when the VBP decreases (more than 4x in this example). + * + * The proposed formula is only one way to estimate whether our SoC + * supports the mode setting. The basic idea behind it is just to check + * if the data rate requirement is too high (directly proportional to + * pixel clock, inversely proportional to vbp). Please adjust the + * function if it doesn't fit your situation in the future. 
+ */ + rate = mode->clock / (mode->vtotal - mode->vsync_end); + + if (rate > 8250) { + dev_dbg(dev, "invalid rate: %lu (>8250): " DRM_MODE_FMT "\n", + rate, DRM_MODE_ARG(mode)); + return MODE_BAD; + } + + return MODE_OK; +} + static int mtk_disp_merge_bind(struct device *dev, struct device *master, void *data) { diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c index 12a37f740bf4..034d31824d4d 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c @@ -30,6 +30,7 @@ enum mtk_ovl_adaptor_comp_type { OVL_ADAPTOR_TYPE_ETHDR, OVL_ADAPTOR_TYPE_MDP_RDMA, OVL_ADAPTOR_TYPE_MERGE, + OVL_ADAPTOR_TYPE_PADDING, OVL_ADAPTOR_TYPE_NUM, }; @@ -47,6 +48,14 @@ enum mtk_ovl_adaptor_comp_id { OVL_ADAPTOR_MERGE1, OVL_ADAPTOR_MERGE2, OVL_ADAPTOR_MERGE3, + OVL_ADAPTOR_PADDING0, + OVL_ADAPTOR_PADDING1, + OVL_ADAPTOR_PADDING2, + OVL_ADAPTOR_PADDING3, + OVL_ADAPTOR_PADDING4, + OVL_ADAPTOR_PADDING5, + OVL_ADAPTOR_PADDING6, + OVL_ADAPTOR_PADDING7, OVL_ADAPTOR_ID_MAX }; @@ -67,6 +76,7 @@ static const char * const private_comp_stem[OVL_ADAPTOR_TYPE_NUM] = { [OVL_ADAPTOR_TYPE_ETHDR] = "ethdr", [OVL_ADAPTOR_TYPE_MDP_RDMA] = "vdo1-rdma", [OVL_ADAPTOR_TYPE_MERGE] = "merge", + [OVL_ADAPTOR_TYPE_PADDING] = "padding", }; static const struct mtk_ddp_comp_funcs ethdr = { @@ -79,6 +89,14 @@ static const struct mtk_ddp_comp_funcs ethdr = { static const struct mtk_ddp_comp_funcs merge = { .clk_enable = mtk_merge_clk_enable, .clk_disable = mtk_merge_clk_disable, + .mode_valid = mtk_merge_mode_valid, +}; + +static const struct mtk_ddp_comp_funcs padding = { + .clk_enable = mtk_padding_clk_enable, + .clk_disable = mtk_padding_clk_disable, + .start = mtk_padding_start, + .stop = mtk_padding_stop, }; static const struct mtk_ddp_comp_funcs rdma = { @@ -102,6 +120,14 @@ static const struct ovl_adaptor_comp_match comp_matches[OVL_ADAPTOR_ID_MAX] = { [OVL_ADAPTOR_MERGE1] = { OVL_ADAPTOR_TYPE_MERGE, DDP_COMPONENT_MERGE2, 2, &merge }, [OVL_ADAPTOR_MERGE2] = { OVL_ADAPTOR_TYPE_MERGE, DDP_COMPONENT_MERGE3, 3, &merge }, [OVL_ADAPTOR_MERGE3] = { OVL_ADAPTOR_TYPE_MERGE, DDP_COMPONENT_MERGE4, 4, &merge }, + [OVL_ADAPTOR_PADDING0] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING0, 0, &padding }, + [OVL_ADAPTOR_PADDING1] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING1, 1, &padding }, + [OVL_ADAPTOR_PADDING2] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING2, 2, &padding }, + [OVL_ADAPTOR_PADDING3] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING3, 3, &padding }, + [OVL_ADAPTOR_PADDING4] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING4, 4, &padding }, + [OVL_ADAPTOR_PADDING5] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING5, 5, &padding }, + [OVL_ADAPTOR_PADDING6] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING6, 6, &padding }, + [OVL_ADAPTOR_PADDING7] = { OVL_ADAPTOR_TYPE_PADDING, DDP_COMPONENT_PADDING7, 7, &padding }, }; void mtk_ovl_adaptor_layer_config(struct device *dev, unsigned int idx, @@ -317,6 +343,22 @@ void mtk_ovl_adaptor_clk_disable(struct device *dev) } } +enum drm_mode_status mtk_ovl_adaptor_mode_valid(struct device *dev, + const struct drm_display_mode *mode) + +{ + int i; + struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); + + for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) { + dev = ovl_adaptor->ovl_adaptor_comp[i]; + if (!dev || !comp_matches[i].funcs->mode_valid) + continue; + return comp_matches[i].funcs->mode_valid(dev, mode); + } + return MODE_OK; +} + unsigned int 
mtk_ovl_adaptor_layer_nr(struct device *dev) { return MTK_OVL_ADAPTOR_LAYER_NUM; @@ -437,6 +479,7 @@ static int ovl_adaptor_comp_get_id(struct device *dev, struct device_node *node, } static const struct of_device_id mtk_ovl_adaptor_comp_dt_ids[] = { + { .compatible = "mediatek,mt8188-disp-padding", .data = (void *)OVL_ADAPTOR_TYPE_PADDING }, { .compatible = "mediatek,mt8195-disp-ethdr", .data = (void *)OVL_ADAPTOR_TYPE_ETHDR }, { .compatible = "mediatek,mt8195-disp-merge", .data = (void *)OVL_ADAPTOR_TYPE_MERGE }, { .compatible = "mediatek,mt8195-vdo1-rdma", .data = (void *)OVL_ADAPTOR_TYPE_MDP_RDMA }, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index c729af3b9822..a04499c4f9ca 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -95,11 +95,13 @@ static void mtk_drm_crtc_finish_page_flip(struct mtk_drm_crtc *mtk_crtc) struct drm_crtc *crtc = &mtk_crtc->base; unsigned long flags; - spin_lock_irqsave(&crtc->dev->event_lock, flags); - drm_crtc_send_vblank_event(crtc, mtk_crtc->event); - drm_crtc_vblank_put(crtc); - mtk_crtc->event = NULL; - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + if (mtk_crtc->event) { + spin_lock_irqsave(&crtc->dev->event_lock, flags); + drm_crtc_send_vblank_event(crtc, mtk_crtc->event); + drm_crtc_vblank_put(crtc); + mtk_crtc->event = NULL; + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + } } static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc) @@ -213,6 +215,22 @@ static void mtk_drm_crtc_destroy_state(struct drm_crtc *crtc, kfree(to_mtk_crtc_state(state)); } +static enum drm_mode_status +mtk_drm_crtc_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode) +{ + struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); + enum drm_mode_status status = MODE_OK; + int i; + + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { + status = mtk_ddp_comp_mode_valid(mtk_crtc->ddp_comp[i], mode); + if (status != MODE_OK) + break; + } + return status; +} + static bool mtk_drm_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -831,6 +849,7 @@ static const struct drm_crtc_funcs mtk_crtc_funcs = { static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = { .mode_fixup = mtk_drm_crtc_mode_fixup, .mode_set_nofb = mtk_drm_crtc_mode_set_nofb, + .mode_valid = mtk_drm_crtc_mode_valid, .atomic_begin = mtk_drm_crtc_atomic_begin, .atomic_flush = mtk_drm_crtc_atomic_flush, .atomic_enable = mtk_drm_crtc_atomic_enable, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c index a9b5a21cde2d..a515e96cfefc 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c @@ -418,6 +418,7 @@ static const struct mtk_ddp_comp_funcs ddp_ovl_adaptor = { .remove = mtk_ovl_adaptor_remove_comp, .get_formats = mtk_ovl_adaptor_get_formats, .get_num_formats = mtk_ovl_adaptor_get_num_formats, + .mode_valid = mtk_ovl_adaptor_mode_valid, }; static const char * const mtk_ddp_comp_stem[MTK_DDP_COMP_TYPE_MAX] = { diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h index 15b2eafff438..93d79a1366e9 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h @@ -12,6 +12,8 @@ #include #include +#include + struct device; struct device_node; struct drm_crtc; @@ -85,6 +87,7 @@ struct mtk_ddp_comp_funcs { void 
(*add)(struct device *dev, struct mtk_mutex *mutex); void (*remove)(struct device *dev, struct mtk_mutex *mutex); unsigned int (*encoder_index)(struct device *dev); + enum drm_mode_status (*mode_valid)(struct device *dev, const struct drm_display_mode *mode); }; struct mtk_ddp_comp { @@ -126,6 +129,15 @@ static inline void mtk_ddp_comp_clk_disable(struct mtk_ddp_comp *comp) comp->funcs->clk_disable(comp->dev); } +static inline +enum drm_mode_status mtk_ddp_comp_mode_valid(struct mtk_ddp_comp *comp, + const struct drm_display_mode *mode) +{ + if (comp && comp->funcs && comp->funcs->mode_valid) + return comp->funcs->mode_valid(comp->dev, mode); + return MODE_OK; +} + static inline void mtk_ddp_comp_config(struct mtk_ddp_comp *comp, unsigned int w, unsigned int h, unsigned int vrefresh, unsigned int bpc, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 14a1e0157cc4..74832c213092 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -293,7 +293,7 @@ static const struct mtk_mmsys_driver_data mt8188_vdosys0_driver_data = { .main_len = ARRAY_SIZE(mt8188_mtk_ddp_main), .conn_routes = mt8188_mtk_ddp_main_routes, .num_conn_routes = ARRAY_SIZE(mt8188_mtk_ddp_main_routes), - .mmsys_dev_num = 1, + .mmsys_dev_num = 2, }; static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = { @@ -334,6 +334,8 @@ static const struct of_device_id mtk_drm_of_ids[] = { .data = &mt8186_mmsys_driver_data}, { .compatible = "mediatek,mt8188-vdosys0", .data = &mt8188_vdosys0_driver_data}, + { .compatible = "mediatek,mt8188-vdosys1", + .data = &mt8195_vdosys1_driver_data}, { .compatible = "mediatek,mt8192-mmsys", .data = &mt8192_mmsys_driver_data}, { .compatible = "mediatek,mt8195-mmsys", diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index a2fdfc8ddb15..9501f4019199 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -3,6 +3,7 @@ * Copyright (c) 2015 MediaTek Inc. */ +#include #include #include #include @@ -12,6 +13,7 @@ #include #include #include +#include #include