Merge drm/drm-next into drm-xe-next

Need to sync some header includes that propagated through
drm-intel-next.

v2: After some changes in drm/drm-next

Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Rodrigo Vivi 2024-06-26 18:22:52 -04:00
commit 8664e76373
616 changed files with 27852 additions and 18087 deletions

View File

@ -32,8 +32,6 @@ properties:
- innolux,hj110iz-01a
# STARRY 2081101QFH032011-53G 10.1" WUXGA TFT LCD panel
- starry,2081101qfh032011-53g
# STARRY himax83102-j02 10.51" WUXGA TFT LCD panel
- starry,himax83102-j02
# STARRY ili9882t 10.51" WUXGA TFT LCD panel
- starry,ili9882t

View File

@ -0,0 +1,77 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/panel/himax,hx83102.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Himax HX83102 MIPI-DSI LCD panel controller
maintainers:
- Cong Yang <yangcong5@huaqin.corp-partner.google.com>
allOf:
- $ref: panel-common.yaml#
properties:
compatible:
items:
- enum:
# Boe nv110wum-l60 11.0" WUXGA TFT LCD panel
- boe,nv110wum-l60
# IVO t109nw41 11.0" WUXGA TFT LCD panel
- ivo,t109nw41
# STARRY himax83102-j02 10.51" WUXGA TFT LCD panel
- starry,himax83102-j02
- const: himax,hx83102
reg:
description: the virtual channel number of a DSI peripheral
enable-gpios:
description: a GPIO spec for the enable pin
pp1800-supply:
description: core voltage supply
avdd-supply:
description: phandle of the regulator that provides positive voltage
avee-supply:
description: phandle of the regulator that provides negative voltage
backlight: true
port: true
rotation: true
required:
- compatible
- reg
- enable-gpios
- pp1800-supply
- avdd-supply
- avee-supply
additionalProperties: false
examples:
- |
dsi {
#address-cells = <1>;
#size-cells = <0>;
panel@0 {
compatible = "starry,himax83102-j02", "himax,hx83102";
reg = <0>;
enable-gpios = <&pio 45 0>;
avdd-supply = <&ppvarn_lcd>;
avee-supply = <&ppvarp_lcd>;
pp1800-supply = <&pp1800_lcd>;
backlight = <&backlight_lcd0>;
port {
panel_in: endpoint {
remote-endpoint = <&dsi_out>;
};
};
};
};
...

View File

@ -0,0 +1,117 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/panel/panel-edp-legacy.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Legacy eDP panels from before the "edp-panel" compatible
maintainers:
- Douglas Anderson <dianders@chromium.org>
description: |
This binding file is a collection of eDP panels from before the generic
"edp-panel" compatible was introduced. It is kept around to support old
dts files. The only reason one might add a new panel here instead of using
the generic "edp-panel" is if it needs to be used on an eDP controller
that doesn't support the generic "edp-panel" compatible; even then, there
should be a strong preference to add the generic "edp-panel" compatible instead.
allOf:
- $ref: panel-common.yaml#
properties:
compatible:
enum:
# Compatibles must be listed in alphabetical order.
# The description in the comment is mandatory for each compatible.
# AU Optronics Corporation 10.1" WSVGA TFT LCD panel
- auo,b101ean01
# AUO B116XAK01 eDP TFT LCD panel
- auo,b116xa01
# AU Optronics Corporation 13.3" FHD (1920x1080) color TFT-LCD panel
- auo,b133htn01
# AU Optronics Corporation 13.3" WXGA (1366x768) TFT LCD panel
- auo,b133xtn01
# BOE OPTOELECTRONICS TECHNOLOGY 10.1" WXGA TFT LCD panel
- boe,nv101wxmn51
# BOE NV110WTM-N61 11.0" 2160x1440 TFT LCD Panel
- boe,nv110wtm-n61
# BOE NV133FHM-N61 13.3" FHD (1920x1080) TFT LCD Panel
- boe,nv133fhm-n61
# BOE NV133FHM-N62 13.3" FHD (1920x1080) TFT LCD Panel
- boe,nv133fhm-n62
# BOE NV140FHM-N49 14.0" FHD a-Si FT panel
- boe,nv140fhmn49
# Innolux Corporation 11.6" WXGA (1366x768) TFT LCD panel
- innolux,n116bca-ea1
# Innolux Corporation 11.6" WXGA (1366x768) TFT LCD panel
- innolux,n116bge
# InnoLux 13.3" FHD (1920x1080) eDP TFT LCD panel
- innolux,n125hce-gn1
# Innolux P120ZDG-BF1 12.02 inch eDP 2K display panel
- innolux,p120zdg-bf1
# King & Display KD116N21-30NV-A010 eDP TFT LCD panel
- kingdisplay,kd116n21-30nv-a010
# LG LP079QX1-SP0V 7.9" (1536x2048 pixels) TFT LCD panel
- lg,lp079qx1-sp0v
# LG 9.7" (2048x1536 pixels) TFT LCD panel
- lg,lp097qx1-spa1
# LG 12.0" (1920x1280 pixels) TFT LCD panel
- lg,lp120up1
# LG 12.9" (2560x1700 pixels) TFT LCD panel
- lg,lp129qe
# NewEast Optoelectronics CO., LTD WJFH116008A eDP TFT LCD panel
- neweast,wjfh116008a
# Samsung 12.2" (2560x1600 pixels) TFT LCD panel
- samsung,lsn122dl01-c01
# Samsung Electronics 14" WXGA (1366x768) TFT LCD panel
- samsung,ltn140at29-301
# Sharp LD-D5116Z01B 12.3" WUXGA+ eDP panel
- sharp,ld-d5116z01b
# Sharp 12.3" (2400x1600 pixels) TFT LCD panel
- sharp,lq123p1jx31
backlight: true
ddc-i2c-bus: true
enable-gpios: true
panel-timing: true
port: true
power-supply: true
no-hpd: true
hpd-gpios: true
additionalProperties: false
required:
- compatible
- power-supply
examples:
- |
panel: panel {
compatible = "innolux,n116bge";
power-supply = <&panel_regulator>;
backlight = <&backlight>;
panel-timing {
clock-frequency = <74250000>;
hactive = <1366>;
hfront-porch = <136>;
hback-porch = <60>;
hsync-len = <30>;
hsync-active = <0>;
vactive = <768>;
vfront-porch = <8>;
vback-porch = <12>;
vsync-len = <12>;
vsync-active = <0>;
};
port {
panel_in_edp: endpoint {
remote-endpoint = <&edp_out_panel>;
};
};
};

View File

@ -50,6 +50,12 @@ description: |
| Command or data |
|<D7><D6><D5><D4><D3><D2><D1><D0>|
The standard defines one pixel format for type C: RGB111. The industry,
however, has decided to provide the type A/B interface pixel formats also on
the Type C interface, and the most common among these are RGB565 and RGB666.
The MIPI DCS command set_address_mode (36h) has one bit that controls RGB/BGR
order. This gives each supported RGB format a BGR variant.
The panel resolution is specified using the panel-timing node properties
hactive (width) and vactive (height). The other mandatory panel-timing
properties should be set to zero except clock-frequency which can be
@ -93,6 +99,28 @@ properties:
spi-3wire: true
format:
description: >
Pixel format in bit order as sent on the wire:
* `x2r1g1b1r1g1b1` - RGB111, 2 pixels per byte
* `x2b1g1r1b1g1r1` - BGR111, 2 pixels per byte
* `x1r1g1b1x1r1g1b1` - RGB111, 2 pixels per byte
* `x1b1g1r1x1b1g1r1` - BGR111, 2 pixels per byte
* `r5g6b5` - RGB565, 2 bytes
* `b5g6r5` - BGR565, 2 bytes
* `r6x2g6x2b6x2` - RGB666, 3 bytes
* `b6x2g6x2r6x2` - BGR666, 3 bytes
enum:
- x2r1g1b1r1g1b1
- x2b1g1r1b1g1r1
- x1r1g1b1x1r1g1b1
- x1b1g1r1x1b1g1r1
- r5g6b5
- b5g6r5
- r6x2g6x2b6x2
- b6x2g6x2r6x2
default: r5g6b5
required:
- compatible
- reg
@ -119,6 +147,8 @@ examples:
reset-gpios = <&gpio 25 GPIO_ACTIVE_HIGH>;
write-only;
format = "r5g6b5";
backlight = <&backlight>;
width-mm = <35>;
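
As an illustration of the x2r1g1b1r1g1b1 layout described above, a minimal
C sketch (a hypothetical helper, not part of this patch) packing two RGB111
pixels into one byte:

    /* Bits 7..6 are don't-care padding; pixel 0 occupies bits 5..3 and
     * pixel 1 bits 2..0, matching "x2r1g1b1r1g1b1" on the wire.
     */
    static u8 pack_x2rgb111(u8 r0, u8 g0, u8 b0, u8 r1, u8 g1, u8 b1)
    {
            return ((r0 & 1) << 5) | ((g0 & 1) << 4) | ((b0 & 1) << 3) |
                   ((r1 & 1) << 2) | ((g1 & 1) << 1) | ((b1 & 1) << 0);
    }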

View File

@ -41,6 +41,12 @@ properties:
- auo,g190ean01
# Kaohsiung Opto-Electronics Inc. 10.1" WUXGA (1920 x 1200) LVDS TFT LCD panel
- koe,tx26d202vm0bwa
# Lincoln Technology Solutions, LCD185-101CT 10.1" TFT 1920x1200
- lincolntech,lcd185-101ct
# Microtips Technology MF-101HIEBCAF0 10.1" WUXGA (1920x1200) TFT LCD panel
- microtips,mf-101hiebcaf0
# Microtips Technology MF-103HIEB0GA0 10.25" 1920x720 TFT LCD panel
- microtips,mf-103hieb0ga0
# NLT Technologies, Ltd. 15.6" FHD (1920x1080) LVDS TFT LCD panel
- nlt,nl192108ac18-02d

View File

@ -41,22 +41,10 @@ properties:
- ampire,am800600p5tmqw-tb8h
# AU Optronics Corporation 10.1" WSVGA TFT LCD panel
- auo,b101aw03
# AU Optronics Corporation 10.1" WSVGA TFT LCD panel
- auo,b101ean01
# AU Optronics Corporation 10.1" WXGA TFT LCD panel
- auo,b101xtn01
# AUO B116XAK01 eDP TFT LCD panel
- auo,b116xa01
# AU Optronics Corporation 11.6" HD (1366x768) color TFT-LCD panel
- auo,b116xw03
# AU Optronics Corporation 13.3" FHD (1920x1080) color TFT-LCD panel
- auo,b133han05
# AU Optronics Corporation 13.3" FHD (1920x1080) color TFT-LCD panel
- auo,b133htn01
# AU Optronics Corporation 13.3" WXGA (1366x768) TFT LCD panel
- auo,b133xtn01
# AU Optronics Corporation 14.0" FHD (1920x1080) color TFT-LCD panel
- auo,b140han06
# AU Optronics Corporation 7.0" WVGA (800 x 480) TFT LCD panel
- auo,g070vvn01
# AU Optronics Corporation 10.1" (1280x800) color TFT LCD panel
@ -81,16 +69,6 @@ properties:
- boe,ev121wxm-n10-1850
# BOE HV070WSA-100 7.01" WSVGA TFT LCD panel
- boe,hv070wsa-100
# BOE OPTOELECTRONICS TECHNOLOGY 10.1" WXGA TFT LCD panel
- boe,nv101wxmn51
# BOE NV110WTM-N61 11.0" 2160x1440 TFT LCD Panel
- boe,nv110wtm-n61
# BOE NV133FHM-N61 13.3" FHD (1920x1080) TFT LCD Panel
- boe,nv133fhm-n61
# BOE NV133FHM-N62 13.3" FHD (1920x1080) TFT LCD Panel
- boe,nv133fhm-n62
# BOE NV140FHM-N49 14.0" FHD a-Si FT panel
- boe,nv140fhmn49
# Crystal Clear Technology CMT430B19N00 4.3" 480x272 TFT-LCD panel
- cct,cmt430b19n00
# CDTech(H.K.) Electronics Limited 4.3" 480x272 color TFT-LCD panel
@ -172,8 +150,6 @@ properties:
- hannstar,hsd100pxn1
# Hitachi Ltd. Corporation 9" WVGA (800x480) TFT LCD panel
- hit,tx23d38vm0caa
# InfoVision Optoelectronics M133NWF4 R0 13.3" FHD (1920x1080) TFT LCD panel
- ivo,m133nwf4-r0
# Innolux AT043TN24 4.3" WQVGA TFT LCD panel
- innolux,at043tn24
# Innolux AT070TN92 7.0" WQVGA TFT LCD panel
@ -192,22 +168,12 @@ properties:
- innolux,g121x1-l03
# Innolux Corporation 12.1" G121XCE-L01 XGA (1024x768) TFT LCD panel
- innolux,g121xce-l01
# Innolux Corporation 11.6" WXGA (1366x768) TFT LCD panel
- innolux,n116bca-ea1
# Innolux Corporation 11.6" WXGA (1366x768) TFT LCD panel
- innolux,n116bge
# InnoLux 13.3" FHD (1920x1080) eDP TFT LCD panel
- innolux,n125hce-gn1
# InnoLux 15.6" FHD (1920x1080) TFT LCD panel
- innolux,g156hce-l01
# InnoLux 15.6" WXGA TFT LCD panel
- innolux,n156bge-l21
# Innolux P120ZDG-BF1 12.02 inch eDP 2K display panel
- innolux,p120zdg-bf1
# Innolux Corporation 7.0" WSVGA (1024x600) TFT LCD panel
- innolux,zj070na-01p
# King & Display KD116N21-30NV-A010 eDP TFT LCD panel
- kingdisplay,kd116n21-30nv-a010
# Kaohsiung Opto-Electronics Inc. 5.7" QVGA (320 x 240) TFT LCD panel
- koe,tx14d24vm1bpa
# Kaohsiung Opto-Electronics. TX31D200VM0BAA 12.3" HSXGA LVDS panel
@ -220,14 +186,6 @@ properties:
- lemaker,bl035-rgb-002
# LG 7" (800x480 pixels) TFT LCD panel
- lg,lb070wv8
# LG LP079QX1-SP0V 7.9" (1536x2048 pixels) TFT LCD panel
- lg,lp079qx1-sp0v
# LG 9.7" (2048x1536 pixels) TFT LCD panel
- lg,lp097qx1-spa1
# LG 12.0" (1920x1280 pixels) TFT LCD panel
- lg,lp120up1
# LG 12.9" (2560x1700 pixels) TFT LCD panel
- lg,lp129qe
# Logic Technologies LT161010-2NHC 7" WVGA TFT Cap Touch Module
- logictechno,lt161010-2nhc
# Logic Technologies LT161010-2NHR 7" WVGA TFT Resistive Touch Module
@ -254,8 +212,6 @@ properties:
- nec,nl4827hc19-05b
# Netron-DY E231732 7.0" WSVGA TFT LCD panel
- netron-dy,e231732
# NewEast Optoelectronics CO., LTD WJFH116008A eDP TFT LCD panel
- neweast,wjfh116008a
# Newhaven Display International 480 x 272 TFT LCD panel
- newhaven,nhd-4.3-480272ef-atxl
# New Vision Display 7.0" 800 RGB x 480 TFT LCD panel
@ -280,6 +236,8 @@ properties:
- powertip,ph128800t006-zhc01
# POWERTIP PH800480T013-IDF2 7.0" WVGA TFT LCD panel
- powertip,ph800480t013-idf02
# PrimeView PM070WL4 7.0" 800x480 TFT LCD panel
- primeview,pm070wl4
# QiaoDian XianShi Corporation 4.3" TFT LCD panel
- qiaodian,qd43003c0-40
# Shenzhen QiShenglong Industrialist Co., Ltd. Gopher 2b 4.3" 480(RGB)x272 TFT LCD panel
@ -290,16 +248,10 @@ properties:
- rocktech,rk070er9427
# Rocktech Display Ltd. RK043FN48H 4.3" 480x272 LCD-TFT panel
- rocktech,rk043fn48h
# Samsung 13.3" FHD (1920x1080 pixels) eDP AMOLED panel
- samsung,atna33xc20
# Samsung 12.2" (2560x1600 pixels) TFT LCD panel
- samsung,lsn122dl01-c01
# Samsung Electronics 10.1" WXGA (1280x800) TFT LCD panel
- samsung,ltl101al01
# Samsung Electronics 10.1" WSVGA TFT LCD panel
- samsung,ltn101nt05
# Samsung Electronics 14" WXGA (1366x768) TFT LCD panel
- samsung,ltn140at29-301
# Satoz SAT050AT40H12R2 5.0" WVGA TFT LCD panel
- satoz,sat050at40h12r2
# Sharp LQ035Q7DB03 3.5" QVGA TFT LCD panel
@ -308,18 +260,12 @@ properties:
- sharp,lq070y3dg3b
# Sharp Display Corp. LQ101K1LY04 10.07" WXGA TFT LCD panel
- sharp,lq101k1ly04
# Sharp 12.3" (2400x1600 pixels) TFT LCD panel
- sharp,lq123p1jx31
# Sharp 14" (1920x1080 pixels) TFT LCD panel
- sharp,lq140m1jw46
# Sharp LS020B1DD01D 2.0" HQVGA TFT LCD panel
- sharp,ls020b1dd01d
# Shelly SCA07010-BFN-LNN 7.0" WVGA TFT LCD panel
- shelly,sca07010-bfn-lnn
# Starry KR070PE2T 7" WVGA TFT LCD panel
- starry,kr070pe2t
# Starry 12.2" (1920x1200 pixels) TFT LCD panel
- starry,kr122ea0sra
# Startek KD070WVFPA043-C069A 7" TFT LCD panel
- startek,kd070wvfpa
# Team Source Display Technology TST043015CMHX 4.3" WQVGA TFT LCD panel

View File

@ -0,0 +1,95 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/panel/samsung,atna33xc20.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung 13.3" FHD (1920x1080 pixels) eDP AMOLED panel
maintainers:
- Douglas Anderson <dianders@chromium.org>
allOf:
- $ref: panel-common.yaml#
properties:
compatible:
const: samsung,atna33xc20
enable-gpios: true
port: true
power-supply: true
no-hpd: true
hpd-gpios: true
additionalProperties: false
required:
- compatible
- enable-gpios
- power-supply
examples:
- |
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
i2c {
#address-cells = <1>;
#size-cells = <0>;
bridge@2d {
compatible = "ti,sn65dsi86";
reg = <0x2d>;
interrupt-parent = <&tlmm>;
interrupts = <10 IRQ_TYPE_LEVEL_HIGH>;
enable-gpios = <&tlmm 102 GPIO_ACTIVE_HIGH>;
vpll-supply = <&src_pp1800_s4a>;
vccio-supply = <&src_pp1800_s4a>;
vcca-supply = <&src_pp1200_l2a>;
vcc-supply = <&src_pp1200_l2a>;
clocks = <&rpmhcc RPMH_LN_BB_CLK2>;
clock-names = "refclk";
no-hpd;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
endpoint {
remote-endpoint = <&dsi0_out>;
};
};
port@1 {
reg = <1>;
sn65dsi86_out: endpoint {
remote-endpoint = <&panel_in_edp>;
};
};
};
aux-bus {
panel {
compatible = "samsung,atna33xc20";
enable-gpios = <&tlmm 12 GPIO_ACTIVE_HIGH>;
power-supply = <&pp3300_dx_edp>;
hpd-gpios = <&sn65dsi86_bridge 2 GPIO_ACTIVE_HIGH>;
port {
panel_in_edp: endpoint {
remote-endpoint = <&sn65dsi86_out>;
};
};
};
};
};
};

View File

@ -1,30 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/panel/sharp,ld-d5116z01b.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Sharp LD-D5116Z01B 12.3" WUXGA+ eDP panel
maintainers:
- Jeffrey Hugo <jeffrey.l.hugo@gmail.com>
allOf:
- $ref: panel-common.yaml#
properties:
compatible:
const: sharp,ld-d5116z01b
power-supply: true
backlight: true
port: true
no-hpd: true
additionalProperties: false
required:
- compatible
- power-supply
...

View File

@ -0,0 +1,60 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/panel/wl-355608-a8.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: WL-355608-A8 3.5" (640x480 pixels) 24-bit IPS LCD panel
maintainers:
- Ryan Walklin <ryan@testtoast.com>
allOf:
- $ref: panel-common.yaml#
- $ref: /schemas/spi/spi-peripheral-props.yaml#
properties:
compatible:
const: wl-355608-a8
reg:
maxItems: 1
spi-3wire: true
required:
- compatible
- reg
- port
- power-supply
- reset-gpios
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/gpio/gpio.h>
spi {
#address-cells = <1>;
#size-cells = <0>;
panel@0 {
compatible = "wl-355608-a8";
reg = <0>;
spi-3wire;
spi-max-frequency = <3125000>;
reset-gpios = <&pio 8 14 GPIO_ACTIVE_LOW>; // PI14
backlight = <&backlight>;
power-supply = <&reg_lcd>;
port {
endpoint {
remote-endpoint = <&tcon_lcd0_out_lcd>;
};
};
};
};

View File

@ -15,6 +15,7 @@ properties:
items:
- enum:
- rockchip,px30-mipi-dsi
- rockchip,rk3128-mipi-dsi
- rockchip,rk3288-mipi-dsi
- rockchip,rk3399-mipi-dsi
- rockchip,rk3568-mipi-dsi
@ -77,6 +78,7 @@ allOf:
contains:
enum:
- rockchip,px30-mipi-dsi
- rockchip,rk3128-mipi-dsi
- rockchip,rk3568-mipi-dsi
- rockchip,rv1126-mipi-dsi

View File

@ -34,6 +34,7 @@ properties:
- const: arm,mali-valhall-jm # Mali Valhall GPU model/revision is fully discoverable
- items:
- enum:
- mediatek,mt8188-mali
- mediatek,mt8192-mali
- const: arm,mali-valhall-jm # Mali Valhall GPU model/revision is fully discoverable
@ -195,7 +196,9 @@ allOf:
properties:
compatible:
contains:
const: mediatek,mt8183b-mali
enum:
- mediatek,mt8183b-mali
- mediatek,mt8188-mali
then:
properties:
power-domains:

View File

@ -820,6 +820,8 @@ patternProperties:
description: Lichee Pi
"^linaro,.*":
description: Linaro Limited
"^lincolntech,.*":
description: Lincoln Technology Solutions
"^lineartechnology,.*":
description: Linear Technology
"^linksprite,.*":
@ -924,6 +926,8 @@ patternProperties:
description: Microsoft Corporation
"^microsys,.*":
description: MicroSys Electronics GmbH
"^microtips,.*":
description: Microtips Technology USA
"^mikroe,.*":
description: MikroElektronika d.o.o.
"^mikrotik,.*":
@ -1160,6 +1164,8 @@ patternProperties:
description: PowerVR (deprecated, use img)
"^powkiddy,.*":
description: Powkiddy
"^primeview,.*":
description: Prime View International (PVI)
"^primux,.*":
description: Primux Trading, S.L.
"^probox2,.*":

View File

@ -57,8 +57,8 @@ is larger than the driver minor, the DRM_IOCTL_SET_VERSION call will
return an error. Otherwise the driver's set_version() method will be
called with the requested version.
Name, Description and Date
~~~~~~~~~~~~~~~~~~~~~~~~~~
Name and Description
~~~~~~~~~~~~~~~~~~~~
char \*name; char \*desc;
The driver name is printed to the kernel log at initialization time,
@ -69,12 +69,6 @@ The driver description is a purely informative string passed to
userspace through the DRM_IOCTL_VERSION ioctl and otherwise unused by
the kernel.
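
For illustration, a minimal sketch (not from this patch; field names assumed
from the current struct drm_driver) of how a driver sets these strings::

    static const struct drm_driver foo_drm_driver = {
            /* "foo" is a placeholder name; printed to the kernel log */
            .name  = "foo",
            /* purely informative, reported through DRM_IOCTL_VERSION */
            .desc  = "Foo display controller",
            .major = 1,
            .minor = 0,
    };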
The driver date, formatted as YYYYMMDD, is meant to identify the date of
the latest modification to the driver. However, as most drivers fail to
update it, its value is mostly useless. The DRM core prints it to the
kernel log at initialization time and passes it to userspace through the
DRM_IOCTL_VERSION ioctl.
Module Initialization
---------------------

View File

@ -110,15 +110,21 @@ fbdev Helper Functions Reference
.. kernel-doc:: drivers/gpu/drm/drm_fb_helper.c
:doc: fbdev helpers
.. kernel-doc:: drivers/gpu/drm/drm_fbdev_dma.c
:export:
.. kernel-doc:: drivers/gpu/drm/drm_fbdev_shmem.c
:export:
.. kernel-doc:: drivers/gpu/drm/drm_fbdev_ttm.c
:export:
.. kernel-doc:: include/drm/drm_fb_helper.h
:internal:
.. kernel-doc:: drivers/gpu/drm/drm_fb_helper.c
:export:
.. kernel-doc:: drivers/gpu/drm/drm_fbdev_generic.c
:export:
format Helper Functions Reference
=================================

View File

@ -150,7 +150,7 @@ High Definition Audio
.. kernel-doc:: drivers/gpu/drm/i915/display/intel_audio.c
:internal:
.. kernel-doc:: include/drm/i915_component.h
.. kernel-doc:: include/drm/intel/i915_component.h
:internal:
Intel HDMI LPE Audio Support
@ -210,9 +210,6 @@ DMC wakelock support
.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dmc_wl.c
:doc: DMC wakelock support
.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dmc_wl.c
:internal:
Video BIOS Table (VBT)
----------------------

View File

@ -17,7 +17,6 @@ Owner Module/Drivers,Group,Property Name,Type,Property Values,Object attached,De
,Virtual GPU,“suggested X”,RANGE,"Min=0, Max=0xffffffff",Connector,property to suggest an X offset for a connector
,,“suggested Y”,RANGE,"Min=0, Max=0xffffffff",Connector,property to suggest a Y offset for a connector
,Optional,"""aspect ratio""",ENUM,"{ ""None"", ""4:3"", ""16:9"" }",Connector,TBD
i915,Generic,"""Broadcast RGB""",ENUM,"{ ""Automatic"", ""Full"", ""Limited 16:235"" }",Connector,"When this property is set to Limited 16:235 and CTM is set, the hardware will be programmed with the result of the multiplication of CTM by the limited range matrix to ensure the pixels normally in the range 0..1.0 are remapped to the range 16/255..235/255."
,,“audio”,ENUM,"{ ""force-dvi"", ""off"", ""auto"", ""on"" }",Connector,TBD
,SDVO-TV,“mode”,ENUM,"{ ""NTSC_M"", ""NTSC_J"", ""NTSC_443"", ""PAL_B"" } etc.",Connector,TBD
,,"""left_margin""",RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
@ -38,7 +37,6 @@ i915,Generic,"""Broadcast RGB""",ENUM,"{ ""Automatic"", ""Full"", ""Limited 16:2
,,“dot_crawl”,RANGE,"Min=0, Max=1",Connector,TBD
,SDVO-TV/LVDS,“brightness”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
CDV gma-500,Generic,"""Broadcast RGB""",ENUM,"{ “Full”, “Limited 16:235” }",Connector,TBD
,,"""Broadcast RGB""",ENUM,"{ “off”, “auto”, “on” }",Connector,TBD
Poulsbo,Generic,“backlight”,RANGE,"Min=0, Max=100",Connector,TBD
,SDVO-TV,“mode”,ENUM,"{ ""NTSC_M"", ""NTSC_J"", ""NTSC_443"", ""PAL_B"" } etc.",Connector,TBD
,,"""left_margin""",RANGE,"Min=0, Max= SDVO dependent",Connector,TBD


View File

@ -243,19 +243,6 @@ Contact: Maintainer of the driver you plan to convert
Level: Intermediate
Convert drivers to use drm_fbdev_generic_setup()
------------------------------------------------
Most drivers can use drm_fbdev_generic_setup(). Driver have to implement
atomic modesetting and GEM vmap support. Historically, generic fbdev emulation
expected the framebuffer in system memory or system-like memory. By employing
struct iosys_map, drivers with framebuffers in I/O memory can be supported
as well.
Contact: Maintainer of the driver you plan to convert
Level: Intermediate
Reimplement functions in drm_fbdev_fb_ops without fbdev
-------------------------------------------------------
@ -482,30 +469,53 @@ Contact: Thomas Zimmermann <tzimmermann@suse.de>
Level: Starter
Clean up checks for already prepared/enabled in panels
------------------------------------------------------
Remove disable/unprepare in remove/shutdown in panel-simple and panel-edp
-------------------------------------------------------------------------
In a whole pile of panel drivers, we have code to make the
prepare/unprepare/enable/disable callbacks behave as no-ops if they've already
been called. To get some idea of the duplicated code, try::
As of commit d2aacaf07395 ("drm/panel: Check for already prepared/enabled in
drm_panel"), we have a check in the drm_panel core to make sure nobody
double-calls prepare/enable/disable/unprepare. Eventually that should probably
be turned into a WARN_ON() or somehow made louder, but right now we actually
expect it to trigger and so we don't want it to be too loud.
git grep 'if.*>prepared' -- drivers/gpu/drm/panel
git grep 'if.*>enabled' -- drivers/gpu/drm/panel
Specifically, that warning will trigger for panel-edp and panel-simple at
shutdown time because those panels hardcode calls to drm_panel_disable()
and drm_panel_unprepare() at shutdown and remove time, regardless of panel
state. On systems with a properly coded DRM modeset driver that calls
drm_atomic_helper_shutdown(), this is pretty much guaranteed to cause the
warning to fire.
In the patch ("drm/panel: Check for already prepared/enabled in drm_panel")
we've moved this check to the core. Now we can most definitely remove the
check from the individual panels and save a pile of code.
In addition to removing the check from the individual panels, it is believed
that even the core shouldn't need this check, and it should be considered
an error if other code ever relies on it. The check in the core currently
prints a warning with dev_warn() whenever something relies on it. After a
little while, we likely want to promote this to a WARN(1) to help encourage
folks not to rely on this behavior.
Unfortunately we can't safely remove the calls in panel-edp and panel-simple
until we're sure that all DRM modeset drivers that are used with those panels
properly call drm_atomic_helper_shutdown(). This TODO item is to validate
that all DRM modeset drivers used with panel-edp and panel-simple properly
call drm_atomic_helper_shutdown() and then remove the calls to
disable/unprepare from those panels. Alternatively, this TODO item could be
removed by convincing stakeholders that those calls are fine and downgrading
the error message in drm_panel_disable() / drm_panel_unprepare() to a
debug-level message.
Contact: Douglas Anderson <dianders@chromium.org>
Level: Starter/Intermediate
Level: Intermediate
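
For reference, a minimal sketch (names are placeholders) of the
drm_atomic_helper_shutdown() pattern this item asks modeset drivers to
implement before the panel calls can be removed::

    /* Called at system shutdown and driver unbind; disables/unprepares
     * attached panels through the atomic helpers.
     */
    static void foo_platform_shutdown(struct platform_device *pdev)
    {
            struct drm_device *drm = platform_get_drvdata(pdev);

            drm_atomic_helper_shutdown(drm);
    }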
Transition away from using mipi_dsi_*_write_seq()
-------------------------------------------------
The macros mipi_dsi_generic_write_seq() and mipi_dsi_dcs_write_seq() are
non-intuitive because, if there are errors, they return out of the *caller's*
function. We should move all callers to use mipi_dsi_generic_write_seq_multi()
and mipi_dsi_dcs_write_seq_multi() macros instead.
Once all callers are transitioned, the macros and the functions that they call,
mipi_dsi_generic_write_chatty() and mipi_dsi_dcs_write_buffer_chatty(), can
probably be removed. Alternatively, if people feel like the _multi() variants
are overkill for some use cases, we could keep the mipi_dsi_*_write_seq()
variants but change them not to return out of the caller.
Contact: Douglas Anderson <dianders@chromium.org>
Level: Starter
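
As a sketch of the target calling convention (assuming the _multi() context
API as described above), errors accumulate in the shared context instead of
returning out of the caller::

    struct mipi_dsi_multi_context ctx = { .dsi = dsi };

    mipi_dsi_dcs_write_seq_multi(&ctx, MIPI_DCS_EXIT_SLEEP_MODE);
    mipi_dsi_dcs_write_seq_multi(&ctx, MIPI_DCS_SET_DISPLAY_ON);

    /* ctx.accum_err holds the first error, if any */
    return ctx.accum_err;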
Core refactorings

View File

@ -6909,7 +6909,7 @@ DRM DRIVER FOR LG SW43408 PANELS
M: Sumit Semwal <sumit.semwal@linaro.org>
M: Caleb Connolly <caleb.connolly@linaro.org>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/lg,sw43408.yaml
F: drivers/gpu/drm/panel/panel-lg-sw43408.c
@ -7196,6 +7196,7 @@ L: dri-devel@lists.freedesktop.org
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/gpu/vkms.rst
F: drivers/gpu/drm/ci/xfails/vkms*
F: drivers/gpu/drm/vkms/
DRM DRIVER FOR VIRTUALBOX VIRTUAL GPU
@ -7566,7 +7567,6 @@ F: include/drm/gpu_scheduler.h
DRM PANEL DRIVERS
M: Neil Armstrong <neil.armstrong@linaro.org>
R: Jessica Zhang <quic_jesszhan@quicinc.com>
R: Sam Ravnborg <sam@ravnborg.org>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
@ -11012,6 +11012,7 @@ S: Supported
F: drivers/gpu/drm/i915/display/
F: drivers/gpu/drm/xe/display/
F: drivers/gpu/drm/xe/compat-i915-headers
F: include/drm/intel/
INTEL DRM I915 DRIVER (Meteor Lake, DG2 and older excluding Poulsbo, Moorestown and derivative)
M: Jani Nikula <jani.nikula@linux.intel.com>
@ -11024,12 +11025,12 @@ W: https://drm.pages.freedesktop.org/intel-docs/
Q: http://patchwork.freedesktop.org/project/intel-gfx/
B: https://drm.pages.freedesktop.org/intel-docs/how-to-file-i915-bugs.html
C: irc://irc.oftc.net/intel-gfx
T: git git://anongit.freedesktop.org/drm-intel
T: git https://gitlab.freedesktop.org/drm/i915/kernel.git
F: Documentation/ABI/testing/sysfs-driver-intel-i915-hwmon
F: Documentation/gpu/i915.rst
F: drivers/gpu/drm/ci/xfails/i915*
F: drivers/gpu/drm/i915/
F: include/drm/i915*
F: include/drm/intel/
F: include/uapi/drm/i915_drm.h
INTEL DRM XE DRIVER (Lunar Lake and newer)
@ -11045,7 +11046,7 @@ T: git https://gitlab.freedesktop.org/drm/xe/kernel.git
F: Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
F: Documentation/gpu/xe/
F: drivers/gpu/drm/xe/
F: include/drm/xe*
F: include/drm/intel/
F: include/uapi/drm/xe_drm.h
INTEL ETHERNET DRIVERS

View File

@ -17,8 +17,8 @@
#include <linux/bcma/bcma.h>
#include <linux/bcma/bcma_regs.h>
#include <linux/platform_data/x86/apple.h>
#include <drm/i915_drm.h>
#include <drm/i915_pciids.h>
#include <drm/intel/i915_drm.h>
#include <drm/intel/i915_pciids.h>
#include <asm/pci-direct.h>
#include <asm/dma.h>
#include <asm/io_apic.h>
@ -518,47 +518,46 @@ static const struct intel_early_ops gen11_early_ops __initconst = {
/* Intel integrated GPUs for which we need to reserve "stolen memory" */
static const struct pci_device_id intel_early_ids[] __initconst = {
INTEL_I830_IDS(&i830_early_ops),
INTEL_I845G_IDS(&i845_early_ops),
INTEL_I85X_IDS(&i85x_early_ops),
INTEL_I865G_IDS(&i865_early_ops),
INTEL_I915G_IDS(&gen3_early_ops),
INTEL_I915GM_IDS(&gen3_early_ops),
INTEL_I945G_IDS(&gen3_early_ops),
INTEL_I945GM_IDS(&gen3_early_ops),
INTEL_VLV_IDS(&gen6_early_ops),
INTEL_PINEVIEW_G_IDS(&gen3_early_ops),
INTEL_PINEVIEW_M_IDS(&gen3_early_ops),
INTEL_I965G_IDS(&gen3_early_ops),
INTEL_G33_IDS(&gen3_early_ops),
INTEL_I965GM_IDS(&gen3_early_ops),
INTEL_GM45_IDS(&gen3_early_ops),
INTEL_G45_IDS(&gen3_early_ops),
INTEL_IRONLAKE_D_IDS(&gen3_early_ops),
INTEL_IRONLAKE_M_IDS(&gen3_early_ops),
INTEL_SNB_D_IDS(&gen6_early_ops),
INTEL_SNB_M_IDS(&gen6_early_ops),
INTEL_IVB_M_IDS(&gen6_early_ops),
INTEL_IVB_D_IDS(&gen6_early_ops),
INTEL_HSW_IDS(&gen6_early_ops),
INTEL_BDW_IDS(&gen8_early_ops),
INTEL_CHV_IDS(&chv_early_ops),
INTEL_SKL_IDS(&gen9_early_ops),
INTEL_BXT_IDS(&gen9_early_ops),
INTEL_KBL_IDS(&gen9_early_ops),
INTEL_CFL_IDS(&gen9_early_ops),
INTEL_GLK_IDS(&gen9_early_ops),
INTEL_CNL_IDS(&gen9_early_ops),
INTEL_ICL_11_IDS(&gen11_early_ops),
INTEL_EHL_IDS(&gen11_early_ops),
INTEL_JSL_IDS(&gen11_early_ops),
INTEL_TGL_12_IDS(&gen11_early_ops),
INTEL_RKL_IDS(&gen11_early_ops),
INTEL_ADLS_IDS(&gen11_early_ops),
INTEL_ADLP_IDS(&gen11_early_ops),
INTEL_ADLN_IDS(&gen11_early_ops),
INTEL_RPLS_IDS(&gen11_early_ops),
INTEL_RPLP_IDS(&gen11_early_ops),
INTEL_I830_IDS(INTEL_VGA_DEVICE, &i830_early_ops),
INTEL_I845G_IDS(INTEL_VGA_DEVICE, &i845_early_ops),
INTEL_I85X_IDS(INTEL_VGA_DEVICE, &i85x_early_ops),
INTEL_I865G_IDS(INTEL_VGA_DEVICE, &i865_early_ops),
INTEL_I915G_IDS(INTEL_VGA_DEVICE, &gen3_early_ops),
INTEL_I915GM_IDS(INTEL_VGA_DEVICE, &gen3_early_ops),
INTEL_I945G_IDS(INTEL_VGA_DEVICE, &gen3_early_ops),
INTEL_I945GM_IDS(INTEL_VGA_DEVICE, &gen3_early_ops),
INTEL_VLV_IDS(INTEL_VGA_DEVICE, &gen6_early_ops),
INTEL_PNV_IDS(INTEL_VGA_DEVICE, &gen3_early_ops),
INTEL_I965G_IDS(INTEL_VGA_DEVICE, &gen3_early_ops),
INTEL_G33_IDS(INTEL_VGA_DEVICE, &gen3_early_ops),
INTEL_I965GM_IDS(INTEL_VGA_DEVICE, &gen3_early_ops),
INTEL_GM45_IDS(INTEL_VGA_DEVICE, &gen3_early_ops),
INTEL_G45_IDS(INTEL_VGA_DEVICE, &gen3_early_ops),
INTEL_ILK_IDS(INTEL_VGA_DEVICE, &gen3_early_ops),
INTEL_SNB_IDS(INTEL_VGA_DEVICE, &gen6_early_ops),
INTEL_IVB_IDS(INTEL_VGA_DEVICE, &gen6_early_ops),
INTEL_HSW_IDS(INTEL_VGA_DEVICE, &gen6_early_ops),
INTEL_BDW_IDS(INTEL_VGA_DEVICE, &gen8_early_ops),
INTEL_CHV_IDS(INTEL_VGA_DEVICE, &chv_early_ops),
INTEL_SKL_IDS(INTEL_VGA_DEVICE, &gen9_early_ops),
INTEL_BXT_IDS(INTEL_VGA_DEVICE, &gen9_early_ops),
INTEL_KBL_IDS(INTEL_VGA_DEVICE, &gen9_early_ops),
INTEL_CFL_IDS(INTEL_VGA_DEVICE, &gen9_early_ops),
INTEL_WHL_IDS(INTEL_VGA_DEVICE, &gen9_early_ops),
INTEL_CML_IDS(INTEL_VGA_DEVICE, &gen9_early_ops),
INTEL_GLK_IDS(INTEL_VGA_DEVICE, &gen9_early_ops),
INTEL_CNL_IDS(INTEL_VGA_DEVICE, &gen9_early_ops),
INTEL_ICL_IDS(INTEL_VGA_DEVICE, &gen11_early_ops),
INTEL_EHL_IDS(INTEL_VGA_DEVICE, &gen11_early_ops),
INTEL_JSL_IDS(INTEL_VGA_DEVICE, &gen11_early_ops),
INTEL_TGL_IDS(INTEL_VGA_DEVICE, &gen11_early_ops),
INTEL_RKL_IDS(INTEL_VGA_DEVICE, &gen11_early_ops),
INTEL_ADLS_IDS(INTEL_VGA_DEVICE, &gen11_early_ops),
INTEL_ADLP_IDS(INTEL_VGA_DEVICE, &gen11_early_ops),
INTEL_ADLN_IDS(INTEL_VGA_DEVICE, &gen11_early_ops),
INTEL_RPLS_IDS(INTEL_VGA_DEVICE, &gen11_early_ops),
INTEL_RPLU_IDS(INTEL_VGA_DEVICE, &gen11_early_ops),
INTEL_RPLP_IDS(INTEL_VGA_DEVICE, &gen11_early_ops),
};
struct resource intel_graphics_stolen_res __ro_after_init = DEFINE_RES_MEM(0, 0);
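
For context: the ID-list macros above now take the per-entry wrapper macro as
an argument. A paraphrased sketch of INTEL_VGA_DEVICE (the exact body lives in
include/drm/intel/i915_pciids.h and may differ):

    /* Expands one device ID into a PCI match entry for vendor 0x8086,
     * display class, carrying the early-ops pointer as driver_data.
     */
    #define INTEL_VGA_DEVICE(id, info) {	\
            0x8086, (id),			\
            ~0, ~0,				\
            0x030000, 0xff0000,		\
            (unsigned long)(info) }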

View File

@ -1,19 +1,22 @@
# SPDX-License-Identifier: GPL-2.0-only
# Copyright (C) 2023 Intel Corporation
# Copyright (C) 2023-2024 Intel Corporation
intel_vpu-y := \
ivpu_drv.o \
ivpu_fw.o \
ivpu_fw_log.o \
ivpu_gem.o \
ivpu_hw_37xx.o \
ivpu_hw_40xx.o \
ivpu_hw.o \
ivpu_hw_btrs.o \
ivpu_hw_ip.o \
ivpu_ipc.o \
ivpu_job.o \
ivpu_jsm_msg.o \
ivpu_mmu.o \
ivpu_mmu_context.o \
ivpu_pm.o
ivpu_ms.o \
ivpu_pm.o \
ivpu_sysfs.o
intel_vpu-$(CONFIG_DEBUG_FS) += ivpu_debugfs.o

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2023 Intel Corporation
* Copyright (C) 2020-2024 Intel Corporation
*/
#include <linux/debugfs.h>
@ -145,6 +145,30 @@ static const struct file_operations dvfs_mode_fops = {
.write = dvfs_mode_fops_write,
};
static ssize_t
fw_dyndbg_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
{
struct ivpu_device *vdev = file->private_data;
char buffer[VPU_DYNDBG_CMD_MAX_LEN] = {};
int ret;
if (size >= VPU_DYNDBG_CMD_MAX_LEN)
return -EINVAL;
ret = strncpy_from_user(buffer, user_buf, size);
if (ret < 0)
return ret;
ivpu_jsm_dyndbg_control(vdev, buffer, size);
return size;
}
static const struct file_operations fw_dyndbg_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.write = fw_dyndbg_fops_write,
};
static int fw_log_show(struct seq_file *s, void *v)
{
struct ivpu_device *vdev = s->private;
@ -335,6 +359,61 @@ static const struct file_operations ivpu_reset_engine_fops = {
.write = ivpu_reset_engine_fn,
};
static ssize_t
ivpu_resume_engine_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
{
struct ivpu_device *vdev = file->private_data;
if (!size)
return -EINVAL;
if (ivpu_jsm_hws_resume_engine(vdev, DRM_IVPU_ENGINE_COMPUTE))
return -ENODEV;
if (ivpu_jsm_hws_resume_engine(vdev, DRM_IVPU_ENGINE_COPY))
return -ENODEV;
return size;
}
static const struct file_operations ivpu_resume_engine_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.write = ivpu_resume_engine_fn,
};
static int dct_active_get(void *data, u64 *active_percent)
{
struct ivpu_device *vdev = data;
*active_percent = vdev->pm->dct_active_percent;
return 0;
}
static int dct_active_set(void *data, u64 active_percent)
{
struct ivpu_device *vdev = data;
int ret;
if (active_percent > 100)
return -EINVAL;
ret = ivpu_rpm_get(vdev);
if (ret)
return ret;
if (active_percent)
ret = ivpu_pm_dct_enable(vdev, active_percent);
else
ret = ivpu_pm_dct_disable(vdev);
ivpu_rpm_put(vdev);
return ret;
}
DEFINE_DEBUGFS_ATTRIBUTE(ivpu_dct_fops, dct_active_get, dct_active_set, "%llu\n");
void ivpu_debugfs_init(struct ivpu_device *vdev)
{
struct dentry *debugfs_root = vdev->drm.debugfs_root;
@ -347,6 +426,8 @@ void ivpu_debugfs_init(struct ivpu_device *vdev)
debugfs_create_file("dvfs_mode", 0200, debugfs_root, vdev,
&dvfs_mode_fops);
debugfs_create_file("fw_dyndbg", 0200, debugfs_root, vdev,
&fw_dyndbg_fops);
debugfs_create_file("fw_log", 0644, debugfs_root, vdev,
&fw_log_fops);
debugfs_create_file("fw_trace_destination_mask", 0200, debugfs_root, vdev,
@ -358,8 +439,12 @@ void ivpu_debugfs_init(struct ivpu_device *vdev)
debugfs_create_file("reset_engine", 0200, debugfs_root, vdev,
&ivpu_reset_engine_fops);
debugfs_create_file("resume_engine", 0200, debugfs_root, vdev,
&ivpu_resume_engine_fops);
if (ivpu_hw_gen(vdev) >= IVPU_HW_40XX)
if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_40XX) {
debugfs_create_file("fw_profiling_freq_drive", 0200,
debugfs_root, vdev, &fw_profiling_freq_fops);
debugfs_create_file("dct", 0644, debugfs_root, vdev, &ivpu_dct_fops);
}
}

View File

@ -26,7 +26,9 @@
#include "ivpu_jsm_msg.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
#include "ivpu_ms.h"
#include "ivpu_pm.h"
#include "ivpu_sysfs.h"
#ifndef DRIVER_VERSION_STR
#define DRIVER_VERSION_STR __stringify(DRM_IVPU_DRIVER_MAJOR) "." \
@ -51,10 +53,18 @@ u8 ivpu_pll_max_ratio = U8_MAX;
module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set NPU frequency");
int ivpu_sched_mode;
module_param_named(sched_mode, ivpu_sched_mode, int, 0444);
MODULE_PARM_DESC(sched_mode, "Scheduler mode: 0 - Default scheduler, 1 - Force HW scheduler");
bool ivpu_disable_mmu_cont_pages;
module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0644);
module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0444);
MODULE_PARM_DESC(disable_mmu_cont_pages, "Disable MMU contiguous pages optimization");
bool ivpu_force_snoop;
module_param_named(force_snoop, ivpu_force_snoop, bool, 0444);
MODULE_PARM_DESC(force_snoop, "Force snooping for NPU host memory access");
struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
{
struct ivpu_device *vdev = file_priv->vdev;
@ -74,7 +84,6 @@ static void file_priv_unbind(struct ivpu_device *vdev, struct ivpu_file_priv *fi
ivpu_dbg(vdev, FILE, "file_priv unbind: ctx %u\n", file_priv->ctx.id);
ivpu_cmdq_release_all_locked(file_priv);
ivpu_jsm_context_release(vdev, file_priv->ctx.id);
ivpu_bo_unbind_all_bos_from_context(vdev, &file_priv->ctx);
ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
file_priv->bound = false;
@ -97,6 +106,7 @@ static void file_priv_release(struct kref *ref)
mutex_unlock(&vdev->context_list_lock);
pm_runtime_put_autosuspend(vdev->drm.dev);
mutex_destroy(&file_priv->ms_lock);
mutex_destroy(&file_priv->lock);
kfree(file_priv);
}
@ -119,7 +129,7 @@ static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param
{
switch (args->index) {
case DRM_IVPU_CAP_METRIC_STREAMER:
args->value = 0;
args->value = 1;
break;
case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
args->value = 1;
@ -228,10 +238,13 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
goto err_dev_exit;
}
INIT_LIST_HEAD(&file_priv->ms_instance_list);
file_priv->vdev = vdev;
file_priv->bound = true;
kref_init(&file_priv->ref);
mutex_init(&file_priv->lock);
mutex_init(&file_priv->ms_lock);
mutex_lock(&vdev->context_list_lock);
@ -260,6 +273,7 @@ err_xa_erase:
xa_erase_irq(&vdev->context_xa, ctx_id);
err_unlock:
mutex_unlock(&vdev->context_list_lock);
mutex_destroy(&file_priv->ms_lock);
mutex_destroy(&file_priv->lock);
kfree(file_priv);
err_dev_exit:
@ -275,6 +289,7 @@ static void ivpu_postclose(struct drm_device *dev, struct drm_file *file)
ivpu_dbg(vdev, FILE, "file_priv close: ctx %u process %s pid %d\n",
file_priv->ctx.id, current->comm, task_pid_nr(current));
ivpu_ms_cleanup(file_priv);
ivpu_file_priv_put(&file_priv);
}
@ -285,6 +300,10 @@ static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(IVPU_BO_INFO, ivpu_bo_info_ioctl, 0),
DRM_IOCTL_DEF_DRV(IVPU_SUBMIT, ivpu_submit_ioctl, 0),
DRM_IOCTL_DEF_DRV(IVPU_BO_WAIT, ivpu_bo_wait_ioctl, 0),
DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_START, ivpu_ms_start_ioctl, 0),
DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_DATA, ivpu_ms_get_data_ioctl, 0),
DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_STOP, ivpu_ms_stop_ioctl, 0),
DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_INFO, ivpu_ms_get_info_ioctl, 0),
};
static int ivpu_wait_for_ready(struct ivpu_device *vdev)
@ -301,7 +320,7 @@ static int ivpu_wait_for_ready(struct ivpu_device *vdev)
timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot);
while (1) {
ivpu_ipc_irq_handler(vdev, NULL);
ivpu_ipc_irq_handler(vdev);
ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0);
if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout))
break;
@ -323,6 +342,21 @@ static int ivpu_wait_for_ready(struct ivpu_device *vdev)
return ret;
}
static int ivpu_hw_sched_init(struct ivpu_device *vdev)
{
int ret = 0;
if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW) {
ret = ivpu_jsm_hws_setup_priority_bands(vdev);
if (ret) {
ivpu_err(vdev, "Failed to enable hw scheduler: %d", ret);
return ret;
}
}
return ret;
}
/**
* ivpu_boot() - Start VPU firmware
* @vdev: VPU device
@ -356,6 +390,15 @@ int ivpu_boot(struct ivpu_device *vdev)
enable_irq(vdev->irq);
ivpu_hw_irq_enable(vdev);
ivpu_ipc_enable(vdev);
if (ivpu_fw_is_cold_boot(vdev)) {
ret = ivpu_pm_dct_init(vdev);
if (ret)
return ret;
return ivpu_hw_sched_init(vdev);
}
return 0;
}
@ -408,11 +451,52 @@ static const struct drm_driver driver = {
.minor = DRM_IVPU_DRIVER_MINOR,
};
static void ivpu_context_abort_invalid(struct ivpu_device *vdev)
{
struct ivpu_file_priv *file_priv;
unsigned long ctx_id;
mutex_lock(&vdev->context_list_lock);
xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
if (!file_priv->has_mmu_faults || file_priv->aborted)
continue;
mutex_lock(&file_priv->lock);
ivpu_context_abort_locked(file_priv);
file_priv->aborted = true;
mutex_unlock(&file_priv->lock);
}
mutex_unlock(&vdev->context_list_lock);
}
static irqreturn_t ivpu_irq_thread_handler(int irq, void *arg)
{
struct ivpu_device *vdev = arg;
u8 irq_src;
return ivpu_ipc_irq_thread_handler(vdev);
if (kfifo_is_empty(&vdev->hw->irq.fifo))
return IRQ_NONE;
while (kfifo_get(&vdev->hw->irq.fifo, &irq_src)) {
switch (irq_src) {
case IVPU_HW_IRQ_SRC_IPC:
ivpu_ipc_irq_thread_handler(vdev);
break;
case IVPU_HW_IRQ_SRC_MMU_EVTQ:
ivpu_context_abort_invalid(vdev);
break;
case IVPU_HW_IRQ_SRC_DCT:
ivpu_pm_dct_irq_thread_handler(vdev);
break;
default:
ivpu_err_ratelimited(vdev, "Unknown IRQ source: %u\n", irq_src);
break;
}
}
return IRQ_HANDLED;
}
static int ivpu_irq_init(struct ivpu_device *vdev)
@ -426,9 +510,11 @@ static int ivpu_irq_init(struct ivpu_device *vdev)
return ret;
}
ivpu_irq_handlers_init(vdev);
vdev->irq = pci_irq_vector(pdev, 0);
ret = devm_request_threaded_irq(vdev->drm.dev, vdev->irq, vdev->hw->ops->irq_handler,
ret = devm_request_threaded_irq(vdev->drm.dev, vdev->irq, ivpu_hw_irq_handler,
ivpu_irq_thread_handler, IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
if (ret)
ivpu_err(vdev, "Failed to request an IRQ %d\n", ret);
@ -505,13 +591,10 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
if (!vdev->pm)
return -ENOMEM;
if (ivpu_hw_gen(vdev) >= IVPU_HW_40XX) {
vdev->hw->ops = &ivpu_hw_40xx_ops;
if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_40XX)
vdev->hw->dma_bits = 48;
} else {
vdev->hw->ops = &ivpu_hw_37xx_ops;
else
vdev->hw->dma_bits = 38;
}
vdev->platform = IVPU_PLATFORM_INVALID;
vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
@ -540,7 +623,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
goto err_xa_destroy;
/* Init basic HW info based on buttress registers which are accessible before power up */
ret = ivpu_hw_info_init(vdev);
ret = ivpu_hw_init(vdev);
if (ret)
goto err_xa_destroy;
@ -612,13 +695,14 @@ static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
static void ivpu_dev_fini(struct ivpu_device *vdev)
{
ivpu_jobs_abort_all(vdev);
ivpu_pm_cancel_recovery(vdev);
ivpu_pm_disable(vdev);
ivpu_prepare_for_reset(vdev);
ivpu_shutdown(vdev);
ivpu_jobs_abort_all(vdev);
ivpu_ms_cleanup_all(vdev);
ivpu_job_done_consumer_fini(vdev);
ivpu_pm_cancel_recovery(vdev);
ivpu_bo_unbind_all_user_contexts(vdev);
ivpu_ipc_fini(vdev);
@ -658,6 +742,7 @@ static int ivpu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
ivpu_debugfs_init(vdev);
ivpu_sysfs_init(vdev);
ret = drm_dev_register(&vdev->drm, 0);
if (ret) {

View File

@ -27,8 +27,15 @@
#define PCI_DEVICE_ID_ARL 0xad1d
#define PCI_DEVICE_ID_LNL 0x643e
#define IVPU_HW_37XX 37
#define IVPU_HW_40XX 40
#define IVPU_HW_IP_37XX 37
#define IVPU_HW_IP_40XX 40
#define IVPU_HW_IP_50XX 50
#define IVPU_HW_IP_60XX 60
#define IVPU_HW_IP_REV_LNL_B0 4
#define IVPU_HW_BTRS_MTL 1
#define IVPU_HW_BTRS_LNL 2
#define IVPU_GLOBAL_CONTEXT_MMU_SSID 0
/* SSID 1 is used by the VPU to represent reserved context */
@ -39,7 +46,11 @@
#define IVPU_MIN_DB 1
#define IVPU_MAX_DB 255
#define IVPU_NUM_ENGINES 2
#define IVPU_NUM_PRIORITIES 4
#define IVPU_NUM_CMDQS_PER_CTX (IVPU_NUM_ENGINES * IVPU_NUM_PRIORITIES)
#define IVPU_CMDQ_INDEX(engine, priority) ((engine) * IVPU_NUM_PRIORITIES + (priority))
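/* Example: with IVPU_NUM_PRIORITIES == 4, IVPU_CMDQ_INDEX(1, 2) == 1 * 4 + 2 == 6 */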
#define IVPU_PLATFORM_SILICON 0
#define IVPU_PLATFORM_SIMICS 2
@ -93,6 +104,7 @@ struct ivpu_wa_table {
bool interrupt_clear_with_0;
bool disable_clock_relinquish;
bool disable_d0i3_msg;
bool wp0_during_power_up;
};
struct ivpu_hw_info;
@ -131,11 +143,13 @@ struct ivpu_device {
atomic64_t unique_id_counter;
ktime_t busy_start_ts;
ktime_t busy_time;
struct {
int boot;
int jsm;
int tdr;
int reschedule_suspend;
int autosuspend;
int d0i3_entry_msg;
} timeout;
@ -149,22 +163,31 @@ struct ivpu_file_priv {
struct kref ref;
struct ivpu_device *vdev;
struct mutex lock; /* Protects cmdq */
struct ivpu_cmdq *cmdq[IVPU_NUM_ENGINES];
struct ivpu_cmdq *cmdq[IVPU_NUM_CMDQS_PER_CTX];
struct ivpu_mmu_context ctx;
struct mutex ms_lock; /* Protects ms_instance_list, ms_info_bo */
struct list_head ms_instance_list;
struct ivpu_bo *ms_info_bo;
bool has_mmu_faults;
bool bound;
bool aborted;
};
extern int ivpu_dbg_mask;
extern u8 ivpu_pll_min_ratio;
extern u8 ivpu_pll_max_ratio;
extern int ivpu_sched_mode;
extern bool ivpu_disable_mmu_cont_pages;
extern bool ivpu_force_snoop;
#define IVPU_TEST_MODE_FW_TEST BIT(0)
#define IVPU_TEST_MODE_NULL_HW BIT(1)
#define IVPU_TEST_MODE_NULL_SUBMISSION BIT(2)
#define IVPU_TEST_MODE_D0I3_MSG_DISABLE BIT(4)
#define IVPU_TEST_MODE_D0I3_MSG_ENABLE BIT(5)
#define IVPU_TEST_MODE_PREEMPTION_DISABLE BIT(6)
#define IVPU_TEST_MODE_HWS_EXTRA_EVENTS BIT(7)
#define IVPU_TEST_MODE_DISABLE_TIMEOUTS BIT(8)
extern int ivpu_test_mode;
struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv);
@ -184,16 +207,32 @@ static inline u16 ivpu_device_id(struct ivpu_device *vdev)
return to_pci_dev(vdev->drm.dev)->device;
}
static inline int ivpu_hw_gen(struct ivpu_device *vdev)
static inline int ivpu_hw_ip_gen(struct ivpu_device *vdev)
{
switch (ivpu_device_id(vdev)) {
case PCI_DEVICE_ID_MTL:
case PCI_DEVICE_ID_ARL:
return IVPU_HW_37XX;
return IVPU_HW_IP_37XX;
case PCI_DEVICE_ID_LNL:
return IVPU_HW_40XX;
return IVPU_HW_IP_40XX;
default:
ivpu_err(vdev, "Unknown NPU device\n");
dump_stack();
ivpu_err(vdev, "Unknown NPU IP generation\n");
return 0;
}
}
static inline int ivpu_hw_btrs_gen(struct ivpu_device *vdev)
{
switch (ivpu_device_id(vdev)) {
case PCI_DEVICE_ID_MTL:
case PCI_DEVICE_ID_ARL:
return IVPU_HW_BTRS_MTL;
case PCI_DEVICE_ID_LNL:
return IVPU_HW_BTRS_LNL;
default:
dump_stack();
ivpu_err(vdev, "Unknown buttress generation\n");
return 0;
}
}
@ -231,4 +270,9 @@ static inline bool ivpu_is_fpga(struct ivpu_device *vdev)
return ivpu_get_platform(vdev) == IVPU_PLATFORM_FPGA;
}
static inline bool ivpu_is_force_snoop_enabled(struct ivpu_device *vdev)
{
return ivpu_force_snoop;
}
#endif /* __IVPU_DRV_H__ */

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2023 Intel Corporation
* Copyright (C) 2020-2024 Intel Corporation
*/
#include <linux/firmware.h>
@ -44,6 +44,8 @@
#define IVPU_FW_CHECK_API_VER_LT(vdev, fw_hdr, name, major, minor) \
ivpu_fw_check_api_ver_lt(vdev, fw_hdr, #name, VPU_##name##_API_VER_INDEX, major, minor)
#define IVPU_FOCUS_PRESENT_TIMER_MS 1000
static char *ivpu_firmware;
module_param_named_unsafe(firmware, ivpu_firmware, charp, 0644);
MODULE_PARM_DESC(firmware, "NPU firmware binary in /lib/firmware/..");
@ -52,10 +54,10 @@ static struct {
int gen;
const char *name;
} fw_names[] = {
{ IVPU_HW_37XX, "vpu_37xx.bin" },
{ IVPU_HW_37XX, "intel/vpu/vpu_37xx_v0.0.bin" },
{ IVPU_HW_40XX, "vpu_40xx.bin" },
{ IVPU_HW_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
{ IVPU_HW_IP_37XX, "vpu_37xx.bin" },
{ IVPU_HW_IP_37XX, "intel/vpu/vpu_37xx_v0.0.bin" },
{ IVPU_HW_IP_40XX, "vpu_40xx.bin" },
{ IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
};
static int ivpu_fw_request(struct ivpu_device *vdev)
@ -71,7 +73,7 @@ static int ivpu_fw_request(struct ivpu_device *vdev)
}
for (i = 0; i < ARRAY_SIZE(fw_names); i++) {
if (fw_names[i].gen != ivpu_hw_gen(vdev))
if (fw_names[i].gen != ivpu_hw_ip_gen(vdev))
continue;
ret = firmware_request_nowarn(&vdev->fw->file, fw_names[i].name, vdev->drm.dev);
@ -121,6 +123,14 @@ ivpu_fw_check_api_ver_lt(struct ivpu_device *vdev, const struct vpu_firmware_hea
return false;
}
static bool is_within_range(u64 addr, size_t size, u64 range_start, size_t range_size)
{
if (addr < range_start || addr + size > range_start + range_size)
return false;
return true;
}
static int ivpu_fw_parse(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
@ -200,10 +210,27 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
fw->dvfs_mode = 0;
fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size;
fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size;
if (fw_hdr->ro_section_start_address && !is_within_range(fw_hdr->ro_section_start_address,
fw_hdr->ro_section_size,
fw_hdr->image_load_address,
fw_hdr->image_size)) {
ivpu_err(vdev, "Invalid read-only section: start address 0x%llx, size %u\n",
fw_hdr->ro_section_start_address, fw_hdr->ro_section_size);
return -EINVAL;
}
fw->read_only_addr = fw_hdr->ro_section_start_address;
fw->read_only_size = fw_hdr->ro_section_size;
ivpu_dbg(vdev, FW_BOOT, "Size: file %lu image %u runtime %u shavenn %u\n",
fw->file->size, fw->image_size, fw->runtime_size, fw->shave_nn_size);
ivpu_dbg(vdev, FW_BOOT, "Address: runtime 0x%llx, load 0x%llx, entry point 0x%llx\n",
fw->runtime_addr, image_load_addr, fw->entry_point);
ivpu_dbg(vdev, FW_BOOT, "Read-only section: address 0x%llx, size %u\n",
fw->read_only_addr, fw->read_only_size);
return 0;
}
@ -241,7 +268,7 @@ static int ivpu_fw_update_global_range(struct ivpu_device *vdev)
return -EINVAL;
}
ivpu_hw_init_range(&vdev->hw->ranges.global, start, size);
ivpu_hw_range_init(&vdev->hw->ranges.global, start, size);
return 0;
}
@ -265,6 +292,13 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
return -ENOMEM;
}
ret = ivpu_mmu_context_set_pages_ro(vdev, &vdev->gctx, fw->read_only_addr,
fw->read_only_size);
if (ret) {
ivpu_err(vdev, "Failed to set firmware image read-only\n");
goto err_free_fw_mem;
}
fw->mem_log_crit = ivpu_bo_create_global(vdev, IVPU_FW_CRITICAL_BUFFER_SIZE,
DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE);
if (!fw->mem_log_crit) {
@ -464,6 +498,8 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_
boot_params->punit_telemetry_sram_size);
ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_telemetry_enable = 0x%x\n",
boot_params->vpu_telemetry_enable);
ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_scheduling_mode = 0x%x\n",
boot_params->vpu_scheduling_mode);
ivpu_dbg(vdev, FW_BOOT, "boot_params.dvfs_mode = %u\n",
boot_params->dvfs_mode);
ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_delayed_entry = %d\n",
@ -504,7 +540,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->magic = VPU_BOOT_PARAMS_MAGIC;
boot_params->vpu_id = to_pci_dev(vdev->drm.dev)->bus->number;
boot_params->frequency = ivpu_hw_reg_pll_freq_get(vdev);
boot_params->frequency = ivpu_hw_pll_freq_get(vdev);
/*
* This param is a debug firmware feature. It switches default clock
@ -561,9 +597,12 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->verbose_tracing_buff_addr = vdev->fw->mem_log_verb->vpu_addr;
boot_params->verbose_tracing_buff_size = ivpu_bo_size(vdev->fw->mem_log_verb);
boot_params->punit_telemetry_sram_base = ivpu_hw_reg_telemetry_offset_get(vdev);
boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev);
boot_params->vpu_telemetry_enable = ivpu_hw_reg_telemetry_enable_get(vdev);
boot_params->punit_telemetry_sram_base = ivpu_hw_telemetry_offset_get(vdev);
boot_params->punit_telemetry_sram_size = ivpu_hw_telemetry_size_get(vdev);
boot_params->vpu_telemetry_enable = ivpu_hw_telemetry_enable_get(vdev);
boot_params->vpu_scheduling_mode = vdev->hw->sched_mode;
if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW)
boot_params->vpu_focus_present_timer_ms = IVPU_FOCUS_PRESENT_TIMER_MS;
boot_params->dvfs_mode = vdev->fw->dvfs_mode;
if (!IVPU_WA(disable_d0i3_msg))
boot_params->d0i3_delayed_entry = 1;

View File

@ -28,6 +28,10 @@ struct ivpu_fw_info {
u32 trace_destination_mask;
u64 trace_hw_component_mask;
u32 dvfs_mode;
u32 primary_preempt_buf_size;
u32 secondary_preempt_buf_size;
u64 read_only_addr;
u32 read_only_size;
};
int ivpu_fw_init(struct ivpu_device *vdev);

View File

@ -60,16 +60,19 @@ static inline u32 ivpu_bo_cache_mode(struct ivpu_bo *bo)
return bo->flags & DRM_IVPU_BO_CACHE_MASK;
}
static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo)
{
return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED;
}
static inline struct ivpu_device *ivpu_bo_to_vdev(struct ivpu_bo *bo)
{
return to_ivpu_device(bo->base.base.dev);
}
static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo)
{
if (ivpu_is_force_snoop_enabled(ivpu_bo_to_vdev(bo)))
return true;
return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED;
}
static inline void *ivpu_to_cpu_addr(struct ivpu_bo *bo, u32 vpu_addr)
{
if (vpu_addr < bo->vpu_addr)

View File

@ -0,0 +1,331 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020 - 2024 Intel Corporation
*/
#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_hw_btrs.h"
#include "ivpu_hw_ip.h"
#include <linux/dmi.h>
static char *platform_to_str(u32 platform)
{
switch (platform) {
case IVPU_PLATFORM_SILICON:
return "SILICON";
case IVPU_PLATFORM_SIMICS:
return "SIMICS";
case IVPU_PLATFORM_FPGA:
return "FPGA";
default:
return "Invalid platform";
}
}
static const struct dmi_system_id dmi_platform_simulation[] = {
{
.ident = "Intel Simics",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "lnlrvp"),
DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
DMI_MATCH(DMI_BOARD_SERIAL, "123456789"),
},
},
{
.ident = "Intel Simics",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "Simics"),
},
},
{ }
};
static void platform_init(struct ivpu_device *vdev)
{
if (dmi_check_system(dmi_platform_simulation))
vdev->platform = IVPU_PLATFORM_SIMICS;
else
vdev->platform = IVPU_PLATFORM_SILICON;
ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n",
platform_to_str(vdev->platform), vdev->platform);
}
static void wa_init(struct ivpu_device *vdev)
{
vdev->wa.punit_disabled = ivpu_is_fpga(vdev);
vdev->wa.clear_runtime_mem = false;
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
vdev->wa.interrupt_clear_with_0 = ivpu_hw_btrs_irqs_clear_with_0_mtl(vdev);
if (ivpu_device_id(vdev) == PCI_DEVICE_ID_LNL &&
ivpu_revision(vdev) < IVPU_HW_IP_REV_LNL_B0)
vdev->wa.disable_clock_relinquish = true;
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
vdev->wa.wp0_during_power_up = true;
IVPU_PRINT_WA(punit_disabled);
IVPU_PRINT_WA(clear_runtime_mem);
IVPU_PRINT_WA(interrupt_clear_with_0);
IVPU_PRINT_WA(disable_clock_relinquish);
IVPU_PRINT_WA(wp0_during_power_up);
}
static void timeouts_init(struct ivpu_device *vdev)
{
if (ivpu_test_mode & IVPU_TEST_MODE_DISABLE_TIMEOUTS) {
vdev->timeout.boot = -1;
vdev->timeout.jsm = -1;
vdev->timeout.tdr = -1;
vdev->timeout.autosuspend = -1;
vdev->timeout.d0i3_entry_msg = -1;
} else if (ivpu_is_fpga(vdev)) {
vdev->timeout.boot = 100000;
vdev->timeout.jsm = 50000;
vdev->timeout.tdr = 2000000;
vdev->timeout.autosuspend = -1;
vdev->timeout.d0i3_entry_msg = 500;
} else if (ivpu_is_simics(vdev)) {
vdev->timeout.boot = 50;
vdev->timeout.jsm = 500;
vdev->timeout.tdr = 10000;
vdev->timeout.autosuspend = -1;
vdev->timeout.d0i3_entry_msg = 100;
} else {
vdev->timeout.boot = 1000;
vdev->timeout.jsm = 500;
vdev->timeout.tdr = 2000;
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
vdev->timeout.autosuspend = 10;
else
vdev->timeout.autosuspend = 100;
vdev->timeout.d0i3_entry_msg = 5;
}
}
static void memory_ranges_init(struct ivpu_device *vdev)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
ivpu_hw_range_init(&vdev->hw->ranges.user, 0xc0000000, 255 * SZ_1M);
ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x180000000, SZ_2G);
ivpu_hw_range_init(&vdev->hw->ranges.dma, 0x200000000, SZ_8G);
} else {
ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
ivpu_hw_range_init(&vdev->hw->ranges.user, 0x80000000, SZ_256M);
ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x80000000 + SZ_256M, SZ_2G - SZ_256M);
ivpu_hw_range_init(&vdev->hw->ranges.dma, 0x200000000, SZ_8G);
}
}
static int wp_enable(struct ivpu_device *vdev)
{
return ivpu_hw_btrs_wp_drive(vdev, true);
}
static int wp_disable(struct ivpu_device *vdev)
{
return ivpu_hw_btrs_wp_drive(vdev, false);
}
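/*
 * Power-up sequence: exit D0i3, request a workpoint (PLL enable), then
 * bring up the host subsystem, power domain, AXI and TOP NOC in that
 * order. D0i3-disable and the initial workpoint-disable only warn on
 * failure; failures in the later steps abort the power-up.
 */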
int ivpu_hw_power_up(struct ivpu_device *vdev)
{
int ret;
if (IVPU_WA(wp0_during_power_up)) {
/* WP requests may fail when powering down, so issue WP 0 here */
ret = wp_disable(vdev);
if (ret)
ivpu_warn(vdev, "Failed to disable workpoint: %d\n", ret);
}
ret = ivpu_hw_btrs_d0i3_disable(vdev);
if (ret)
ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
ret = wp_enable(vdev);
if (ret) {
ivpu_err(vdev, "Failed to enable workpoint: %d\n", ret);
return ret;
}
if (ivpu_hw_btrs_gen(vdev) >= IVPU_HW_BTRS_LNL) {
if (IVPU_WA(disable_clock_relinquish))
ivpu_hw_btrs_clock_relinquish_disable_lnl(vdev);
ivpu_hw_btrs_profiling_freq_reg_set_lnl(vdev);
ivpu_hw_btrs_ats_print_lnl(vdev);
}
ret = ivpu_hw_ip_host_ss_configure(vdev);
if (ret) {
ivpu_err(vdev, "Failed to configure host SS: %d\n", ret);
return ret;
}
ivpu_hw_ip_idle_gen_disable(vdev);
ret = ivpu_hw_btrs_wait_for_clock_res_own_ack(vdev);
if (ret) {
ivpu_err(vdev, "Timed out waiting for clock resource own ACK\n");
return ret;
}
ret = ivpu_hw_ip_pwr_domain_enable(vdev);
if (ret) {
ivpu_err(vdev, "Failed to enable power domain: %d\n", ret);
return ret;
}
ret = ivpu_hw_ip_host_ss_axi_enable(vdev);
if (ret) {
ivpu_err(vdev, "Failed to enable AXI: %d\n", ret);
return ret;
}
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_LNL)
ivpu_hw_btrs_set_port_arbitration_weights_lnl(vdev);
ret = ivpu_hw_ip_top_noc_enable(vdev);
if (ret)
ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret);
return ret;
}
static void save_d0i3_entry_timestamp(struct ivpu_device *vdev)
{
vdev->hw->d0i3_entry_host_ts = ktime_get_boottime();
vdev->hw->d0i3_entry_vpu_ts = ivpu_hw_ip_read_perf_timer_counter(vdev);
}
int ivpu_hw_reset(struct ivpu_device *vdev)
{
int ret = 0;
if (ivpu_hw_btrs_ip_reset(vdev)) {
ivpu_err(vdev, "Failed to reset NPU IP\n");
ret = -EIO;
}
if (wp_disable(vdev)) {
ivpu_err(vdev, "Failed to disable workpoint\n");
ret = -EIO;
}
return ret;
}
int ivpu_hw_power_down(struct ivpu_device *vdev)
{
int ret = 0;
save_d0i3_entry_timestamp(vdev);
if (!ivpu_hw_is_idle(vdev))
ivpu_warn(vdev, "NPU not idle during power down\n");
if (ivpu_hw_reset(vdev)) {
ivpu_err(vdev, "Failed to reset NPU\n");
ret = -EIO;
}
if (ivpu_hw_btrs_d0i3_enable(vdev)) {
ivpu_err(vdev, "Failed to enter D0I3\n");
ret = -EIO;
}
return ret;
}
int ivpu_hw_init(struct ivpu_device *vdev)
{
ivpu_hw_btrs_info_init(vdev);
ivpu_hw_btrs_freq_ratios_init(vdev);
memory_ranges_init(vdev);
platform_init(vdev);
wa_init(vdev);
timeouts_init(vdev);
return 0;
}
int ivpu_hw_boot_fw(struct ivpu_device *vdev)
{
int ret;
ivpu_hw_ip_snoop_disable(vdev);
ivpu_hw_ip_tbu_mmu_enable(vdev);
ret = ivpu_hw_ip_soc_cpu_boot(vdev);
if (ret)
ivpu_err(vdev, "Failed to boot SOC CPU: %d\n", ret);
return ret;
}
void ivpu_hw_profiling_freq_drive(struct ivpu_device *vdev, bool enable)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
return;
}
if (enable)
vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_HIGH;
else
vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
}
void ivpu_irq_handlers_init(struct ivpu_device *vdev)
{
INIT_KFIFO(vdev->hw->irq.fifo);
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
vdev->hw->irq.ip_irq_handler = ivpu_hw_ip_irq_handler_37xx;
else
vdev->hw->irq.ip_irq_handler = ivpu_hw_ip_irq_handler_40xx;
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
vdev->hw->irq.btrs_irq_handler = ivpu_hw_btrs_irq_handler_mtl;
else
vdev->hw->irq.btrs_irq_handler = ivpu_hw_btrs_irq_handler_lnl;
}
void ivpu_hw_irq_enable(struct ivpu_device *vdev)
{
kfifo_reset(&vdev->hw->irq.fifo);
ivpu_hw_ip_irq_enable(vdev);
ivpu_hw_btrs_irq_enable(vdev);
}
void ivpu_hw_irq_disable(struct ivpu_device *vdev)
{
ivpu_hw_btrs_irq_disable(vdev);
ivpu_hw_ip_irq_disable(vdev);
}
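/*
 * Top-half IRQ dispatch: buttress interrupts are checked first and IP
 * interrupts are only consulted when the NPU is not idle or the buttress
 * handler did not claim the IRQ; work queued to the IRQ kfifo is deferred
 * to the threaded handler via IRQ_WAKE_THREAD.
 */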
irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr)
{
struct ivpu_device *vdev = ptr;
bool ip_handled, btrs_handled;
ivpu_hw_btrs_global_int_disable(vdev);
btrs_handled = ivpu_hw_btrs_irq_handler(vdev, irq);
if (!ivpu_hw_is_idle(vdev) || !btrs_handled)
ip_handled = ivpu_hw_ip_irq_handler(vdev, irq);
else
ip_handled = false;
/* Re-enable global interrupts to re-trigger MSI for pending interrupts */
ivpu_hw_btrs_global_int_enable(vdev);
if (!kfifo_is_empty(&vdev->hw->irq.fifo))
return IRQ_WAKE_THREAD;
if (ip_handled || btrs_handled)
return IRQ_HANDLED;
return IRQ_NONE;
}


@@ -1,39 +1,22 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020-2023 Intel Corporation
* Copyright (C) 2020-2024 Intel Corporation
*/
#ifndef __IVPU_HW_H__
#define __IVPU_HW_H__
#include "ivpu_drv.h"
#include <linux/kfifo.h>
struct ivpu_hw_ops {
int (*info_init)(struct ivpu_device *vdev);
int (*power_up)(struct ivpu_device *vdev);
int (*boot_fw)(struct ivpu_device *vdev);
int (*power_down)(struct ivpu_device *vdev);
int (*reset)(struct ivpu_device *vdev);
bool (*is_idle)(struct ivpu_device *vdev);
int (*wait_for_idle)(struct ivpu_device *vdev);
void (*wdt_disable)(struct ivpu_device *vdev);
void (*diagnose_failure)(struct ivpu_device *vdev);
u32 (*profiling_freq_get)(struct ivpu_device *vdev);
void (*profiling_freq_drive)(struct ivpu_device *vdev, bool enable);
u32 (*reg_pll_freq_get)(struct ivpu_device *vdev);
u32 (*ratio_to_freq)(struct ivpu_device *vdev, u32 ratio);
u32 (*reg_telemetry_offset_get)(struct ivpu_device *vdev);
u32 (*reg_telemetry_size_get)(struct ivpu_device *vdev);
u32 (*reg_telemetry_enable_get)(struct ivpu_device *vdev);
void (*reg_db_set)(struct ivpu_device *vdev, u32 db_id);
u32 (*reg_ipc_rx_addr_get)(struct ivpu_device *vdev);
u32 (*reg_ipc_rx_count_get)(struct ivpu_device *vdev);
void (*reg_ipc_tx_set)(struct ivpu_device *vdev, u32 vpu_addr);
void (*irq_clear)(struct ivpu_device *vdev);
void (*irq_enable)(struct ivpu_device *vdev);
void (*irq_disable)(struct ivpu_device *vdev);
irqreturn_t (*irq_handler)(int irq, void *ptr);
};
#include "ivpu_drv.h"
#include "ivpu_hw_btrs.h"
#include "ivpu_hw_ip.h"
#define IVPU_HW_IRQ_FIFO_LENGTH 1024
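/* IRQ sources queued by the top-half handlers for the threaded IRQ handler */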
#define IVPU_HW_IRQ_SRC_IPC 1
#define IVPU_HW_IRQ_SRC_MMU_EVTQ 2
#define IVPU_HW_IRQ_SRC_DCT 3
struct ivpu_addr_range {
resource_size_t start;
@@ -41,7 +24,11 @@ struct ivpu_addr_range {
};
struct ivpu_hw_info {
const struct ivpu_hw_ops *ops;
struct {
bool (*btrs_irq_handler)(struct ivpu_device *vdev, int irq);
bool (*ip_irq_handler)(struct ivpu_device *vdev, int irq);
DECLARE_KFIFO(fifo, u8, IVPU_HW_IRQ_FIFO_LENGTH);
} irq;
struct {
struct ivpu_addr_range global;
struct ivpu_addr_range user;
@@ -59,6 +46,7 @@ struct ivpu_hw_info {
u32 profiling_freq;
} pll;
u32 tile_fuse;
u32 sched_mode;
u32 sku;
u16 config;
int dma_bits;
@@ -66,127 +54,28 @@
u64 d0i3_entry_vpu_ts;
};
extern const struct ivpu_hw_ops ivpu_hw_37xx_ops;
extern const struct ivpu_hw_ops ivpu_hw_40xx_ops;
int ivpu_hw_init(struct ivpu_device *vdev);
int ivpu_hw_power_up(struct ivpu_device *vdev);
int ivpu_hw_power_down(struct ivpu_device *vdev);
int ivpu_hw_reset(struct ivpu_device *vdev);
int ivpu_hw_boot_fw(struct ivpu_device *vdev);
void ivpu_hw_profiling_freq_drive(struct ivpu_device *vdev, bool enable);
void ivpu_irq_handlers_init(struct ivpu_device *vdev);
void ivpu_hw_irq_enable(struct ivpu_device *vdev);
void ivpu_hw_irq_disable(struct ivpu_device *vdev);
irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr);
static inline int ivpu_hw_info_init(struct ivpu_device *vdev)
static inline u32 ivpu_hw_btrs_irq_handler(struct ivpu_device *vdev, int irq)
{
return vdev->hw->ops->info_init(vdev);
};
static inline int ivpu_hw_power_up(struct ivpu_device *vdev)
{
ivpu_dbg(vdev, PM, "HW power up\n");
return vdev->hw->ops->power_up(vdev);
};
static inline int ivpu_hw_boot_fw(struct ivpu_device *vdev)
{
return vdev->hw->ops->boot_fw(vdev);
};
static inline bool ivpu_hw_is_idle(struct ivpu_device *vdev)
{
return vdev->hw->ops->is_idle(vdev);
};
static inline int ivpu_hw_wait_for_idle(struct ivpu_device *vdev)
{
return vdev->hw->ops->wait_for_idle(vdev);
};
static inline int ivpu_hw_power_down(struct ivpu_device *vdev)
{
ivpu_dbg(vdev, PM, "HW power down\n");
return vdev->hw->ops->power_down(vdev);
};
static inline int ivpu_hw_reset(struct ivpu_device *vdev)
{
ivpu_dbg(vdev, PM, "HW reset\n");
return vdev->hw->ops->reset(vdev);
};
static inline void ivpu_hw_wdt_disable(struct ivpu_device *vdev)
{
vdev->hw->ops->wdt_disable(vdev);
};
static inline u32 ivpu_hw_profiling_freq_get(struct ivpu_device *vdev)
{
return vdev->hw->ops->profiling_freq_get(vdev);
};
static inline void ivpu_hw_profiling_freq_drive(struct ivpu_device *vdev, bool enable)
{
return vdev->hw->ops->profiling_freq_drive(vdev, enable);
};
/* Register indirect accesses */
static inline u32 ivpu_hw_reg_pll_freq_get(struct ivpu_device *vdev)
{
return vdev->hw->ops->reg_pll_freq_get(vdev);
};
static inline u32 ivpu_hw_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
{
return vdev->hw->ops->ratio_to_freq(vdev, ratio);
return vdev->hw->irq.btrs_irq_handler(vdev, irq);
}
static inline u32 ivpu_hw_reg_telemetry_offset_get(struct ivpu_device *vdev)
static inline u32 ivpu_hw_ip_irq_handler(struct ivpu_device *vdev, int irq)
{
return vdev->hw->ops->reg_telemetry_offset_get(vdev);
};
return vdev->hw->irq.ip_irq_handler(vdev, irq);
}
static inline u32 ivpu_hw_reg_telemetry_size_get(struct ivpu_device *vdev)
{
return vdev->hw->ops->reg_telemetry_size_get(vdev);
};
static inline u32 ivpu_hw_reg_telemetry_enable_get(struct ivpu_device *vdev)
{
return vdev->hw->ops->reg_telemetry_enable_get(vdev);
};
static inline void ivpu_hw_reg_db_set(struct ivpu_device *vdev, u32 db_id)
{
vdev->hw->ops->reg_db_set(vdev, db_id);
};
static inline u32 ivpu_hw_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
{
return vdev->hw->ops->reg_ipc_rx_addr_get(vdev);
};
static inline u32 ivpu_hw_reg_ipc_rx_count_get(struct ivpu_device *vdev)
{
return vdev->hw->ops->reg_ipc_rx_count_get(vdev);
};
static inline void ivpu_hw_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
{
vdev->hw->ops->reg_ipc_tx_set(vdev, vpu_addr);
};
static inline void ivpu_hw_irq_clear(struct ivpu_device *vdev)
{
vdev->hw->ops->irq_clear(vdev);
};
static inline void ivpu_hw_irq_enable(struct ivpu_device *vdev)
{
vdev->hw->ops->irq_enable(vdev);
};
static inline void ivpu_hw_irq_disable(struct ivpu_device *vdev)
{
vdev->hw->ops->irq_disable(vdev);
};
static inline void ivpu_hw_init_range(struct ivpu_addr_range *range, u64 start, u64 size)
static inline void ivpu_hw_range_init(struct ivpu_addr_range *range, u64 start, u64 size)
{
range->start = start;
range->end = start + size;
@@ -197,9 +86,75 @@ static inline u64 ivpu_hw_range_size(const struct ivpu_addr_range *range)
return range->end - range->start;
}
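/*
 * The inline helpers below replace the old ivpu_hw_ops indirection with
 * direct calls into the buttress (btrs) and IP layers.
 */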
static inline u32 ivpu_hw_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
{
return ivpu_hw_btrs_ratio_to_freq(vdev, ratio);
}
static inline void ivpu_hw_irq_clear(struct ivpu_device *vdev)
{
ivpu_hw_ip_irq_clear(vdev);
}
static inline u32 ivpu_hw_pll_freq_get(struct ivpu_device *vdev)
{
return ivpu_hw_btrs_pll_freq_get(vdev);
}
static inline u32 ivpu_hw_profiling_freq_get(struct ivpu_device *vdev)
{
return vdev->hw->pll.profiling_freq;
}
static inline void ivpu_hw_diagnose_failure(struct ivpu_device *vdev)
{
vdev->hw->ops->diagnose_failure(vdev);
ivpu_hw_ip_diagnose_failure(vdev);
ivpu_hw_btrs_diagnose_failure(vdev);
}
static inline u32 ivpu_hw_telemetry_offset_get(struct ivpu_device *vdev)
{
return ivpu_hw_btrs_telemetry_offset_get(vdev);
}
static inline u32 ivpu_hw_telemetry_size_get(struct ivpu_device *vdev)
{
return ivpu_hw_btrs_telemetry_size_get(vdev);
}
static inline u32 ivpu_hw_telemetry_enable_get(struct ivpu_device *vdev)
{
return ivpu_hw_btrs_telemetry_enable_get(vdev);
}
static inline bool ivpu_hw_is_idle(struct ivpu_device *vdev)
{
return ivpu_hw_btrs_is_idle(vdev);
}
static inline int ivpu_hw_wait_for_idle(struct ivpu_device *vdev)
{
return ivpu_hw_btrs_wait_for_idle(vdev);
}
static inline void ivpu_hw_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
{
ivpu_hw_ip_ipc_tx_set(vdev, vpu_addr);
}
static inline void ivpu_hw_db_set(struct ivpu_device *vdev, u32 db_id)
{
ivpu_hw_ip_db_set(vdev, db_id);
}
static inline u32 ivpu_hw_ipc_rx_addr_get(struct ivpu_device *vdev)
{
return ivpu_hw_ip_ipc_rx_addr_get(vdev);
}
static inline u32 ivpu_hw_ipc_rx_count_get(struct ivpu_device *vdev)
{
return ivpu_hw_ip_ipc_rx_count_get(vdev);
}
#endif /* __IVPU_HW_H__ */

File diff suppressed because it is too large


@@ -8,78 +8,6 @@
#include <linux/bits.h>
#define VPU_37XX_BUTTRESS_INTERRUPT_TYPE 0x00000000u
#define VPU_37XX_BUTTRESS_INTERRUPT_STAT 0x00000004u
#define VPU_37XX_BUTTRESS_INTERRUPT_STAT_FREQ_CHANGE_MASK BIT_MASK(0)
#define VPU_37XX_BUTTRESS_INTERRUPT_STAT_ATS_ERR_MASK BIT_MASK(1)
#define VPU_37XX_BUTTRESS_INTERRUPT_STAT_UFI_ERR_MASK BIT_MASK(2)
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0 0x00000008u
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0_MIN_RATIO_MASK GENMASK(15, 0)
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0_MAX_RATIO_MASK GENMASK(31, 16)
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1 0x0000000cu
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK GENMASK(15, 0)
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1_EPP_MASK GENMASK(31, 16)
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2 0x00000010u
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2_CONFIG_MASK GENMASK(15, 0)
#define VPU_37XX_BUTTRESS_WP_REQ_CMD 0x00000014u
#define VPU_37XX_BUTTRESS_WP_REQ_CMD_SEND_MASK BIT_MASK(0)
#define VPU_37XX_BUTTRESS_WP_DOWNLOAD 0x00000018u
#define VPU_37XX_BUTTRESS_WP_DOWNLOAD_TARGET_RATIO_MASK GENMASK(15, 0)
#define VPU_37XX_BUTTRESS_CURRENT_PLL 0x0000001cu
#define VPU_37XX_BUTTRESS_CURRENT_PLL_RATIO_MASK GENMASK(15, 0)
#define VPU_37XX_BUTTRESS_PLL_ENABLE 0x00000020u
#define VPU_37XX_BUTTRESS_FMIN_FUSE 0x00000024u
#define VPU_37XX_BUTTRESS_FMIN_FUSE_MIN_RATIO_MASK GENMASK(7, 0)
#define VPU_37XX_BUTTRESS_FMIN_FUSE_PN_RATIO_MASK GENMASK(15, 8)
#define VPU_37XX_BUTTRESS_FMAX_FUSE 0x00000028u
#define VPU_37XX_BUTTRESS_FMAX_FUSE_MAX_RATIO_MASK GENMASK(7, 0)
#define VPU_37XX_BUTTRESS_TILE_FUSE 0x0000002cu
#define VPU_37XX_BUTTRESS_TILE_FUSE_VALID_MASK BIT_MASK(0)
#define VPU_37XX_BUTTRESS_TILE_FUSE_SKU_MASK GENMASK(3, 2)
#define VPU_37XX_BUTTRESS_LOCAL_INT_MASK 0x00000030u
#define VPU_37XX_BUTTRESS_GLOBAL_INT_MASK 0x00000034u
#define VPU_37XX_BUTTRESS_PLL_STATUS 0x00000040u
#define VPU_37XX_BUTTRESS_PLL_STATUS_LOCK_MASK BIT_MASK(1)
#define VPU_37XX_BUTTRESS_VPU_STATUS 0x00000044u
#define VPU_37XX_BUTTRESS_VPU_STATUS_READY_MASK BIT_MASK(0)
#define VPU_37XX_BUTTRESS_VPU_STATUS_IDLE_MASK BIT_MASK(1)
#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL 0x00000060u
#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL_INPROGRESS_MASK BIT_MASK(0)
#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL_I3_MASK BIT_MASK(2)
#define VPU_37XX_BUTTRESS_VPU_IP_RESET 0x00000050u
#define VPU_37XX_BUTTRESS_VPU_IP_RESET_TRIGGER_MASK BIT_MASK(0)
#define VPU_37XX_BUTTRESS_VPU_TELEMETRY_OFFSET 0x00000080u
#define VPU_37XX_BUTTRESS_VPU_TELEMETRY_SIZE 0x00000084u
#define VPU_37XX_BUTTRESS_VPU_TELEMETRY_ENABLE 0x00000088u
#define VPU_37XX_BUTTRESS_ATS_ERR_LOG_0 0x000000a0u
#define VPU_37XX_BUTTRESS_ATS_ERR_LOG_1 0x000000a4u
#define VPU_37XX_BUTTRESS_ATS_ERR_CLEAR 0x000000a8u
#define VPU_37XX_BUTTRESS_UFI_ERR_LOG 0x000000b0u
#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_CQ_ID_MASK GENMASK(11, 0)
#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_AXI_ID_MASK GENMASK(19, 12)
#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_OPCODE_MASK GENMASK(24, 20)
#define VPU_37XX_BUTTRESS_UFI_ERR_CLEAR 0x000000b4u
#define VPU_37XX_HOST_SS_CPR_CLK_SET 0x00000084u
#define VPU_37XX_HOST_SS_CPR_CLK_SET_TOP_NOC_MASK BIT_MASK(1)
#define VPU_37XX_HOST_SS_CPR_CLK_SET_DSS_MAS_MASK BIT_MASK(10)

File diff suppressed because it is too large


@@ -8,91 +8,6 @@
#include <linux/bits.h>
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT 0x00000000u
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_FREQ_CHANGE_MASK BIT_MASK(0)
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_ATS_ERR_MASK BIT_MASK(1)
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_CFI0_ERR_MASK BIT_MASK(2)
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_CFI1_ERR_MASK BIT_MASK(3)
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_IMR0_ERR_MASK BIT_MASK(4)
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_IMR1_ERR_MASK BIT_MASK(5)
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_SURV_ERR_MASK BIT_MASK(6)
#define VPU_40XX_BUTTRESS_LOCAL_INT_MASK 0x00000004u
#define VPU_40XX_BUTTRESS_GLOBAL_INT_MASK 0x00000008u
#define VPU_40XX_BUTTRESS_HM_ATS 0x0000000cu
#define VPU_40XX_BUTTRESS_ATS_ERR_LOG1 0x00000010u
#define VPU_40XX_BUTTRESS_ATS_ERR_LOG2 0x00000014u
#define VPU_40XX_BUTTRESS_ATS_ERR_CLEAR 0x00000018u
#define VPU_40XX_BUTTRESS_CFI0_ERR_LOG 0x0000001cu
#define VPU_40XX_BUTTRESS_CFI0_ERR_CLEAR 0x00000020u
#define VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS_ATS 0x00000024u
#define VPU_40XX_BUTTRESS_CFI1_ERR_LOG 0x00000040u
#define VPU_40XX_BUTTRESS_CFI1_ERR_CLEAR 0x00000044u
#define VPU_40XX_BUTTRESS_IMR_ERR_CFI0_LOW 0x00000048u
#define VPU_40XX_BUTTRESS_IMR_ERR_CFI0_HIGH 0x0000004cu
#define VPU_40XX_BUTTRESS_IMR_ERR_CFI0_CLEAR 0x00000050u
#define VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS 0x00000054u
#define VPU_40XX_BUTTRESS_IMR_ERR_CFI1_LOW 0x00000058u
#define VPU_40XX_BUTTRESS_IMR_ERR_CFI1_HIGH 0x0000005cu
#define VPU_40XX_BUTTRESS_IMR_ERR_CFI1_CLEAR 0x00000060u
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0 0x00000130u
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0_MIN_RATIO_MASK GENMASK(15, 0)
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0_MAX_RATIO_MASK GENMASK(31, 16)
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1 0x00000134u
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK GENMASK(15, 0)
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1_EPP_MASK GENMASK(31, 16)
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2 0x00000138u
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2_CONFIG_MASK GENMASK(15, 0)
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2_CDYN_MASK GENMASK(31, 16)
#define VPU_40XX_BUTTRESS_WP_REQ_CMD 0x0000013cu
#define VPU_40XX_BUTTRESS_WP_REQ_CMD_SEND_MASK BIT_MASK(0)
#define VPU_40XX_BUTTRESS_PLL_FREQ 0x00000148u
#define VPU_40XX_BUTTRESS_PLL_FREQ_RATIO_MASK GENMASK(15, 0)
#define VPU_40XX_BUTTRESS_TILE_FUSE 0x00000150u
#define VPU_40XX_BUTTRESS_TILE_FUSE_VALID_MASK BIT_MASK(0)
#define VPU_40XX_BUTTRESS_TILE_FUSE_CONFIG_MASK GENMASK(6, 1)
#define VPU_40XX_BUTTRESS_VPU_STATUS 0x00000154u
#define VPU_40XX_BUTTRESS_VPU_STATUS_READY_MASK BIT_MASK(0)
#define VPU_40XX_BUTTRESS_VPU_STATUS_IDLE_MASK BIT_MASK(1)
#define VPU_40XX_BUTTRESS_VPU_STATUS_DUP_IDLE_MASK BIT_MASK(2)
#define VPU_40XX_BUTTRESS_VPU_STATUS_CLOCK_RESOURCE_OWN_ACK_MASK BIT_MASK(6)
#define VPU_40XX_BUTTRESS_VPU_STATUS_POWER_RESOURCE_OWN_ACK_MASK BIT_MASK(7)
#define VPU_40XX_BUTTRESS_VPU_STATUS_PERF_CLK_MASK BIT_MASK(11)
#define VPU_40XX_BUTTRESS_VPU_STATUS_DISABLE_CLK_RELINQUISH_MASK BIT_MASK(12)
#define VPU_40XX_BUTTRESS_IP_RESET 0x00000160u
#define VPU_40XX_BUTTRESS_IP_RESET_TRIGGER_MASK BIT_MASK(0)
#define VPU_40XX_BUTTRESS_D0I3_CONTROL 0x00000164u
#define VPU_40XX_BUTTRESS_D0I3_CONTROL_INPROGRESS_MASK BIT_MASK(0)
#define VPU_40XX_BUTTRESS_D0I3_CONTROL_I3_MASK BIT_MASK(2)
#define VPU_40XX_BUTTRESS_VPU_TELEMETRY_OFFSET 0x00000168u
#define VPU_40XX_BUTTRESS_VPU_TELEMETRY_SIZE 0x0000016cu
#define VPU_40XX_BUTTRESS_VPU_TELEMETRY_ENABLE 0x00000170u
#define VPU_40XX_BUTTRESS_FMIN_FUSE 0x00000174u
#define VPU_40XX_BUTTRESS_FMIN_FUSE_MIN_RATIO_MASK GENMASK(7, 0)
#define VPU_40XX_BUTTRESS_FMIN_FUSE_PN_RATIO_MASK GENMASK(15, 8)
#define VPU_40XX_BUTTRESS_FMAX_FUSE 0x00000178u
#define VPU_40XX_BUTTRESS_FMAX_FUSE_MAX_RATIO_MASK GENMASK(7, 0)
#define VPU_40XX_HOST_SS_CPR_CLK_EN 0x00000080u
#define VPU_40XX_HOST_SS_CPR_CLK_EN_TOP_NOC_MASK BIT_MASK(1)
#define VPU_40XX_HOST_SS_CPR_CLK_EN_DSS_MAS_MASK BIT_MASK(10)
@@ -198,6 +113,12 @@
#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0 0x0003002cu
#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0_CSS_CPU_MASK BIT_MASK(3)
#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY 0x00030068u
#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY_POST_DLY_MASK GENMASK(7, 0)
#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY 0x0003006cu
#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY_STATUS_DLY_MASK GENMASK(7, 0)
#define VPU_40XX_HOST_SS_AON_IDLE_GEN 0x00030200u
#define VPU_40XX_HOST_SS_AON_IDLE_GEN_EN_MASK BIT_MASK(0)
#define VPU_40XX_HOST_SS_AON_IDLE_GEN_HW_PG_EN_MASK BIT_MASK(1)
@@ -205,6 +126,9 @@
#define VPU_40XX_HOST_SS_AON_DPU_ACTIVE 0x00030204u
#define VPU_40XX_HOST_SS_AON_DPU_ACTIVE_DPU_ACTIVE_MASK BIT_MASK(0)
#define VPU_50XX_HOST_SS_AON_FABRIC_REQ_OVERRIDE 0x00030210u
#define VPU_50XX_HOST_SS_AON_FABRIC_REQ_OVERRIDE_REQ_OVERRIDE_MASK BIT_MASK(0)
#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO 0x00040040u
#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_DONE_MASK BIT_MASK(0)
#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IOSF_RS_ID_MASK GENMASK(2, 1)


@@ -0,0 +1,905 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2024 Intel Corporation
*/
#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_hw_btrs.h"
#include "ivpu_hw_btrs_lnl_reg.h"
#include "ivpu_hw_btrs_mtl_reg.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_pm.h"
#define BTRS_MTL_IRQ_MASK ((REG_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR)) | \
(REG_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, UFI_ERR)))
#define BTRS_LNL_IRQ_MASK ((REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR)) | \
(REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI0_ERR)) | \
(REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI1_ERR)) | \
(REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR0_ERR)) | \
(REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR1_ERR)) | \
(REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR)))
#define BTRS_MTL_ALL_IRQ_MASK (BTRS_MTL_IRQ_MASK | (REG_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, \
FREQ_CHANGE)))
#define BTRS_IRQ_DISABLE_MASK ((u32)-1)
#define BTRS_LNL_ALL_IRQ_MASK ((u32)-1)
#define BTRS_MTL_WP_CONFIG_1_TILE_5_3_RATIO WP_CONFIG(MTL_CONFIG_1_TILE, MTL_PLL_RATIO_5_3)
#define BTRS_MTL_WP_CONFIG_1_TILE_4_3_RATIO WP_CONFIG(MTL_CONFIG_1_TILE, MTL_PLL_RATIO_4_3)
#define BTRS_MTL_WP_CONFIG_2_TILE_5_3_RATIO WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_5_3)
#define BTRS_MTL_WP_CONFIG_2_TILE_4_3_RATIO WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_4_3)
#define BTRS_MTL_WP_CONFIG_0_TILE_PLL_OFF WP_CONFIG(0, 0)
#define PLL_CDYN_DEFAULT 0x80
#define PLL_EPP_DEFAULT 0x80
#define PLL_CONFIG_DEFAULT 0x0
#define PLL_SIMULATION_FREQ 10000000
#define PLL_REF_CLK_FREQ 50000000
#define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC)
#define IDLE_TIMEOUT_US (5 * USEC_PER_MSEC)
#define TIMEOUT_US (150 * USEC_PER_MSEC)
/* Work point configuration values */
#define WP_CONFIG(tile, ratio) (((tile) << 8) | (ratio))
#define MTL_CONFIG_1_TILE 0x01
#define MTL_CONFIG_2_TILE 0x02
#define MTL_PLL_RATIO_5_3 0x01
#define MTL_PLL_RATIO_4_3 0x02
#define BTRS_MTL_TILE_FUSE_ENABLE_BOTH 0x0
#define BTRS_MTL_TILE_SKU_BOTH 0x3630
#define BTRS_LNL_TILE_MAX_NUM 6
#define BTRS_LNL_TILE_MAX_MASK 0x3f
#define WEIGHTS_DEFAULT 0xf711f711u
#define WEIGHTS_ATS_DEFAULT 0x0000f711u
#define DCT_REQ 0x2
#define DCT_ENABLE 0x1
#define DCT_DISABLE 0x0
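/*
 * Buttress (BTRS) register helpers: MTL and LNL expose equivalent
 * functionality (workpoint/PLL control, D0i3, interrupt routing,
 * telemetry) through different register layouts, so most helpers below
 * dispatch on ivpu_hw_btrs_gen().
 */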
int ivpu_hw_btrs_irqs_clear_with_0_mtl(struct ivpu_device *vdev)
{
REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, BTRS_MTL_ALL_IRQ_MASK);
if (REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) == BTRS_MTL_ALL_IRQ_MASK) {
/* Writing 1s does not clear the interrupt status register */
REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, 0x0);
return true;
}
return false;
}
static void freq_ratios_init_mtl(struct ivpu_device *vdev)
{
struct ivpu_hw_info *hw = vdev->hw;
u32 fmin_fuse, fmax_fuse;
fmin_fuse = REGB_RD32(VPU_HW_BTRS_MTL_FMIN_FUSE);
hw->pll.min_ratio = REG_GET_FLD(VPU_HW_BTRS_MTL_FMIN_FUSE, MIN_RATIO, fmin_fuse);
hw->pll.pn_ratio = REG_GET_FLD(VPU_HW_BTRS_MTL_FMIN_FUSE, PN_RATIO, fmin_fuse);
fmax_fuse = REGB_RD32(VPU_HW_BTRS_MTL_FMAX_FUSE);
hw->pll.max_ratio = REG_GET_FLD(VPU_HW_BTRS_MTL_FMAX_FUSE, MAX_RATIO, fmax_fuse);
}
static void freq_ratios_init_lnl(struct ivpu_device *vdev)
{
struct ivpu_hw_info *hw = vdev->hw;
u32 fmin_fuse, fmax_fuse;
fmin_fuse = REGB_RD32(VPU_HW_BTRS_LNL_FMIN_FUSE);
hw->pll.min_ratio = REG_GET_FLD(VPU_HW_BTRS_LNL_FMIN_FUSE, MIN_RATIO, fmin_fuse);
hw->pll.pn_ratio = REG_GET_FLD(VPU_HW_BTRS_LNL_FMIN_FUSE, PN_RATIO, fmin_fuse);
fmax_fuse = REGB_RD32(VPU_HW_BTRS_LNL_FMAX_FUSE);
hw->pll.max_ratio = REG_GET_FLD(VPU_HW_BTRS_LNL_FMAX_FUSE, MAX_RATIO, fmax_fuse);
}
void ivpu_hw_btrs_freq_ratios_init(struct ivpu_device *vdev)
{
struct ivpu_hw_info *hw = vdev->hw;
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
freq_ratios_init_mtl(vdev);
else
freq_ratios_init_lnl(vdev);
hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
hw->pll.pn_ratio = clamp_t(u8, hw->pll.pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
}
static bool tile_disable_check(u32 config)
{
/* Allowed values: 0 or one bit from range 0-5 (6 tiles) */
if (config == 0)
return true;
if (config > BIT(BTRS_LNL_TILE_MAX_NUM - 1))
return false;
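	/* config & (config - 1) clears the lowest set bit: zero here iff exactly one bit is set */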
if ((config & (config - 1)) == 0)
return true;
return false;
}
static int read_tile_config_fuse(struct ivpu_device *vdev, u32 *tile_fuse_config)
{
u32 fuse;
u32 config;
fuse = REGB_RD32(VPU_HW_BTRS_LNL_TILE_FUSE);
if (!REG_TEST_FLD(VPU_HW_BTRS_LNL_TILE_FUSE, VALID, fuse)) {
ivpu_err(vdev, "Fuse: invalid (0x%x)\n", fuse);
return -EIO;
}
config = REG_GET_FLD(VPU_HW_BTRS_LNL_TILE_FUSE, CONFIG, fuse);
if (!tile_disable_check(config)) {
ivpu_err(vdev, "Fuse: Invalid tile disable config (0x%x)\n", config);
return -EIO;
}
if (config)
ivpu_dbg(vdev, MISC, "Fuse: %d tiles enabled. Tile number %d disabled\n",
BTRS_LNL_TILE_MAX_NUM - 1, ffs(config) - 1);
else
ivpu_dbg(vdev, MISC, "Fuse: All %d tiles enabled\n", BTRS_LNL_TILE_MAX_NUM);
*tile_fuse_config = config;
return 0;
}
static int info_init_mtl(struct ivpu_device *vdev)
{
struct ivpu_hw_info *hw = vdev->hw;
hw->tile_fuse = BTRS_MTL_TILE_FUSE_ENABLE_BOTH;
hw->sku = BTRS_MTL_TILE_SKU_BOTH;
hw->config = BTRS_MTL_WP_CONFIG_2_TILE_4_3_RATIO;
hw->sched_mode = ivpu_sched_mode;
return 0;
}
static int info_init_lnl(struct ivpu_device *vdev)
{
struct ivpu_hw_info *hw = vdev->hw;
u32 tile_fuse_config;
int ret;
ret = read_tile_config_fuse(vdev, &tile_fuse_config);
if (ret)
return ret;
hw->sched_mode = ivpu_sched_mode;
hw->tile_fuse = tile_fuse_config;
hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
return 0;
}
int ivpu_hw_btrs_info_init(struct ivpu_device *vdev)
{
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
return info_init_mtl(vdev);
else
return info_init_lnl(vdev);
}
static int wp_request_sync(struct ivpu_device *vdev)
{
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
return REGB_POLL_FLD(VPU_HW_BTRS_MTL_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
else
return REGB_POLL_FLD(VPU_HW_BTRS_LNL_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
}
static int wait_for_status_ready(struct ivpu_device *vdev, bool enable)
{
u32 exp_val = enable ? 0x1 : 0x0;
if (IVPU_WA(punit_disabled))
return 0;
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
return REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, READY, exp_val, PLL_TIMEOUT_US);
else
return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, READY, exp_val, PLL_TIMEOUT_US);
}
struct wp_request {
u16 min;
u16 max;
u16 target;
u16 cfg;
u16 epp;
u16 cdyn;
};
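/*
 * A workpoint request programs min/max/target PLL ratios plus EPP/CDYN
 * hints into the WP_REQ payload registers and then kicks the SEND bit;
 * completion is detected by polling SEND back to 0 (wp_request_sync()).
 */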
static void wp_request_mtl(struct ivpu_device *vdev, struct wp_request *wp)
{
u32 val;
val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0);
val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0, MIN_RATIO, wp->min, val);
val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0, MAX_RATIO, wp->max, val);
REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0, val);
val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1);
val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1, TARGET_RATIO, wp->target, val);
val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1, EPP, PLL_EPP_DEFAULT, val);
REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1, val);
val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2);
val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2, CONFIG, wp->cfg, val);
REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2, val);
val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_CMD);
val = REG_SET_FLD(VPU_HW_BTRS_MTL_WP_REQ_CMD, SEND, val);
REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_CMD, val);
}
static void wp_request_lnl(struct ivpu_device *vdev, struct wp_request *wp)
{
u32 val;
val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0);
val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0, MIN_RATIO, wp->min, val);
val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0, MAX_RATIO, wp->max, val);
REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0, val);
val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1);
val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1, TARGET_RATIO, wp->target, val);
val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1, EPP, wp->epp, val);
REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1, val);
val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2);
val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2, CONFIG, wp->cfg, val);
val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2, CDYN, wp->cdyn, val);
REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2, val);
val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_CMD);
val = REG_SET_FLD(VPU_HW_BTRS_LNL_WP_REQ_CMD, SEND, val);
REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_CMD, val);
}
static void wp_request(struct ivpu_device *vdev, struct wp_request *wp)
{
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
wp_request_mtl(vdev, wp);
else
wp_request_lnl(vdev, wp);
}
static int wp_request_send(struct ivpu_device *vdev, struct wp_request *wp)
{
int ret;
ret = wp_request_sync(vdev);
if (ret) {
ivpu_err(vdev, "Failed to sync before workpoint request: %d\n", ret);
return ret;
}
wp_request(vdev, wp);
ret = wp_request_sync(vdev);
if (ret)
ivpu_err(vdev, "Failed to sync after workpoint request: %d\n", ret);
return ret;
}
static void prepare_wp_request(struct ivpu_device *vdev, struct wp_request *wp, bool enable)
{
struct ivpu_hw_info *hw = vdev->hw;
wp->min = hw->pll.min_ratio;
wp->max = hw->pll.max_ratio;
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) {
wp->target = enable ? hw->pll.pn_ratio : 0;
wp->cfg = enable ? hw->config : 0;
wp->cdyn = 0;
wp->epp = 0;
} else {
wp->target = hw->pll.pn_ratio;
wp->cfg = enable ? PLL_CONFIG_DEFAULT : 0;
wp->cdyn = enable ? PLL_CDYN_DEFAULT : 0;
wp->epp = enable ? PLL_EPP_DEFAULT : 0;
}
/* Simics cannot start without at least one tile */
if (enable && ivpu_is_simics(vdev))
wp->cfg = 1;
}
static int wait_for_pll_lock(struct ivpu_device *vdev, bool enable)
{
u32 exp_val = enable ? 0x1 : 0x0;
if (ivpu_hw_btrs_gen(vdev) != IVPU_HW_BTRS_MTL)
return 0;
if (IVPU_WA(punit_disabled))
return 0;
return REGB_POLL_FLD(VPU_HW_BTRS_MTL_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US);
}
int ivpu_hw_btrs_wp_drive(struct ivpu_device *vdev, bool enable)
{
struct wp_request wp;
int ret;
if (IVPU_WA(punit_disabled)) {
ivpu_dbg(vdev, PM, "Skipping workpoint request\n");
return 0;
}
prepare_wp_request(vdev, &wp, enable);
ivpu_dbg(vdev, PM, "PLL workpoint request: %u Hz, config: 0x%x, epp: 0x%x, cdyn: 0x%x\n",
PLL_RATIO_TO_FREQ(wp.target), wp.cfg, wp.epp, wp.cdyn);
ret = wp_request_send(vdev, &wp);
if (ret) {
ivpu_err(vdev, "Failed to send workpoint request: %d\n", ret);
return ret;
}
ret = wait_for_pll_lock(vdev, enable);
if (ret) {
ivpu_err(vdev, "Timed out waiting for PLL lock\n");
return ret;
}
ret = wait_for_status_ready(vdev, enable);
if (ret) {
ivpu_err(vdev, "Timed out waiting for NPU ready status\n");
return ret;
}
return 0;
}
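/*
 * D0i3 transitions use the same handshake on both generations: wait for
 * INPROGRESS to clear, set or clear the I3 bit, then wait for INPROGRESS
 * to clear again to confirm the transition completed.
 */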
static int d0i3_drive_mtl(struct ivpu_device *vdev, bool enable)
{
int ret;
u32 val;
ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
if (ret) {
ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
return ret;
}
val = REGB_RD32(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL);
if (enable)
val = REG_SET_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, I3, val);
else
val = REG_CLR_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, I3, val);
REGB_WR32(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, val);
ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
if (ret)
ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);
return ret;
}
static int d0i3_drive_lnl(struct ivpu_device *vdev, bool enable)
{
int ret;
u32 val;
ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
if (ret) {
ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
return ret;
}
val = REGB_RD32(VPU_HW_BTRS_LNL_D0I3_CONTROL);
if (enable)
val = REG_SET_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, I3, val);
else
val = REG_CLR_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, I3, val);
REGB_WR32(VPU_HW_BTRS_LNL_D0I3_CONTROL, val);
ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
if (ret) {
ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);
return ret;
}
return 0;
}
static int d0i3_drive(struct ivpu_device *vdev, bool enable)
{
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
return d0i3_drive_mtl(vdev, enable);
else
return d0i3_drive_lnl(vdev, enable);
}
int ivpu_hw_btrs_d0i3_enable(struct ivpu_device *vdev)
{
int ret;
if (IVPU_WA(punit_disabled))
return 0;
ret = d0i3_drive(vdev, true);
if (ret)
ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret);
udelay(5); /* VPU requires 5 us to complete the transition */
return ret;
}
int ivpu_hw_btrs_d0i3_disable(struct ivpu_device *vdev)
{
int ret;
if (IVPU_WA(punit_disabled))
return 0;
ret = d0i3_drive(vdev, false);
if (ret)
ivpu_err(vdev, "Failed to disable D0i3: %d\n", ret);
return ret;
}
int ivpu_hw_btrs_wait_for_clock_res_own_ack(struct ivpu_device *vdev)
{
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
return 0;
if (ivpu_is_simics(vdev))
return 0;
return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, CLOCK_RESOURCE_OWN_ACK, 1, TIMEOUT_US);
}
void ivpu_hw_btrs_set_port_arbitration_weights_lnl(struct ivpu_device *vdev)
{
REGB_WR32(VPU_HW_BTRS_LNL_PORT_ARBITRATION_WEIGHTS, WEIGHTS_DEFAULT);
REGB_WR32(VPU_HW_BTRS_LNL_PORT_ARBITRATION_WEIGHTS_ATS, WEIGHTS_ATS_DEFAULT);
}
static int ip_reset_mtl(struct ivpu_device *vdev)
{
int ret;
u32 val;
ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
if (ret) {
ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
return ret;
}
val = REGB_RD32(VPU_HW_BTRS_MTL_VPU_IP_RESET);
val = REG_SET_FLD(VPU_HW_BTRS_MTL_VPU_IP_RESET, TRIGGER, val);
REGB_WR32(VPU_HW_BTRS_MTL_VPU_IP_RESET, val);
ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
if (ret)
ivpu_err(vdev, "Timed out waiting for RESET completion\n");
return ret;
}
static int ip_reset_lnl(struct ivpu_device *vdev)
{
int ret;
u32 val;
ivpu_hw_btrs_clock_relinquish_disable_lnl(vdev);
ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_IP_RESET, TRIGGER, 0, TIMEOUT_US);
if (ret) {
ivpu_err(vdev, "Wait for *_TRIGGER timed out\n");
return ret;
}
val = REGB_RD32(VPU_HW_BTRS_LNL_IP_RESET);
val = REG_SET_FLD(VPU_HW_BTRS_LNL_IP_RESET, TRIGGER, val);
REGB_WR32(VPU_HW_BTRS_LNL_IP_RESET, val);
ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_IP_RESET, TRIGGER, 0, TIMEOUT_US);
if (ret)
ivpu_err(vdev, "Timed out waiting for RESET completion\n");
return ret;
}
int ivpu_hw_btrs_ip_reset(struct ivpu_device *vdev)
{
if (IVPU_WA(punit_disabled))
return 0;
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
return ip_reset_mtl(vdev);
else
return ip_reset_lnl(vdev);
}
void ivpu_hw_btrs_profiling_freq_reg_set_lnl(struct ivpu_device *vdev)
{
u32 val = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS);
if (vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_DEFAULT)
val = REG_CLR_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, PERF_CLK, val);
else
val = REG_SET_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, PERF_CLK, val);
REGB_WR32(VPU_HW_BTRS_LNL_VPU_STATUS, val);
}
void ivpu_hw_btrs_ats_print_lnl(struct ivpu_device *vdev)
{
ivpu_dbg(vdev, MISC, "Buttress ATS: %s\n",
REGB_RD32(VPU_HW_BTRS_LNL_HM_ATS) ? "Enable" : "Disable");
}
void ivpu_hw_btrs_clock_relinquish_disable_lnl(struct ivpu_device *vdev)
{
u32 val = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS);
val = REG_SET_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, DISABLE_CLK_RELINQUISH, val);
REGB_WR32(VPU_HW_BTRS_LNL_VPU_STATUS, val);
}
bool ivpu_hw_btrs_is_idle(struct ivpu_device *vdev)
{
u32 val;
if (IVPU_WA(punit_disabled))
return true;
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) {
val = REGB_RD32(VPU_HW_BTRS_MTL_VPU_STATUS);
return REG_TEST_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, READY, val) &&
REG_TEST_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, IDLE, val);
} else {
val = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS);
return REG_TEST_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, READY, val) &&
REG_TEST_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, IDLE, val);
}
}
int ivpu_hw_btrs_wait_for_idle(struct ivpu_device *vdev)
{
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
return REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US);
else
return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US);
}
/* Handler for IRQs from Buttress core (irqB) */
bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq)
{
u32 status = REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) & BTRS_MTL_IRQ_MASK;
bool schedule_recovery = false;
if (!status)
return false;
if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, FREQ_CHANGE, status))
ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x",
REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL));
if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR, status)) {
ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_HW_BTRS_MTL_ATS_ERR_LOG_0));
REGB_WR32(VPU_HW_BTRS_MTL_ATS_ERR_CLEAR, 0x1);
schedule_recovery = true;
}
if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, UFI_ERR, status)) {
u32 ufi_log = REGB_RD32(VPU_HW_BTRS_MTL_UFI_ERR_LOG);
ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
ufi_log, REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, OPCODE, ufi_log),
REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, AXI_ID, ufi_log),
REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, CQ_ID, ufi_log));
REGB_WR32(VPU_HW_BTRS_MTL_UFI_ERR_CLEAR, 0x1);
schedule_recovery = true;
}
/* This must be done after interrupts are cleared at the source. */
if (IVPU_WA(interrupt_clear_with_0))
/*
* Writing 1 triggers an interrupt, so we can't perform read update write.
* Clear local interrupt status by writing 0 to all bits.
*/
REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, 0x0);
else
REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, status);
if (schedule_recovery)
ivpu_pm_trigger_recovery(vdev, "Buttress IRQ");
return true;
}
/* Handler for IRQs from Buttress core (irqB) */
bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq)
{
u32 status = REGB_RD32(VPU_HW_BTRS_LNL_INTERRUPT_STAT) & BTRS_LNL_IRQ_MASK;
bool schedule_recovery = false;
if (!status)
return false;
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, status)) {
ivpu_dbg(vdev, IRQ, "Survivability IRQ\n");
if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_DCT))
ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
}
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status))
ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ));
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR, status)) {
ivpu_err(vdev, "ATS_ERR LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG1),
REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG2));
REGB_WR32(VPU_HW_BTRS_LNL_ATS_ERR_CLEAR, 0x1);
schedule_recovery = true;
}
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI0_ERR, status)) {
ivpu_err(vdev, "CFI0_ERR 0x%08x", REGB_RD32(VPU_HW_BTRS_LNL_CFI0_ERR_LOG));
REGB_WR32(VPU_HW_BTRS_LNL_CFI0_ERR_CLEAR, 0x1);
schedule_recovery = true;
}
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI1_ERR, status)) {
ivpu_err(vdev, "CFI1_ERR 0x%08x", REGB_RD32(VPU_HW_BTRS_LNL_CFI1_ERR_LOG));
REGB_WR32(VPU_HW_BTRS_LNL_CFI1_ERR_CLEAR, 0x1);
schedule_recovery = true;
}
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR0_ERR, status)) {
ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x",
REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_LOW),
REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_HIGH));
REGB_WR32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_CLEAR, 0x1);
schedule_recovery = true;
}
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR1_ERR, status)) {
ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x",
REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_LOW),
REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_HIGH));
REGB_WR32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_CLEAR, 0x1);
schedule_recovery = true;
}
/* This must be done after interrupts are cleared at the source. */
REGB_WR32(VPU_HW_BTRS_LNL_INTERRUPT_STAT, status);
if (schedule_recovery)
ivpu_pm_trigger_recovery(vdev, "Buttress IRQ");
return true;
}
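/*
 * DCT (duty cycle throttling) mailbox: pcode requests are read from the
 * shadow register (CMD/PARAM1) and the resulting state plus the active
 * percentage are reported back through the status register.
 */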
int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable)
{
u32 val = REGB_RD32(VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW);
u32 cmd = REG_GET_FLD(VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW, CMD, val);
u32 param1 = REG_GET_FLD(VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW, PARAM1, val);
if (cmd != DCT_REQ) {
ivpu_err_ratelimited(vdev, "Unsupported PCODE command: 0x%x\n", cmd);
return -EBADR;
}
switch (param1) {
case DCT_ENABLE:
*enable = true;
return 0;
case DCT_DISABLE:
*enable = false;
return 0;
default:
ivpu_err_ratelimited(vdev, "Invalid PARAM1 value: %u\n", param1);
return -EINVAL;
}
}
void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 active_percent)
{
u32 val = 0;
u32 cmd = enable ? DCT_ENABLE : DCT_DISABLE;
val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, CMD, DCT_REQ, val);
val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, PARAM1, cmd, val);
val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, PARAM2, active_percent, val);
REGB_WR32(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, val);
}
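/*
 * MTL frequency derivation: the PLL clock is ratio * 50 MHz reference
 * clock; the CPU clock is 2/4 of that for the 4/3-ratio workpoint config
 * and 2/5 of it otherwise.
 */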
static u32 pll_ratio_to_freq_mtl(u32 ratio, u32 config)
{
u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
u32 cpu_clock;
if ((config & 0xff) == MTL_PLL_RATIO_4_3)
cpu_clock = pll_clock * 2 / 4;
else
cpu_clock = pll_clock * 2 / 5;
return cpu_clock;
}
u32 ivpu_hw_btrs_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
{
struct ivpu_hw_info *hw = vdev->hw;
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
return pll_ratio_to_freq_mtl(ratio, hw->config);
else
return PLL_RATIO_TO_FREQ(ratio);
}
static u32 pll_freq_get_mtl(struct ivpu_device *vdev)
{
u32 pll_curr_ratio;
pll_curr_ratio = REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL);
pll_curr_ratio &= VPU_HW_BTRS_MTL_CURRENT_PLL_RATIO_MASK;
if (!ivpu_is_silicon(vdev))
return PLL_SIMULATION_FREQ;
return pll_ratio_to_freq_mtl(pll_curr_ratio, vdev->hw->config);
}
static u32 pll_freq_get_lnl(struct ivpu_device *vdev)
{
u32 pll_curr_ratio;
pll_curr_ratio = REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ);
pll_curr_ratio &= VPU_HW_BTRS_LNL_PLL_FREQ_RATIO_MASK;
return PLL_RATIO_TO_FREQ(pll_curr_ratio);
}
u32 ivpu_hw_btrs_pll_freq_get(struct ivpu_device *vdev)
{
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
return pll_freq_get_mtl(vdev);
else
return pll_freq_get_lnl(vdev);
}
u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev)
{
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
return REGB_RD32(VPU_HW_BTRS_MTL_VPU_TELEMETRY_OFFSET);
else
return REGB_RD32(VPU_HW_BTRS_LNL_VPU_TELEMETRY_OFFSET);
}
u32 ivpu_hw_btrs_telemetry_size_get(struct ivpu_device *vdev)
{
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
return REGB_RD32(VPU_HW_BTRS_MTL_VPU_TELEMETRY_SIZE);
else
return REGB_RD32(VPU_HW_BTRS_LNL_VPU_TELEMETRY_SIZE);
}
u32 ivpu_hw_btrs_telemetry_enable_get(struct ivpu_device *vdev)
{
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
return REGB_RD32(VPU_HW_BTRS_MTL_VPU_TELEMETRY_ENABLE);
else
return REGB_RD32(VPU_HW_BTRS_LNL_VPU_TELEMETRY_ENABLE);
}
void ivpu_hw_btrs_global_int_disable(struct ivpu_device *vdev)
{
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x1);
else
REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x1);
}
void ivpu_hw_btrs_global_int_enable(struct ivpu_device *vdev)
{
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x0);
else
REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x0);
}
void ivpu_hw_btrs_irq_enable(struct ivpu_device *vdev)
{
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) {
REGB_WR32(VPU_HW_BTRS_MTL_LOCAL_INT_MASK, (u32)(~BTRS_MTL_IRQ_MASK));
REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x0);
} else {
REGB_WR32(VPU_HW_BTRS_LNL_LOCAL_INT_MASK, (u32)(~BTRS_LNL_IRQ_MASK));
REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x0);
}
}
void ivpu_hw_btrs_irq_disable(struct ivpu_device *vdev)
{
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) {
REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x1);
REGB_WR32(VPU_HW_BTRS_MTL_LOCAL_INT_MASK, BTRS_IRQ_DISABLE_MASK);
} else {
REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x1);
REGB_WR32(VPU_HW_BTRS_LNL_LOCAL_INT_MASK, BTRS_IRQ_DISABLE_MASK);
}
}
static void diagnose_failure_mtl(struct ivpu_device *vdev)
{
u32 reg = REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) & BTRS_MTL_IRQ_MASK;
if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR, reg))
ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_HW_BTRS_MTL_ATS_ERR_LOG_0));
if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, UFI_ERR, reg)) {
u32 log = REGB_RD32(VPU_HW_BTRS_MTL_UFI_ERR_LOG);
ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
log, REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, OPCODE, log),
REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, AXI_ID, log),
REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, CQ_ID, log));
}
}
static void diagnose_failure_lnl(struct ivpu_device *vdev)
{
u32 reg = REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) & BTRS_LNL_IRQ_MASK;
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR, reg)) {
ivpu_err(vdev, "ATS_ERR_LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG1),
REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG2));
}
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI0_ERR, reg))
ivpu_err(vdev, "CFI0_ERR_LOG 0x%08x\n", REGB_RD32(VPU_HW_BTRS_LNL_CFI0_ERR_LOG));
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI1_ERR, reg))
ivpu_err(vdev, "CFI1_ERR_LOG 0x%08x\n", REGB_RD32(VPU_HW_BTRS_LNL_CFI1_ERR_LOG));
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR0_ERR, reg))
ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x\n",
REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_LOW),
REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_HIGH));
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR1_ERR, reg))
ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x\n",
REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_LOW),
REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_HIGH));
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, reg))
ivpu_err(vdev, "Survivability IRQ\n");
}
void ivpu_hw_btrs_diagnose_failure(struct ivpu_device *vdev)
{
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
return diagnose_failure_mtl(vdev);
else
return diagnose_failure_lnl(vdev);
}


@@ -0,0 +1,50 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020-2024 Intel Corporation
*/
#ifndef __IVPU_HW_BTRS_H__
#define __IVPU_HW_BTRS_H__
#include "ivpu_drv.h"
#include "ivpu_hw_37xx_reg.h"
#include "ivpu_hw_40xx_reg.h"
#include "ivpu_hw_reg_io.h"
#define PLL_PROFILING_FREQ_DEFAULT 38400000
#define PLL_PROFILING_FREQ_HIGH 400000000
#define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ)
#define DCT_DEFAULT_ACTIVE_PERCENT 15u
#define DCT_PERIOD_US 35300u
int ivpu_hw_btrs_info_init(struct ivpu_device *vdev);
void ivpu_hw_btrs_freq_ratios_init(struct ivpu_device *vdev);
int ivpu_hw_btrs_irqs_clear_with_0_mtl(struct ivpu_device *vdev);
int ivpu_hw_btrs_wp_drive(struct ivpu_device *vdev, bool enable);
int ivpu_hw_btrs_wait_for_clock_res_own_ack(struct ivpu_device *vdev);
int ivpu_hw_btrs_d0i3_enable(struct ivpu_device *vdev);
int ivpu_hw_btrs_d0i3_disable(struct ivpu_device *vdev);
void ivpu_hw_btrs_set_port_arbitration_weights_lnl(struct ivpu_device *vdev);
bool ivpu_hw_btrs_is_idle(struct ivpu_device *vdev);
int ivpu_hw_btrs_wait_for_idle(struct ivpu_device *vdev);
int ivpu_hw_btrs_ip_reset(struct ivpu_device *vdev);
void ivpu_hw_btrs_profiling_freq_reg_set_lnl(struct ivpu_device *vdev);
void ivpu_hw_btrs_ats_print_lnl(struct ivpu_device *vdev);
void ivpu_hw_btrs_clock_relinquish_disable_lnl(struct ivpu_device *vdev);
bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq);
bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq);
int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable);
void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 dct_percent);
u32 ivpu_hw_btrs_pll_freq_get(struct ivpu_device *vdev);
u32 ivpu_hw_btrs_ratio_to_freq(struct ivpu_device *vdev, u32 ratio);
u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev);
u32 ivpu_hw_btrs_telemetry_size_get(struct ivpu_device *vdev);
u32 ivpu_hw_btrs_telemetry_enable_get(struct ivpu_device *vdev);
void ivpu_hw_btrs_global_int_enable(struct ivpu_device *vdev);
void ivpu_hw_btrs_global_int_disable(struct ivpu_device *vdev);
void ivpu_hw_btrs_irq_enable(struct ivpu_device *vdev);
void ivpu_hw_btrs_irq_disable(struct ivpu_device *vdev);
void ivpu_hw_btrs_diagnose_failure(struct ivpu_device *vdev);
#endif /* __IVPU_HW_BTRS_H__ */


@@ -0,0 +1,108 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020-2024 Intel Corporation
*/
#ifndef __IVPU_HW_BTRS_LNL_REG_H__
#define __IVPU_HW_BTRS_LNL_REG_H__
#include <linux/bits.h>
#define VPU_HW_BTRS_LNL_INTERRUPT_STAT 0x00000000u
#define VPU_HW_BTRS_LNL_INTERRUPT_STAT_FREQ_CHANGE_MASK BIT_MASK(0)
#define VPU_HW_BTRS_LNL_INTERRUPT_STAT_ATS_ERR_MASK BIT_MASK(1)
#define VPU_HW_BTRS_LNL_INTERRUPT_STAT_CFI0_ERR_MASK BIT_MASK(2)
#define VPU_HW_BTRS_LNL_INTERRUPT_STAT_CFI1_ERR_MASK BIT_MASK(3)
#define VPU_HW_BTRS_LNL_INTERRUPT_STAT_IMR0_ERR_MASK BIT_MASK(4)
#define VPU_HW_BTRS_LNL_INTERRUPT_STAT_IMR1_ERR_MASK BIT_MASK(5)
#define VPU_HW_BTRS_LNL_INTERRUPT_STAT_SURV_ERR_MASK BIT_MASK(6)
#define VPU_HW_BTRS_LNL_LOCAL_INT_MASK 0x00000004u
#define VPU_HW_BTRS_LNL_GLOBAL_INT_MASK 0x00000008u
#define VPU_HW_BTRS_LNL_HM_ATS 0x0000000cu
#define VPU_HW_BTRS_LNL_ATS_ERR_LOG1 0x00000010u
#define VPU_HW_BTRS_LNL_ATS_ERR_LOG2 0x00000014u
#define VPU_HW_BTRS_LNL_ATS_ERR_CLEAR 0x00000018u
#define VPU_HW_BTRS_LNL_CFI0_ERR_LOG 0x0000001cu
#define VPU_HW_BTRS_LNL_CFI0_ERR_CLEAR 0x00000020u
#define VPU_HW_BTRS_LNL_PORT_ARBITRATION_WEIGHTS_ATS 0x00000024u
#define VPU_HW_BTRS_LNL_CFI1_ERR_LOG 0x00000040u
#define VPU_HW_BTRS_LNL_CFI1_ERR_CLEAR 0x00000044u
#define VPU_HW_BTRS_LNL_IMR_ERR_CFI0_LOW 0x00000048u
#define VPU_HW_BTRS_LNL_IMR_ERR_CFI0_HIGH 0x0000004cu
#define VPU_HW_BTRS_LNL_IMR_ERR_CFI0_CLEAR 0x00000050u
#define VPU_HW_BTRS_LNL_PORT_ARBITRATION_WEIGHTS 0x00000054u
#define VPU_HW_BTRS_LNL_IMR_ERR_CFI1_LOW 0x00000058u
#define VPU_HW_BTRS_LNL_IMR_ERR_CFI1_HIGH 0x0000005cu
#define VPU_HW_BTRS_LNL_IMR_ERR_CFI1_CLEAR 0x00000060u
#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS 0x00000070u
#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS_CMD_MASK GENMASK(7, 0)
#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS_PARAM1_MASK GENMASK(15, 8)
#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS_PARAM2_MASK GENMASK(23, 16)
#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS_PARAM3_MASK GENMASK(31, 24)
#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW 0x00000074u
#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW_CMD_MASK GENMASK(7, 0)
#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW_PARAM1_MASK GENMASK(15, 8)
#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW_PARAM2_MASK GENMASK(23, 16)
#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW_PARAM3_MASK GENMASK(31, 24)
#define VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0 0x00000130u
#define VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0_MIN_RATIO_MASK GENMASK(15, 0)
#define VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0_MAX_RATIO_MASK GENMASK(31, 16)
#define VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1 0x00000134u
#define VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK GENMASK(15, 0)
#define VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1_EPP_MASK GENMASK(31, 16)
#define VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2 0x00000138u
#define VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2_CONFIG_MASK GENMASK(15, 0)
#define VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2_CDYN_MASK GENMASK(31, 16)
#define VPU_HW_BTRS_LNL_WP_REQ_CMD 0x0000013cu
#define VPU_HW_BTRS_LNL_WP_REQ_CMD_SEND_MASK BIT_MASK(0)
#define VPU_HW_BTRS_LNL_PLL_FREQ 0x00000148u
#define VPU_HW_BTRS_LNL_PLL_FREQ_RATIO_MASK GENMASK(15, 0)
#define VPU_HW_BTRS_LNL_TILE_FUSE 0x00000150u
#define VPU_HW_BTRS_LNL_TILE_FUSE_VALID_MASK BIT_MASK(0)
#define VPU_HW_BTRS_LNL_TILE_FUSE_CONFIG_MASK GENMASK(6, 1)
#define VPU_HW_BTRS_LNL_VPU_STATUS 0x00000154u
#define VPU_HW_BTRS_LNL_VPU_STATUS_READY_MASK BIT_MASK(0)
#define VPU_HW_BTRS_LNL_VPU_STATUS_IDLE_MASK BIT_MASK(1)
#define VPU_HW_BTRS_LNL_VPU_STATUS_DUP_IDLE_MASK BIT_MASK(2)
#define VPU_HW_BTRS_LNL_VPU_STATUS_CLOCK_RESOURCE_OWN_ACK_MASK BIT_MASK(6)
#define VPU_HW_BTRS_LNL_VPU_STATUS_POWER_RESOURCE_OWN_ACK_MASK BIT_MASK(7)
#define VPU_HW_BTRS_LNL_VPU_STATUS_PERF_CLK_MASK BIT_MASK(11)
#define VPU_HW_BTRS_LNL_VPU_STATUS_DISABLE_CLK_RELINQUISH_MASK BIT_MASK(12)
#define VPU_HW_BTRS_LNL_IP_RESET 0x00000160u
#define VPU_HW_BTRS_LNL_IP_RESET_TRIGGER_MASK BIT_MASK(0)
#define VPU_HW_BTRS_LNL_D0I3_CONTROL 0x00000164u
#define VPU_HW_BTRS_LNL_D0I3_CONTROL_INPROGRESS_MASK BIT_MASK(0)
#define VPU_HW_BTRS_LNL_D0I3_CONTROL_I3_MASK BIT_MASK(2)
#define VPU_HW_BTRS_LNL_VPU_TELEMETRY_OFFSET 0x00000168u
#define VPU_HW_BTRS_LNL_VPU_TELEMETRY_SIZE 0x0000016cu
#define VPU_HW_BTRS_LNL_VPU_TELEMETRY_ENABLE 0x00000170u
#define VPU_HW_BTRS_LNL_FMIN_FUSE 0x00000174u
#define VPU_HW_BTRS_LNL_FMIN_FUSE_MIN_RATIO_MASK GENMASK(7, 0)
#define VPU_HW_BTRS_LNL_FMIN_FUSE_PN_RATIO_MASK GENMASK(15, 8)
#define VPU_HW_BTRS_LNL_FMAX_FUSE 0x00000178u
#define VPU_HW_BTRS_LNL_FMAX_FUSE_MAX_RATIO_MASK GENMASK(7, 0)
#endif /* __IVPU_HW_BTRS_LNL_REG_H__ */


@@ -0,0 +1,83 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020-2023 Intel Corporation
*/
#ifndef __IVPU_HW_BTRS_MTL_REG_H__
#define __IVPU_HW_BTRS_MTL_REG_H__
#include <linux/bits.h>
#define VPU_HW_BTRS_MTL_INTERRUPT_TYPE 0x00000000u
#define VPU_HW_BTRS_MTL_INTERRUPT_STAT 0x00000004u
#define VPU_HW_BTRS_MTL_INTERRUPT_STAT_FREQ_CHANGE_MASK BIT_MASK(0)
#define VPU_HW_BTRS_MTL_INTERRUPT_STAT_ATS_ERR_MASK BIT_MASK(1)
#define VPU_HW_BTRS_MTL_INTERRUPT_STAT_UFI_ERR_MASK BIT_MASK(2)
#define VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0 0x00000008u
#define VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0_MIN_RATIO_MASK GENMASK(15, 0)
#define VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0_MAX_RATIO_MASK GENMASK(31, 16)
#define VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1 0x0000000cu
#define VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK GENMASK(15, 0)
#define VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1_EPP_MASK GENMASK(31, 16)
#define VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2 0x00000010u
#define VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2_CONFIG_MASK GENMASK(15, 0)
#define VPU_HW_BTRS_MTL_WP_REQ_CMD 0x00000014u
#define VPU_HW_BTRS_MTL_WP_REQ_CMD_SEND_MASK BIT_MASK(0)
#define VPU_HW_BTRS_MTL_WP_DOWNLOAD 0x00000018u
#define VPU_HW_BTRS_MTL_WP_DOWNLOAD_TARGET_RATIO_MASK GENMASK(15, 0)
#define VPU_HW_BTRS_MTL_CURRENT_PLL 0x0000001cu
#define VPU_HW_BTRS_MTL_CURRENT_PLL_RATIO_MASK GENMASK(15, 0)
#define VPU_HW_BTRS_MTL_PLL_ENABLE 0x00000020u
#define VPU_HW_BTRS_MTL_FMIN_FUSE 0x00000024u
#define VPU_HW_BTRS_MTL_FMIN_FUSE_MIN_RATIO_MASK GENMASK(7, 0)
#define VPU_HW_BTRS_MTL_FMIN_FUSE_PN_RATIO_MASK GENMASK(15, 8)
#define VPU_HW_BTRS_MTL_FMAX_FUSE 0x00000028u
#define VPU_HW_BTRS_MTL_FMAX_FUSE_MAX_RATIO_MASK GENMASK(7, 0)
#define VPU_HW_BTRS_MTL_TILE_FUSE 0x0000002cu
#define VPU_HW_BTRS_MTL_TILE_FUSE_VALID_MASK BIT_MASK(0)
#define VPU_HW_BTRS_MTL_TILE_FUSE_SKU_MASK GENMASK(3, 2)
#define VPU_HW_BTRS_MTL_LOCAL_INT_MASK 0x00000030u
#define VPU_HW_BTRS_MTL_GLOBAL_INT_MASK 0x00000034u
#define VPU_HW_BTRS_MTL_PLL_STATUS 0x00000040u
#define VPU_HW_BTRS_MTL_PLL_STATUS_LOCK_MASK BIT_MASK(1)
#define VPU_HW_BTRS_MTL_VPU_STATUS 0x00000044u
#define VPU_HW_BTRS_MTL_VPU_STATUS_READY_MASK BIT_MASK(0)
#define VPU_HW_BTRS_MTL_VPU_STATUS_IDLE_MASK BIT_MASK(1)
#define VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL 0x00000060u
#define VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL_INPROGRESS_MASK BIT_MASK(0)
#define VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL_I3_MASK BIT_MASK(2)
#define VPU_HW_BTRS_MTL_VPU_IP_RESET 0x00000050u
#define VPU_HW_BTRS_MTL_VPU_IP_RESET_TRIGGER_MASK BIT_MASK(0)
#define VPU_HW_BTRS_MTL_VPU_TELEMETRY_OFFSET 0x00000080u
#define VPU_HW_BTRS_MTL_VPU_TELEMETRY_SIZE 0x00000084u
#define VPU_HW_BTRS_MTL_VPU_TELEMETRY_ENABLE 0x00000088u
#define VPU_HW_BTRS_MTL_ATS_ERR_LOG_0 0x000000a0u
#define VPU_HW_BTRS_MTL_ATS_ERR_LOG_1 0x000000a4u
#define VPU_HW_BTRS_MTL_ATS_ERR_CLEAR 0x000000a8u
#define VPU_HW_BTRS_MTL_UFI_ERR_LOG 0x000000b0u
#define VPU_HW_BTRS_MTL_UFI_ERR_LOG_CQ_ID_MASK GENMASK(11, 0)
#define VPU_HW_BTRS_MTL_UFI_ERR_LOG_AXI_ID_MASK GENMASK(19, 12)
#define VPU_HW_BTRS_MTL_UFI_ERR_LOG_OPCODE_MASK GENMASK(24, 20)
#define VPU_HW_BTRS_MTL_UFI_ERR_CLEAR 0x000000b4u
#endif /* __IVPU_HW_BTRS_MTL_REG_H__ */

File diff suppressed because it is too large


@@ -0,0 +1,36 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020-2024 Intel Corporation
*/
#ifndef __IVPU_HW_IP_H__
#define __IVPU_HW_IP_H__
#include "ivpu_drv.h"
int ivpu_hw_ip_host_ss_configure(struct ivpu_device *vdev);
void ivpu_hw_ip_idle_gen_enable(struct ivpu_device *vdev);
void ivpu_hw_ip_idle_gen_disable(struct ivpu_device *vdev);
int ivpu_hw_ip_pwr_domain_enable(struct ivpu_device *vdev);
int ivpu_hw_ip_host_ss_axi_enable(struct ivpu_device *vdev);
int ivpu_hw_ip_top_noc_enable(struct ivpu_device *vdev);
u64 ivpu_hw_ip_read_perf_timer_counter(struct ivpu_device *vdev);
void ivpu_hw_ip_snoop_disable(struct ivpu_device *vdev);
void ivpu_hw_ip_tbu_mmu_enable(struct ivpu_device *vdev);
int ivpu_hw_ip_soc_cpu_boot(struct ivpu_device *vdev);
void ivpu_hw_ip_wdt_disable(struct ivpu_device *vdev);
void ivpu_hw_ip_diagnose_failure(struct ivpu_device *vdev);
u32 ivpu_hw_ip_ipc_rx_count_get(struct ivpu_device *vdev);
void ivpu_hw_ip_irq_clear(struct ivpu_device *vdev);
bool ivpu_hw_ip_irq_handler_37xx(struct ivpu_device *vdev, int irq);
bool ivpu_hw_ip_irq_handler_40xx(struct ivpu_device *vdev, int irq);
void ivpu_hw_ip_db_set(struct ivpu_device *vdev, u32 db_id);
u32 ivpu_hw_ip_ipc_rx_addr_get(struct ivpu_device *vdev);
void ivpu_hw_ip_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr);
void ivpu_hw_ip_irq_enable(struct ivpu_device *vdev);
void ivpu_hw_ip_irq_disable(struct ivpu_device *vdev);
void ivpu_hw_ip_fabric_req_override_enable_50xx(struct ivpu_device *vdev);
void ivpu_hw_ip_fabric_req_override_disable_50xx(struct ivpu_device *vdev);
#endif /* __IVPU_HW_IP_H__ */


@@ -129,7 +129,7 @@ static void ivpu_ipc_tx_release(struct ivpu_device *vdev, u32 vpu_addr)
static void ivpu_ipc_tx(struct ivpu_device *vdev, u32 vpu_addr)
{
ivpu_hw_reg_ipc_tx_set(vdev, vpu_addr);
ivpu_hw_ipc_tx_set(vdev, vpu_addr);
}
static void
@@ -210,8 +210,7 @@ void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *c
ivpu_ipc_tx_release(vdev, cons->tx_vpu_addr);
}
static int
ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct vpu_jsm_msg *req)
int ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct vpu_jsm_msg *req)
{
struct ivpu_ipc_info *ipc = vdev->ipc;
int ret;
@@ -378,7 +377,7 @@ ivpu_ipc_match_consumer(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons
return false;
}
void ivpu_ipc_irq_handler(struct ivpu_device *vdev, bool *wake_thread)
void ivpu_ipc_irq_handler(struct ivpu_device *vdev)
{
struct ivpu_ipc_info *ipc = vdev->ipc;
struct ivpu_ipc_consumer *cons;
@@ -392,8 +391,8 @@ void ivpu_ipc_irq_handler(struct ivpu_device *vdev, bool *wake_thread)
* The driver needs to purge all messages from the IPC FIFO to clear the IPC interrupt.
* Unless the FIFO is purged down to 0, no further IPC interrupts will be generated.
*/
while (ivpu_hw_reg_ipc_rx_count_get(vdev)) {
vpu_addr = ivpu_hw_reg_ipc_rx_addr_get(vdev);
while (ivpu_hw_ipc_rx_count_get(vdev)) {
vpu_addr = ivpu_hw_ipc_rx_addr_get(vdev);
if (vpu_addr == REG_IO_ERROR) {
ivpu_err_ratelimited(vdev, "Failed to read IPC rx addr register\n");
return;
@@ -442,11 +441,12 @@ void ivpu_ipc_irq_handler(struct ivpu_device *vdev, bool *wake_thread)
}
}
if (wake_thread)
*wake_thread = !list_empty(&ipc->cb_msg_list);
if (!list_empty(&ipc->cb_msg_list))
if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_IPC))
ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
}
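The hard-IRQ half now queues an IRQ source into a kfifo instead of passing a wake flag back to the caller. A thread-side consumer would drain it roughly as below (a sketch only; the fifo element type and the MMU branch body are assumptions based on the sources used in this series):

	u32 irq_src;

	while (kfifo_get(&vdev->hw->irq.fifo, &irq_src)) {
		switch (irq_src) {
		case IVPU_HW_IRQ_SRC_IPC:
			ivpu_ipc_irq_thread_handler(vdev);
			break;
		case IVPU_HW_IRQ_SRC_MMU_EVTQ:
			/* dump/handle the MMU event queue */
			break;
		}
	}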
irqreturn_t ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev)
void ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev)
{
struct ivpu_ipc_info *ipc = vdev->ipc;
struct ivpu_ipc_rx_msg *rx_msg, *r;
@@ -462,8 +462,6 @@ irqreturn_t ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev)
rx_msg->callback(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
ivpu_ipc_rx_msg_del(vdev, rx_msg);
}
return IRQ_HANDLED;
}
int ivpu_ipc_init(struct ivpu_device *vdev)


@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020-2023 Intel Corporation
* Copyright (C) 2020-2024 Intel Corporation
*/
#ifndef __IVPU_IPC_H__
@@ -89,13 +89,15 @@ void ivpu_ipc_enable(struct ivpu_device *vdev);
void ivpu_ipc_disable(struct ivpu_device *vdev);
void ivpu_ipc_reset(struct ivpu_device *vdev);
void ivpu_ipc_irq_handler(struct ivpu_device *vdev, bool *wake_thread);
irqreturn_t ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev);
void ivpu_ipc_irq_handler(struct ivpu_device *vdev);
void ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev);
void ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
u32 channel, ivpu_ipc_rx_callback_t callback);
void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons);
int ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
struct vpu_jsm_msg *req);
int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
struct ivpu_ipc_hdr *ipc_buf, struct vpu_jsm_msg *jsm_msg,
unsigned long timeout_ms);


@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2023 Intel Corporation
* Copyright (C) 2020-2024 Intel Corporation
*/
#include <drm/drm_file.h>
@@ -12,11 +12,13 @@
#include <uapi/drm/ivpu_accel.h>
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_pm.h"
#include "vpu_boot_api.h"
#define CMD_BUF_IDX 0
#define JOB_ID_JOB_MASK GENMASK(7, 0)
@@ -25,14 +27,60 @@
static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq)
{
ivpu_hw_reg_db_set(vdev, cmdq->db_id);
ivpu_hw_db_set(vdev, cmdq->db_id);
}
static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv, u16 engine)
static int ivpu_preemption_buffers_create(struct ivpu_device *vdev,
struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
u64 primary_size = ALIGN(vdev->fw->primary_preempt_buf_size, PAGE_SIZE);
u64 secondary_size = ALIGN(vdev->fw->secondary_preempt_buf_size, PAGE_SIZE);
struct ivpu_addr_range range;
if (vdev->hw->sched_mode != VPU_SCHEDULING_MODE_HW)
return 0;
range.start = vdev->hw->ranges.user.end - (primary_size * IVPU_NUM_CMDQS_PER_CTX);
range.end = vdev->hw->ranges.user.end;
cmdq->primary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &range, primary_size,
DRM_IVPU_BO_WC);
if (!cmdq->primary_preempt_buf) {
ivpu_err(vdev, "Failed to create primary preemption buffer\n");
return -ENOMEM;
}
range.start = vdev->hw->ranges.shave.end - (secondary_size * IVPU_NUM_CMDQS_PER_CTX);
range.end = vdev->hw->ranges.shave.end;
cmdq->secondary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &range, secondary_size,
DRM_IVPU_BO_WC);
if (!cmdq->secondary_preempt_buf) {
ivpu_err(vdev, "Failed to create secondary preemption buffer\n");
goto err_free_primary;
}
return 0;
err_free_primary:
ivpu_bo_free(cmdq->primary_preempt_buf);
return -ENOMEM;
}
static void ivpu_preemption_buffers_free(struct ivpu_device *vdev,
struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
if (vdev->hw->sched_mode != VPU_SCHEDULING_MODE_HW)
return;
drm_WARN_ON(&vdev->drm, !cmdq->primary_preempt_buf);
drm_WARN_ON(&vdev->drm, !cmdq->secondary_preempt_buf);
ivpu_bo_free(cmdq->primary_preempt_buf);
ivpu_bo_free(cmdq->secondary_preempt_buf);
}
static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
{
struct xa_limit db_xa_limit = {.max = IVPU_MAX_DB, .min = IVPU_MIN_DB};
struct ivpu_device *vdev = file_priv->vdev;
struct vpu_job_queue_header *jobq_header;
struct ivpu_cmdq *cmdq;
int ret;
@@ -50,18 +98,14 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv, u16 e
if (!cmdq->mem)
goto err_erase_xa;
cmdq->entry_count = (u32)((ivpu_bo_size(cmdq->mem) - sizeof(struct vpu_job_queue_header)) /
sizeof(struct vpu_job_queue_entry));
cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem);
jobq_header = &cmdq->jobq->header;
jobq_header->engine_idx = engine;
jobq_header->head = 0;
jobq_header->tail = 0;
wmb(); /* Flush WC buffer for jobq->header */
ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
if (ret)
goto err_free_cmdq_mem;
return cmdq;
err_free_cmdq_mem:
ivpu_bo_free(cmdq->mem);
err_erase_xa:
xa_erase(&vdev->db_xa, cmdq->db_id);
err_free_cmdq:
@@ -74,92 +118,183 @@ static void ivpu_cmdq_free(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *c
if (!cmdq)
return;
ivpu_preemption_buffers_free(file_priv->vdev, file_priv, cmdq);
ivpu_bo_free(cmdq->mem);
xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
kfree(cmdq);
}
static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u16 engine)
static int ivpu_hws_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 engine,
u8 priority)
{
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];
int ret;
ret = ivpu_jsm_hws_create_cmdq(vdev, file_priv->ctx.id, file_priv->ctx.id, cmdq->db_id,
task_pid_nr(current), engine,
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
if (ret)
return ret;
ret = ivpu_jsm_hws_set_context_sched_properties(vdev, file_priv->ctx.id, cmdq->db_id,
priority);
if (ret)
return ret;
return 0;
}
static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
struct ivpu_device *vdev = file_priv->vdev;
int ret;
if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW)
ret = ivpu_jsm_hws_register_db(vdev, file_priv->ctx.id, cmdq->db_id, cmdq->db_id,
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
else
ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
if (!ret)
ivpu_dbg(vdev, JOB, "DB %d registered to ctx %d\n", cmdq->db_id, file_priv->ctx.id);
return ret;
}
static int
ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 engine, u8 priority)
{
struct ivpu_device *vdev = file_priv->vdev;
struct vpu_job_queue_header *jobq_header;
int ret;
lockdep_assert_held(&file_priv->lock);
if (cmdq->db_registered)
return 0;
cmdq->entry_count = (u32)((ivpu_bo_size(cmdq->mem) - sizeof(struct vpu_job_queue_header)) /
sizeof(struct vpu_job_queue_entry));
cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem);
jobq_header = &cmdq->jobq->header;
jobq_header->engine_idx = engine;
jobq_header->head = 0;
jobq_header->tail = 0;
wmb(); /* Flush WC buffer for jobq->header */
if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW) {
ret = ivpu_hws_cmdq_init(file_priv, cmdq, engine, priority);
if (ret)
return ret;
}
ret = ivpu_register_db(file_priv, cmdq);
if (ret)
return ret;
cmdq->db_registered = true;
return 0;
}
static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
struct ivpu_device *vdev = file_priv->vdev;
int ret;
lockdep_assert_held(&file_priv->lock);
if (!cmdq->db_registered)
return 0;
cmdq->db_registered = false;
if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW) {
ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->db_id);
if (!ret)
ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->db_id);
}
ret = ivpu_jsm_unregister_db(vdev, cmdq->db_id);
if (!ret)
ivpu_dbg(vdev, JOB, "DB %d unregistered\n", cmdq->db_id);
return 0;
}
static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u16 engine,
u8 priority)
{
int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx];
int ret;
lockdep_assert_held(&file_priv->lock);
if (!cmdq) {
cmdq = ivpu_cmdq_alloc(file_priv, engine);
cmdq = ivpu_cmdq_alloc(file_priv);
if (!cmdq)
return NULL;
file_priv->cmdq[engine] = cmdq;
file_priv->cmdq[cmdq_idx] = cmdq;
}
if (cmdq->db_registered)
return cmdq;
ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
ret = ivpu_cmdq_init(file_priv, cmdq, engine, priority);
if (ret)
return NULL;
cmdq->db_registered = true;
return cmdq;
}
static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u16 engine)
static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u16 engine, u8 priority)
{
struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];
int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx];
lockdep_assert_held(&file_priv->lock);
if (cmdq) {
file_priv->cmdq[engine] = NULL;
if (cmdq->db_registered)
ivpu_jsm_unregister_db(file_priv->vdev, cmdq->db_id);
file_priv->cmdq[cmdq_idx] = NULL;
ivpu_cmdq_fini(file_priv, cmdq);
ivpu_cmdq_free(file_priv, cmdq);
}
}
void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
{
int i;
u16 engine;
u8 priority;
lockdep_assert_held(&file_priv->lock);
for (i = 0; i < IVPU_NUM_ENGINES; i++)
ivpu_cmdq_release_locked(file_priv, i);
for (engine = 0; engine < IVPU_NUM_ENGINES; engine++)
for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++)
ivpu_cmdq_release_locked(file_priv, engine, priority);
}
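The per-file cmdq array is now indexed by (engine, priority) pairs. IVPU_CMDQ_INDEX() itself is not part of this diff; a flat row-major mapping along these lines is the likely shape (an assumption, not the verbatim driver macro):

	#define IVPU_CMDQ_INDEX(engine, priority) \
		((engine) * IVPU_NUM_PRIORITIES + (priority))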
/*
* Mark the doorbell as unregistered and reset job queue pointers.
* Mark the doorbell as unregistered.
* This function needs to be called when the VPU hardware is restarted
* and the FW loses job queue state. The next time the job queue is used, it
* will be registered again.
*/
static void ivpu_cmdq_reset_locked(struct ivpu_file_priv *file_priv, u16 engine)
static void ivpu_cmdq_reset(struct ivpu_file_priv *file_priv)
{
struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];
lockdep_assert_held(&file_priv->lock);
if (cmdq) {
cmdq->db_registered = false;
cmdq->jobq->header.head = 0;
cmdq->jobq->header.tail = 0;
wmb(); /* Flush WC buffer for jobq header */
}
}
static void ivpu_cmdq_reset_all(struct ivpu_file_priv *file_priv)
{
int i;
u16 engine;
u8 priority;
mutex_lock(&file_priv->lock);
for (i = 0; i < IVPU_NUM_ENGINES; i++)
ivpu_cmdq_reset_locked(file_priv, i);
for (engine = 0; engine < IVPU_NUM_ENGINES; engine++) {
for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++) {
int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx];
if (cmdq)
cmdq->db_registered = false;
}
}
mutex_unlock(&file_priv->lock);
}
@@ -172,10 +307,36 @@ void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
mutex_lock(&vdev->context_list_lock);
xa_for_each(&vdev->context_xa, ctx_id, file_priv)
ivpu_cmdq_reset_all(file_priv);
ivpu_cmdq_reset(file_priv);
mutex_unlock(&vdev->context_list_lock);
}
static void ivpu_cmdq_fini_all(struct ivpu_file_priv *file_priv)
{
u16 engine;
u8 priority;
for (engine = 0; engine < IVPU_NUM_ENGINES; engine++) {
for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++) {
int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
if (file_priv->cmdq[cmdq_idx])
ivpu_cmdq_fini(file_priv, file_priv->cmdq[cmdq_idx]);
}
}
}
void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv)
{
struct ivpu_device *vdev = file_priv->vdev;
lockdep_assert_held(&file_priv->lock);
ivpu_cmdq_fini_all(file_priv);
if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_OS)
ivpu_jsm_context_release(vdev, file_priv->ctx.id);
}
static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
@@ -199,6 +360,15 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
entry->flags = 0;
if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_SUBMISSION))
entry->flags = VPU_JOB_FLAGS_NULL_SUBMISSION_MASK;
if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW &&
(unlikely(!(ivpu_test_mode & IVPU_TEST_MODE_PREEMPTION_DISABLE)))) {
entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr;
entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf);
entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr;
entry->secondary_preempt_buf_size = ivpu_bo_size(cmdq->secondary_preempt_buf);
}
wmb(); /* Ensure that tail is updated after filling entry */
header->tail = next_entry;
wmb(); /* Flush WC buffer for jobq header */
@@ -295,11 +465,28 @@ err_free_job:
return NULL;
}
static struct ivpu_job *ivpu_job_remove_from_submitted_jobs(struct ivpu_device *vdev, u32 job_id)
{
struct ivpu_job *job;
xa_lock(&vdev->submitted_jobs_xa);
job = __xa_erase(&vdev->submitted_jobs_xa, job_id);
if (xa_empty(&vdev->submitted_jobs_xa) && job) {
vdev->busy_time = ktime_add(ktime_sub(ktime_get(), vdev->busy_start_ts),
vdev->busy_time);
}
xa_unlock(&vdev->submitted_jobs_xa);
return job;
}
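This bookkeeping pairs with ivpu_job_submit() below: busy_start_ts is stamped when the submitted-jobs xarray goes from empty to non-empty, and the elapsed interval is folded into busy_time when the last job is erased, so at any instant

	total_busy = busy_time + (jobs pending ? ktime_get() - busy_start_ts : 0)

which is exactly the sum reported by the npu_busy_time_us attribute in ivpu_sysfs.c further down.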
static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status)
{
struct ivpu_job *job;
job = xa_erase(&vdev->submitted_jobs_xa, job_id);
job = ivpu_job_remove_from_submitted_jobs(vdev, job_id);
if (!job)
return -ENOENT;
@@ -328,12 +515,13 @@ void ivpu_jobs_abort_all(struct ivpu_device *vdev)
ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);
}
static int ivpu_job_submit(struct ivpu_job *job)
static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
{
struct ivpu_file_priv *file_priv = job->file_priv;
struct ivpu_device *vdev = job->vdev;
struct xa_limit job_id_range;
struct ivpu_cmdq *cmdq;
bool is_first_job;
int ret;
ret = ivpu_rpm_get(vdev);
@@ -342,10 +530,10 @@ static int ivpu_job_submit(struct ivpu_job *job)
mutex_lock(&file_priv->lock);
cmdq = ivpu_cmdq_acquire(job->file_priv, job->engine_idx);
cmdq = ivpu_cmdq_acquire(job->file_priv, job->engine_idx, priority);
if (!cmdq) {
ivpu_warn_ratelimited(vdev, "Failed get job queue, ctx %d engine %d\n",
file_priv->ctx.id, job->engine_idx);
ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d engine %d prio %d\n",
file_priv->ctx.id, job->engine_idx, priority);
ret = -EINVAL;
goto err_unlock_file_priv;
}
@@ -354,6 +542,7 @@ static int ivpu_job_submit(struct ivpu_job *job)
job_id_range.max = job_id_range.min | JOB_ID_JOB_MASK;
xa_lock(&vdev->submitted_jobs_xa);
is_first_job = xa_empty(&vdev->submitted_jobs_xa);
ret = __xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL);
if (ret) {
ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
@@ -373,10 +562,12 @@ static int ivpu_job_submit(struct ivpu_job *job)
wmb(); /* Flush WC buffer for jobq header */
} else {
ivpu_cmdq_ring_db(vdev, cmdq);
if (is_first_job)
vdev->busy_start_ts = ktime_get();
}
ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d engine %d addr 0x%llx next %d\n",
job->job_id, file_priv->ctx.id, job->engine_idx,
ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d engine %d prio %d addr 0x%llx next %d\n",
job->job_id, file_priv->ctx.id, job->engine_idx, priority,
job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);
xa_unlock(&vdev->submitted_jobs_xa);
@@ -464,6 +655,14 @@ unlock_reservations:
return ret;
}
static inline u8 ivpu_job_to_hws_priority(struct ivpu_file_priv *file_priv, u8 priority)
{
if (priority == DRM_IVPU_JOB_PRIORITY_DEFAULT)
return DRM_IVPU_JOB_PRIORITY_NORMAL;
return priority - 1;
}
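Assuming the uapi priority values run DRM_IVPU_JOB_PRIORITY_DEFAULT = 0 through DRM_IVPU_JOB_PRIORITY_REALTIME = 4 (taken from the uapi header, not this diff), the `priority - 1` shift lines the explicit levels up with the four bands configured by ivpu_jsm_hws_setup_priority_bands(), while DEFAULT short-circuits to the NORMAL uapi value:

	/* IDLE(1) -> band 0, NORMAL(2) -> band 1, FOCUS(3) -> band 2, REALTIME(4) -> band 3 */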
int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
@@ -472,6 +671,7 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
struct ivpu_job *job;
u32 *buf_handles;
int idx, ret;
u8 priority;
if (params->engine > DRM_IVPU_ENGINE_COPY)
return -EINVAL;
@@ -525,8 +725,10 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto err_destroy_job;
}
priority = ivpu_job_to_hws_priority(file_priv, params->priority);
down_read(&vdev->pm->reset_lock);
ret = ivpu_job_submit(job);
ret = ivpu_job_submit(job, priority);
up_read(&vdev->pm->reset_lock);
if (ret)
goto err_signal_fence;


@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020-2023 Intel Corporation
* Copyright (C) 2020-2024 Intel Corporation
*/
#ifndef __IVPU_JOB_H__
@@ -24,6 +24,8 @@ struct ivpu_file_priv;
*/
struct ivpu_cmdq {
struct vpu_job_queue *jobq;
struct ivpu_bo *primary_preempt_buf;
struct ivpu_bo *secondary_preempt_buf;
struct ivpu_bo *mem;
u32 entry_count;
u32 db_id;
@@ -55,6 +57,8 @@ struct ivpu_job {
int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv);
void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv);
void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev);


@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2023 Intel Corporation
* Copyright (C) 2020-2024 Intel Corporation
*/
#include "ivpu_drv.h"
@@ -103,14 +103,10 @@ int ivpu_jsm_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 db_id,
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret) {
ivpu_err_ratelimited(vdev, "Failed to register doorbell %d: %d\n", db_id, ret);
return ret;
}
if (ret)
ivpu_err_ratelimited(vdev, "Failed to register doorbell %u: %d\n", db_id, ret);
ivpu_dbg(vdev, JSM, "Doorbell %d registered to context %d\n", db_id, ctx_id);
return 0;
return ret;
}
int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id)
@@ -123,14 +119,10 @@ int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id)
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_UNREGISTER_DB_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret) {
ivpu_warn_ratelimited(vdev, "Failed to unregister doorbell %d: %d\n", db_id, ret);
return ret;
}
if (ret)
ivpu_warn_ratelimited(vdev, "Failed to unregister doorbell %u: %d\n", db_id, ret);
ivpu_dbg(vdev, JSM, "Doorbell %d unregistered\n", db_id);
return 0;
return ret;
}
int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat)
@@ -255,11 +247,16 @@ int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid)
{
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SSID_RELEASE };
struct vpu_jsm_msg resp;
int ret;
req.payload.ssid_release.host_ssid = host_ssid;
return ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret)
ivpu_warn_ratelimited(vdev, "Failed to release context: %d\n", ret);
return ret;
}
int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev)
@@ -281,3 +278,283 @@ int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev)
return ivpu_hw_wait_for_idle(vdev);
}
int ivpu_jsm_hws_create_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_group, u32 cmdq_id,
u32 pid, u32 engine, u64 cmdq_base, u32 cmdq_size)
{
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_CREATE_CMD_QUEUE };
struct vpu_jsm_msg resp;
int ret;
req.payload.hws_create_cmdq.host_ssid = ctx_id;
req.payload.hws_create_cmdq.process_id = pid;
req.payload.hws_create_cmdq.engine_idx = engine;
req.payload.hws_create_cmdq.cmdq_group = cmdq_group;
req.payload.hws_create_cmdq.cmdq_id = cmdq_id;
req.payload.hws_create_cmdq.cmdq_base = cmdq_base;
req.payload.hws_create_cmdq.cmdq_size = cmdq_size;
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret)
ivpu_warn_ratelimited(vdev, "Failed to create command queue: %d\n", ret);
return ret;
}
int ivpu_jsm_hws_destroy_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id)
{
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DESTROY_CMD_QUEUE };
struct vpu_jsm_msg resp;
int ret;
req.payload.hws_destroy_cmdq.host_ssid = ctx_id;
req.payload.hws_destroy_cmdq.cmdq_id = cmdq_id;
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret)
ivpu_warn_ratelimited(vdev, "Failed to destroy command queue: %d\n", ret);
return ret;
}
int ivpu_jsm_hws_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id, u32 db_id,
u64 cmdq_base, u32 cmdq_size)
{
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_REGISTER_DB };
struct vpu_jsm_msg resp;
int ret = 0;
req.payload.hws_register_db.db_id = db_id;
req.payload.hws_register_db.host_ssid = ctx_id;
req.payload.hws_register_db.cmdq_id = cmdq_id;
req.payload.hws_register_db.cmdq_base = cmdq_base;
req.payload.hws_register_db.cmdq_size = cmdq_size;
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret)
ivpu_err_ratelimited(vdev, "Failed to register doorbell %u: %d\n", db_id, ret);
return ret;
}
int ivpu_jsm_hws_resume_engine(struct ivpu_device *vdev, u32 engine)
{
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_ENGINE_RESUME };
struct vpu_jsm_msg resp;
int ret;
if (engine >= VPU_ENGINE_NB)
return -EINVAL;
req.payload.hws_resume_engine.engine_idx = engine;
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret)
ivpu_err_ratelimited(vdev, "Failed to resume engine %d: %d\n", engine, ret);
return ret;
}
int ivpu_jsm_hws_set_context_sched_properties(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id,
u32 priority)
{
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES };
struct vpu_jsm_msg resp;
int ret;
req.payload.hws_set_context_sched_properties.host_ssid = ctx_id;
req.payload.hws_set_context_sched_properties.cmdq_id = cmdq_id;
req.payload.hws_set_context_sched_properties.priority_band = priority;
req.payload.hws_set_context_sched_properties.realtime_priority_level = 0;
req.payload.hws_set_context_sched_properties.in_process_priority = 0;
req.payload.hws_set_context_sched_properties.context_quantum = 20000;
req.payload.hws_set_context_sched_properties.grace_period_same_priority = 10000;
req.payload.hws_set_context_sched_properties.grace_period_lower_priority = 0;
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret)
ivpu_warn_ratelimited(vdev, "Failed to set context sched properties: %d\n", ret);
return ret;
}
int ivpu_jsm_hws_set_scheduling_log(struct ivpu_device *vdev, u32 engine_idx, u32 host_ssid,
u64 vpu_log_buffer_va)
{
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG };
struct vpu_jsm_msg resp;
int ret;
req.payload.hws_set_scheduling_log.engine_idx = engine_idx;
req.payload.hws_set_scheduling_log.host_ssid = host_ssid;
req.payload.hws_set_scheduling_log.vpu_log_buffer_va = vpu_log_buffer_va;
req.payload.hws_set_scheduling_log.notify_index = 0;
req.payload.hws_set_scheduling_log.enable_extra_events =
ivpu_test_mode & IVPU_TEST_MODE_HWS_EXTRA_EVENTS;
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret)
ivpu_warn_ratelimited(vdev, "Failed to set scheduling log: %d\n", ret);
return ret;
}
int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev)
{
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP };
struct vpu_jsm_msg resp;
int ret;
/* Idle */
req.payload.hws_priority_band_setup.grace_period[0] = 0;
req.payload.hws_priority_band_setup.process_grace_period[0] = 50000;
req.payload.hws_priority_band_setup.process_quantum[0] = 160000;
/* Normal */
req.payload.hws_priority_band_setup.grace_period[1] = 50000;
req.payload.hws_priority_band_setup.process_grace_period[1] = 50000;
req.payload.hws_priority_band_setup.process_quantum[1] = 300000;
/* Focus */
req.payload.hws_priority_band_setup.grace_period[2] = 50000;
req.payload.hws_priority_band_setup.process_grace_period[2] = 50000;
req.payload.hws_priority_band_setup.process_quantum[2] = 200000;
/* Realtime */
req.payload.hws_priority_band_setup.grace_period[3] = 0;
req.payload.hws_priority_band_setup.process_grace_period[3] = 50000;
req.payload.hws_priority_band_setup.process_quantum[3] = 200000;
req.payload.hws_priority_band_setup.normal_band_percentage = 10;
ret = ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP,
&resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret)
ivpu_warn_ratelimited(vdev, "Failed to set priority bands: %d\n", ret);
return ret;
}
int ivpu_jsm_metric_streamer_start(struct ivpu_device *vdev, u64 metric_group_mask,
u64 sampling_rate, u64 buffer_addr, u64 buffer_size)
{
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_START };
struct vpu_jsm_msg resp;
int ret;
req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
req.payload.metric_streamer_start.sampling_rate = sampling_rate;
req.payload.metric_streamer_start.buffer_addr = buffer_addr;
req.payload.metric_streamer_start.buffer_size = buffer_size;
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_START_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret) {
ivpu_warn_ratelimited(vdev, "Failed to start metric streamer: ret %d\n", ret);
return ret;
}
return ret;
}
int ivpu_jsm_metric_streamer_stop(struct ivpu_device *vdev, u64 metric_group_mask)
{
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_STOP };
struct vpu_jsm_msg resp;
int ret;
req.payload.metric_streamer_stop.metric_group_mask = metric_group_mask;
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret)
ivpu_warn_ratelimited(vdev, "Failed to stop metric streamer: ret %d\n", ret);
return ret;
}
int ivpu_jsm_metric_streamer_update(struct ivpu_device *vdev, u64 metric_group_mask,
u64 buffer_addr, u64 buffer_size, u64 *bytes_written)
{
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_UPDATE };
struct vpu_jsm_msg resp;
int ret;
req.payload.metric_streamer_update.metric_group_mask = metric_group_mask;
req.payload.metric_streamer_update.buffer_addr = buffer_addr;
req.payload.metric_streamer_update.buffer_size = buffer_size;
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret) {
ivpu_warn_ratelimited(vdev, "Failed to update metric streamer: ret %d\n", ret);
return ret;
}
if (buffer_size && resp.payload.metric_streamer_done.bytes_written > buffer_size) {
ivpu_warn_ratelimited(vdev, "MS buffer overflow: bytes_written %#llx > buffer_size %#llx\n",
resp.payload.metric_streamer_done.bytes_written, buffer_size);
return -EOVERFLOW;
}
*bytes_written = resp.payload.metric_streamer_done.bytes_written;
return ret;
}
int ivpu_jsm_metric_streamer_info(struct ivpu_device *vdev, u64 metric_group_mask, u64 buffer_addr,
u64 buffer_size, u32 *sample_size, u64 *info_size)
{
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_INFO };
struct vpu_jsm_msg resp;
int ret;
req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
req.payload.metric_streamer_start.buffer_addr = buffer_addr;
req.payload.metric_streamer_start.buffer_size = buffer_size;
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret) {
ivpu_warn_ratelimited(vdev, "Failed to get metric streamer info: ret %d\n", ret);
return ret;
}
if (!resp.payload.metric_streamer_done.sample_size) {
ivpu_warn_ratelimited(vdev, "Invalid sample size\n");
return -EBADMSG;
}
if (sample_size)
*sample_size = resp.payload.metric_streamer_done.sample_size;
if (info_size)
*info_size = resp.payload.metric_streamer_done.bytes_written;
return ret;
}
int ivpu_jsm_dct_enable(struct ivpu_device *vdev, u32 active_us, u32 inactive_us)
{
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_ENABLE };
struct vpu_jsm_msg resp;
req.payload.pwr_dct_control.dct_active_us = active_us;
req.payload.pwr_dct_control.dct_inactive_us = inactive_us;
return ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_DCT_ENABLE_DONE,
&resp, VPU_IPC_CHAN_ASYNC_CMD,
vdev->timeout.jsm);
}
int ivpu_jsm_dct_disable(struct ivpu_device *vdev)
{
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_DISABLE };
struct vpu_jsm_msg resp;
return ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_DCT_DISABLE_DONE,
&resp, VPU_IPC_CHAN_ASYNC_CMD,
vdev->timeout.jsm);
}


@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020-2023 Intel Corporation
* Copyright (C) 2020-2024 Intel Corporation
*/
#ifndef __IVPU_JSM_MSG_H__
@@ -23,4 +23,24 @@ int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 tra
u64 trace_hw_component_mask);
int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid);
int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev);
int ivpu_jsm_hws_create_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_group, u32 cmdq_id,
u32 pid, u32 engine, u64 cmdq_base, u32 cmdq_size);
int ivpu_jsm_hws_destroy_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id);
int ivpu_jsm_hws_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id, u32 db_id,
u64 cmdq_base, u32 cmdq_size);
int ivpu_jsm_hws_resume_engine(struct ivpu_device *vdev, u32 engine);
int ivpu_jsm_hws_set_context_sched_properties(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id,
u32 priority);
int ivpu_jsm_hws_set_scheduling_log(struct ivpu_device *vdev, u32 engine_idx, u32 host_ssid,
u64 vpu_log_buffer_va);
int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev);
int ivpu_jsm_metric_streamer_start(struct ivpu_device *vdev, u64 metric_group_mask,
u64 sampling_rate, u64 buffer_addr, u64 buffer_size);
int ivpu_jsm_metric_streamer_stop(struct ivpu_device *vdev, u64 metric_group_mask);
int ivpu_jsm_metric_streamer_update(struct ivpu_device *vdev, u64 metric_group_mask,
u64 buffer_addr, u64 buffer_size, u64 *bytes_written);
int ivpu_jsm_metric_streamer_info(struct ivpu_device *vdev, u64 metric_group_mask, u64 buffer_addr,
u64 buffer_size, u32 *sample_size, u64 *info_size);
int ivpu_jsm_dct_enable(struct ivpu_device *vdev, u32 active_us, u32 inactive_us);
int ivpu_jsm_dct_disable(struct ivpu_device *vdev);
#endif


@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2023 Intel Corporation
* Copyright (C) 2020-2024 Intel Corporation
*/
#include <linux/circ_buf.h>
@@ -519,7 +519,8 @@ static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev)
if (ret)
return ret;
clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE);
if (!ivpu_is_force_snoop_enabled(vdev))
clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE);
REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, q->prod);
ret = ivpu_mmu_cmdq_wait_for_cons(vdev);
@@ -567,7 +568,8 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
int ret;
memset(mmu->cmdq.base, 0, IVPU_MMU_CMDQ_SIZE);
clflush_cache_range(mmu->cmdq.base, IVPU_MMU_CMDQ_SIZE);
if (!ivpu_is_force_snoop_enabled(vdev))
clflush_cache_range(mmu->cmdq.base, IVPU_MMU_CMDQ_SIZE);
mmu->cmdq.prod = 0;
mmu->cmdq.cons = 0;
@@ -661,7 +663,8 @@ static void ivpu_mmu_strtab_link_cd(struct ivpu_device *vdev, u32 sid)
WRITE_ONCE(entry[1], str[1]);
WRITE_ONCE(entry[0], str[0]);
clflush_cache_range(entry, IVPU_MMU_STRTAB_ENT_SIZE);
if (!ivpu_is_force_snoop_enabled(vdev))
clflush_cache_range(entry, IVPU_MMU_STRTAB_ENT_SIZE);
ivpu_dbg(vdev, MMU, "STRTAB write entry (SSID=%u): 0x%llx, 0x%llx\n", sid, str[0], str[1]);
}
@@ -735,7 +738,8 @@ static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
WRITE_ONCE(entry[3], cd[3]);
WRITE_ONCE(entry[0], cd[0]);
clflush_cache_range(entry, IVPU_MMU_CDTAB_ENT_SIZE);
if (!ivpu_is_force_snoop_enabled(vdev))
clflush_cache_range(entry, IVPU_MMU_CDTAB_ENT_SIZE);
ivpu_dbg(vdev, MMU, "CDTAB %s entry (SSID=%u, dma=%pad): 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
cd_dma ? "write" : "clear", ssid, &cd_dma, cd[0], cd[1], cd[2], cd[3]);
@@ -874,8 +878,9 @@ static void ivpu_mmu_dump_event(struct ivpu_device *vdev, u32 *event)
u64 in_addr = ((u64)event[5]) << 32 | event[4];
u32 sid = event[1];
ivpu_err(vdev, "MMU EVTQ: 0x%x (%s) SSID: %d SID: %d, e[2] %08x, e[3] %08x, in addr: 0x%llx, fetch addr: 0x%llx\n",
op, ivpu_mmu_event_to_str(op), ssid, sid, event[2], event[3], in_addr, fetch_addr);
ivpu_err_ratelimited(vdev, "MMU EVTQ: 0x%x (%s) SSID: %d SID: %d, e[2] %08x, e[3] %08x, in addr: 0x%llx, fetch addr: 0x%llx\n",
op, ivpu_mmu_event_to_str(op), ssid, sid,
event[2], event[3], in_addr, fetch_addr);
}
static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
@@ -911,6 +916,9 @@ void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
ivpu_mmu_user_context_mark_invalid(vdev, ssid);
REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons);
}
if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_MMU_EVTQ))
ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
}
void ivpu_mmu_evtq_dump(struct ivpu_device *vdev)


@@ -24,6 +24,7 @@
#define IVPU_MMU_ENTRY_FLAG_CONT BIT(52)
#define IVPU_MMU_ENTRY_FLAG_NG BIT(11)
#define IVPU_MMU_ENTRY_FLAG_AF BIT(10)
#define IVPU_MMU_ENTRY_FLAG_RO BIT(7)
#define IVPU_MMU_ENTRY_FLAG_USER BIT(6)
#define IVPU_MMU_ENTRY_FLAG_LLC_COHERENT BIT(2)
#define IVPU_MMU_ENTRY_FLAG_TYPE_PAGE BIT(1)
@@ -319,6 +320,91 @@ ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ct
return 0;
}
static void ivpu_mmu_context_set_page_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 vpu_addr)
{
int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] |= IVPU_MMU_ENTRY_FLAG_RO;
}
static void ivpu_mmu_context_split_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 vpu_addr)
{
int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] &= ~IVPU_MMU_ENTRY_FLAG_CONT;
}
static void ivpu_mmu_context_split_64k_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 vpu_addr)
{
u64 start = ALIGN_DOWN(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE);
u64 end = ALIGN(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE);
u64 offset = 0;
ivpu_dbg(vdev, MMU_MAP, "Split 64K page ctx: %u vpu_addr: 0x%llx\n", ctx->id, vpu_addr);
while (start + offset < end) {
ivpu_mmu_context_split_page(vdev, ctx, start + offset);
offset += IVPU_MMU_PAGE_SIZE;
}
}
int
ivpu_mmu_context_set_pages_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
size_t size)
{
u64 end = vpu_addr + size;
size_t size_left = size;
int ret;
if (size == 0)
return 0;
if (drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr | size, IVPU_MMU_PAGE_SIZE)))
return -EINVAL;
mutex_lock(&ctx->lock);
ivpu_dbg(vdev, MMU_MAP, "Set read-only pages ctx: %u vpu_addr: 0x%llx size: %lu\n",
ctx->id, vpu_addr, size);
if (!ivpu_disable_mmu_cont_pages) {
/* Split 64K contiguous page at the beginning if needed */
if (!IS_ALIGNED(vpu_addr, IVPU_MMU_CONT_PAGES_SIZE))
ivpu_mmu_context_split_64k_page(vdev, ctx, vpu_addr);
/* Split 64K contiguous page at the end if needed */
if (!IS_ALIGNED(vpu_addr + size, IVPU_MMU_CONT_PAGES_SIZE))
ivpu_mmu_context_split_64k_page(vdev, ctx, vpu_addr + size);
}
while (size_left) {
if (vpu_addr < end)
ivpu_mmu_context_set_page_ro(vdev, ctx, vpu_addr);
vpu_addr += IVPU_MMU_PAGE_SIZE;
size_left -= IVPU_MMU_PAGE_SIZE;
}
/* Ensure page table modifications are flushed from wc buffers to memory */
wmb();
mutex_unlock(&ctx->lock);
ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
if (ret)
ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
return 0;
}
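A worked example of the boundary handling, assuming 4K base pages (IVPU_MMU_PAGE_SIZE) and 64K contiguous mappings (IVPU_MMU_CONT_PAGES_SIZE): marking 0x11000..0x15000 read-only inside a contiguous page spanning 0x10000..0x20000 first clears the CONT flag on all sixteen 4K PTEs of that span (ALIGN_DOWN(0x11000, SZ_64K) = 0x10000 up to ALIGN(0x11000, SZ_64K) = 0x20000), then sets the RO bit on just the four PTEs of the requested range.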
static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
{
while (size) {


@@ -46,5 +46,7 @@ int ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *
u64 vpu_addr, struct sg_table *sgt, bool llc_coherent);
void ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 vpu_addr, struct sg_table *sgt);
int ivpu_mmu_context_set_pages_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 vpu_addr, size_t size);
#endif /* __IVPU_MMU_CONTEXT_H__ */


@@ -0,0 +1,309 @@
// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
* Copyright (C) 2020-2024 Intel Corporation
*/
#include <drm/drm_file.h>
#include "ivpu_drv.h"
#include "ivpu_gem.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_ms.h"
#include "ivpu_pm.h"
#define MS_INFO_BUFFER_SIZE SZ_16K
#define MS_NUM_BUFFERS 2
#define MS_READ_PERIOD_MULTIPLIER 2
#define MS_MIN_SAMPLE_PERIOD_NS 1000000
static struct ivpu_ms_instance *
get_instance_by_mask(struct ivpu_file_priv *file_priv, u64 metric_mask)
{
struct ivpu_ms_instance *ms;
lockdep_assert_held(&file_priv->ms_lock);
list_for_each_entry(ms, &file_priv->ms_instance_list, ms_instance_node)
if (ms->mask == metric_mask)
return ms;
return NULL;
}
int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
struct drm_ivpu_metric_streamer_start *args = data;
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_ms_instance *ms;
u64 single_buff_size;
u32 sample_size;
int ret;
if (!args->metric_group_mask || !args->read_period_samples ||
args->sampling_period_ns < MS_MIN_SAMPLE_PERIOD_NS)
return -EINVAL;
mutex_lock(&file_priv->ms_lock);
if (get_instance_by_mask(file_priv, args->metric_group_mask)) {
ivpu_err(vdev, "Instance already exists (mask %#llx)\n", args->metric_group_mask);
ret = -EALREADY;
goto unlock;
}
ms = kzalloc(sizeof(*ms), GFP_KERNEL);
if (!ms) {
ret = -ENOMEM;
goto unlock;
}
ms->mask = args->metric_group_mask;
ret = ivpu_jsm_metric_streamer_info(vdev, ms->mask, 0, 0, &sample_size, NULL);
if (ret)
goto err_free_ms;
single_buff_size = sample_size *
((u64)args->read_period_samples * MS_READ_PERIOD_MULTIPLIER);
ms->bo = ivpu_bo_create_global(vdev, PAGE_ALIGN(single_buff_size * MS_NUM_BUFFERS),
DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE);
if (!ms->bo) {
ivpu_err(vdev, "Failed to allocate MS buffer (size %llu)\n", single_buff_size);
ret = -ENOMEM;
goto err_free_ms;
}
ms->buff_size = ivpu_bo_size(ms->bo) / MS_NUM_BUFFERS;
ms->active_buff_vpu_addr = ms->bo->vpu_addr;
ms->inactive_buff_vpu_addr = ms->bo->vpu_addr + ms->buff_size;
ms->active_buff_ptr = ivpu_bo_vaddr(ms->bo);
ms->inactive_buff_ptr = ivpu_bo_vaddr(ms->bo) + ms->buff_size;
ret = ivpu_jsm_metric_streamer_start(vdev, ms->mask, args->sampling_period_ns,
ms->active_buff_vpu_addr, ms->buff_size);
if (ret)
goto err_free_bo;
args->sample_size = sample_size;
args->max_data_size = ivpu_bo_size(ms->bo);
list_add_tail(&ms->ms_instance_node, &file_priv->ms_instance_list);
goto unlock;
err_free_bo:
ivpu_bo_free(ms->bo);
err_free_ms:
kfree(ms);
unlock:
mutex_unlock(&file_priv->ms_lock);
return ret;
}
static int
copy_leftover_bytes(struct ivpu_ms_instance *ms,
void __user *user_ptr, u64 user_size, u64 *user_bytes_copied)
{
u64 copy_bytes;
if (ms->leftover_bytes) {
copy_bytes = min(user_size - *user_bytes_copied, ms->leftover_bytes);
if (copy_to_user(user_ptr + *user_bytes_copied, ms->leftover_addr, copy_bytes))
return -EFAULT;
ms->leftover_bytes -= copy_bytes;
ms->leftover_addr += copy_bytes;
*user_bytes_copied += copy_bytes;
}
return 0;
}
static int
copy_samples_to_user(struct ivpu_device *vdev, struct ivpu_ms_instance *ms,
void __user *user_ptr, u64 user_size, u64 *user_bytes_copied)
{
u64 bytes_written;
int ret;
*user_bytes_copied = 0;
ret = copy_leftover_bytes(ms, user_ptr, user_size, user_bytes_copied);
if (ret)
return ret;
if (*user_bytes_copied == user_size)
return 0;
ret = ivpu_jsm_metric_streamer_update(vdev, ms->mask, ms->inactive_buff_vpu_addr,
ms->buff_size, &bytes_written);
if (ret)
return ret;
swap(ms->active_buff_vpu_addr, ms->inactive_buff_vpu_addr);
swap(ms->active_buff_ptr, ms->inactive_buff_ptr);
ms->leftover_bytes = bytes_written;
ms->leftover_addr = ms->inactive_buff_ptr;
return copy_leftover_bytes(ms, user_ptr, user_size, user_bytes_copied);
}
int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_ivpu_metric_streamer_get_data *args = data;
struct ivpu_file_priv *file_priv = file->driver_priv;
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_ms_instance *ms;
u64 bytes_written;
int ret;
if (!args->metric_group_mask)
return -EINVAL;
mutex_lock(&file_priv->ms_lock);
ms = get_instance_by_mask(file_priv, args->metric_group_mask);
if (!ms) {
ivpu_err(vdev, "Instance doesn't exist for mask: %#llx\n", args->metric_group_mask);
ret = -EINVAL;
goto unlock;
}
if (!args->buffer_size) {
ret = ivpu_jsm_metric_streamer_update(vdev, ms->mask, 0, 0, &bytes_written);
if (ret)
goto unlock;
args->data_size = bytes_written + ms->leftover_bytes;
goto unlock;
}
if (!args->buffer_ptr) {
ret = -EINVAL;
goto unlock;
}
ret = copy_samples_to_user(vdev, ms, u64_to_user_ptr(args->buffer_ptr),
args->buffer_size, &args->data_size);
unlock:
mutex_unlock(&file_priv->ms_lock);
return ret;
}
static void free_instance(struct ivpu_file_priv *file_priv, struct ivpu_ms_instance *ms)
{
lockdep_assert_held(&file_priv->ms_lock);
list_del(&ms->ms_instance_node);
ivpu_jsm_metric_streamer_stop(file_priv->vdev, ms->mask);
ivpu_bo_free(ms->bo);
kfree(ms);
}
int ivpu_ms_stop_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
struct drm_ivpu_metric_streamer_stop *args = data;
struct ivpu_ms_instance *ms;
if (!args->metric_group_mask)
return -EINVAL;
mutex_lock(&file_priv->ms_lock);
ms = get_instance_by_mask(file_priv, args->metric_group_mask);
if (ms)
free_instance(file_priv, ms);
mutex_unlock(&file_priv->ms_lock);
return ms ? 0 : -EINVAL;
}
static inline struct ivpu_bo *get_ms_info_bo(struct ivpu_file_priv *file_priv)
{
lockdep_assert_held(&file_priv->ms_lock);
if (file_priv->ms_info_bo)
return file_priv->ms_info_bo;
file_priv->ms_info_bo = ivpu_bo_create_global(file_priv->vdev, MS_INFO_BUFFER_SIZE,
DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE);
return file_priv->ms_info_bo;
}
int ivpu_ms_get_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_ivpu_metric_streamer_get_data *args = data;
struct ivpu_file_priv *file_priv = file->driver_priv;
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_bo *bo;
u64 info_size;
int ret;
if (!args->metric_group_mask)
return -EINVAL;
if (!args->buffer_size)
return ivpu_jsm_metric_streamer_info(vdev, args->metric_group_mask,
0, 0, NULL, &args->data_size);
if (!args->buffer_ptr)
return -EINVAL;
mutex_lock(&file_priv->ms_lock);
bo = get_ms_info_bo(file_priv);
if (!bo) {
ret = -ENOMEM;
goto unlock;
}
ret = ivpu_jsm_metric_streamer_info(vdev, args->metric_group_mask, bo->vpu_addr,
ivpu_bo_size(bo), NULL, &info_size);
if (ret)
goto unlock;
if (args->buffer_size < info_size) {
ret = -ENOSPC;
goto unlock;
}
if (copy_to_user(u64_to_user_ptr(args->buffer_ptr), ivpu_bo_vaddr(bo), info_size))
ret = -EFAULT;
args->data_size = info_size;
unlock:
mutex_unlock(&file_priv->ms_lock);
return ret;
}
void ivpu_ms_cleanup(struct ivpu_file_priv *file_priv)
{
struct ivpu_ms_instance *ms, *tmp;
mutex_lock(&file_priv->ms_lock);
if (file_priv->ms_info_bo) {
ivpu_bo_free(file_priv->ms_info_bo);
file_priv->ms_info_bo = NULL;
}
list_for_each_entry_safe(ms, tmp, &file_priv->ms_instance_list, ms_instance_node)
free_instance(file_priv, ms);
mutex_unlock(&file_priv->ms_lock);
}
void ivpu_ms_cleanup_all(struct ivpu_device *vdev)
{
struct ivpu_file_priv *file_priv;
unsigned long ctx_id;
mutex_lock(&vdev->context_list_lock);
xa_for_each(&vdev->context_xa, ctx_id, file_priv)
ivpu_ms_cleanup(file_priv);
mutex_unlock(&vdev->context_list_lock);
}
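From userspace the intended sequence is start, size-query, read, stop. A hypothetical sketch (the DRM_IOCTL_IVPU_METRIC_STREAMER_* names are assumed from the uapi header; error handling omitted):

	#include <stdint.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <drm/ivpu_accel.h>

	void sample_metrics(int fd)
	{
		struct drm_ivpu_metric_streamer_start start = {
			.metric_group_mask = 0x1,
			.sampling_period_ns = 10 * 1000 * 1000,	/* >= MS_MIN_SAMPLE_PERIOD_NS */
			.read_period_samples = 100,
		};
		struct drm_ivpu_metric_streamer_get_data get = { .metric_group_mask = 0x1 };
		struct drm_ivpu_metric_streamer_stop stop = { .metric_group_mask = 0x1 };

		ioctl(fd, DRM_IOCTL_IVPU_METRIC_STREAMER_START, &start);
		ioctl(fd, DRM_IOCTL_IVPU_METRIC_STREAMER_GET_DATA, &get);	/* buffer_size == 0: size query */
		get.buffer_ptr = (uintptr_t)malloc(get.data_size);
		get.buffer_size = get.data_size;
		ioctl(fd, DRM_IOCTL_IVPU_METRIC_STREAMER_GET_DATA, &get);	/* copy samples out */
		ioctl(fd, DRM_IOCTL_IVPU_METRIC_STREAMER_STOP, &stop);
	}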


@@ -0,0 +1,36 @@
/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/*
* Copyright (C) 2020-2024 Intel Corporation
*/
#ifndef __IVPU_MS_H__
#define __IVPU_MS_H__
#include <linux/list.h>
struct drm_device;
struct drm_file;
struct ivpu_bo;
struct ivpu_device;
struct ivpu_file_priv;
struct ivpu_ms_instance {
struct ivpu_bo *bo;
struct list_head ms_instance_node;
u64 mask;
u64 buff_size;
u64 active_buff_vpu_addr;
u64 inactive_buff_vpu_addr;
void *active_buff_ptr;
void *inactive_buff_ptr;
u64 leftover_bytes;
void *leftover_addr;
};
int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int ivpu_ms_stop_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int ivpu_ms_get_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
void ivpu_ms_cleanup(struct ivpu_file_priv *file_priv);
void ivpu_ms_cleanup_all(struct ivpu_device *vdev);
#endif /* __IVPU_MS_H__ */


@@ -18,6 +18,7 @@
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_mmu.h"
#include "ivpu_ms.h"
#include "ivpu_pm.h"
static bool ivpu_disable_recovery;
@@ -131,6 +132,7 @@ static void ivpu_pm_recovery_work(struct work_struct *work)
ivpu_suspend(vdev);
ivpu_pm_prepare_cold_boot(vdev);
ivpu_jobs_abort_all(vdev);
ivpu_ms_cleanup_all(vdev);
ret = ivpu_resume(vdev);
if (ret)
@@ -235,41 +237,37 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct ivpu_device *vdev = to_ivpu_device(drm);
bool hw_is_idle = true;
int ret;
int ret, ret_d0i3;
bool is_idle;
drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
drm_WARN_ON(&vdev->drm, work_pending(&vdev->pm->recovery_work));
ivpu_dbg(vdev, PM, "Runtime suspend..\n");
if (!ivpu_hw_is_idle(vdev) && vdev->pm->suspend_reschedule_counter) {
ivpu_dbg(vdev, PM, "Failed to enter idle, rescheduling suspend, retries left %d\n",
vdev->pm->suspend_reschedule_counter);
pm_schedule_suspend(dev, vdev->timeout.reschedule_suspend);
vdev->pm->suspend_reschedule_counter--;
return -EAGAIN;
}
ivpu_mmu_disable(vdev);
if (!vdev->pm->suspend_reschedule_counter)
hw_is_idle = false;
else if (ivpu_jsm_pwr_d0i3_enter(vdev))
hw_is_idle = false;
is_idle = ivpu_hw_is_idle(vdev) || vdev->pm->dct_active_percent;
if (!is_idle)
ivpu_err(vdev, "NPU is not idle before autosuspend\n");
ret_d0i3 = ivpu_jsm_pwr_d0i3_enter(vdev);
if (ret_d0i3)
ivpu_err(vdev, "Failed to prepare for d0i3: %d\n", ret_d0i3);
ret = ivpu_suspend(vdev);
if (ret)
ivpu_err(vdev, "Failed to suspend NPU: %d\n", ret);
if (!hw_is_idle) {
ivpu_err(vdev, "NPU failed to enter idle, force suspended.\n");
if (!is_idle || ret_d0i3) {
ivpu_err(vdev, "Forcing cold boot due to previous errors\n");
atomic_inc(&vdev->pm->reset_counter);
ivpu_fw_log_dump(vdev);
ivpu_pm_prepare_cold_boot(vdev);
} else {
ivpu_pm_prepare_warm_boot(vdev);
}
vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;
ivpu_dbg(vdev, PM, "Runtime suspend done.\n");
return 0;
@@ -297,17 +295,6 @@ int ivpu_rpm_get(struct ivpu_device *vdev)
int ret;
ret = pm_runtime_resume_and_get(vdev->drm.dev);
if (!drm_WARN_ON(&vdev->drm, ret < 0))
vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;
return ret;
}
int ivpu_rpm_get_if_active(struct ivpu_device *vdev)
{
int ret;
ret = pm_runtime_get_if_in_use(vdev->drm.dev);
drm_WARN_ON(&vdev->drm, ret < 0);
return ret;
@@ -333,6 +320,8 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
ivpu_hw_reset(vdev);
ivpu_pm_prepare_cold_boot(vdev);
ivpu_jobs_abort_all(vdev);
ivpu_ms_cleanup_all(vdev);
ivpu_dbg(vdev, PM, "Pre-reset done.\n");
}
@@ -360,7 +349,6 @@ void ivpu_pm_init(struct ivpu_device *vdev)
int delay;
pm->vdev = vdev;
pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;
init_rwsem(&pm->reset_lock);
atomic_set(&pm->reset_pending, 0);
@@ -401,3 +389,68 @@ void ivpu_pm_disable(struct ivpu_device *vdev)
pm_runtime_get_noresume(vdev->drm.dev);
pm_runtime_forbid(vdev->drm.dev);
}
int ivpu_pm_dct_init(struct ivpu_device *vdev)
{
if (vdev->pm->dct_active_percent)
return ivpu_pm_dct_enable(vdev, vdev->pm->dct_active_percent);
return 0;
}
int ivpu_pm_dct_enable(struct ivpu_device *vdev, u8 active_percent)
{
u32 active_us, inactive_us;
int ret;
if (active_percent == 0 || active_percent > 100)
return -EINVAL;
active_us = (DCT_PERIOD_US * active_percent) / 100;
inactive_us = DCT_PERIOD_US - active_us;
ret = ivpu_jsm_dct_enable(vdev, active_us, inactive_us);
if (ret) {
ivpu_err_ratelimited(vdev, "Failed to enable DCT: %d\n", ret);
return ret;
}
vdev->pm->dct_active_percent = active_percent;
ivpu_dbg(vdev, PM, "DCT set to %u%% (D0: %uus, D0i2: %uus)\n",
active_percent, active_us, inactive_us);
return 0;
}
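Worked example (the numeric DCT_PERIOD_US is illustrative, not the driver's actual constant): with a 10000 us period and active_percent = 30, active_us = (10000 * 30) / 100 = 3000 and inactive_us = 7000, i.e. the NPU duty-cycles 3 ms in D0 followed by 7 ms in D0i2.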
int ivpu_pm_dct_disable(struct ivpu_device *vdev)
{
int ret;
ret = ivpu_jsm_dct_disable(vdev);
if (ret) {
ivpu_err_ratelimited(vdev, "Failed to disable DCT: %d\n", ret);
return ret;
}
vdev->pm->dct_active_percent = 0;
ivpu_dbg(vdev, PM, "DCT disabled\n");
return 0;
}
void ivpu_pm_dct_irq_thread_handler(struct ivpu_device *vdev)
{
bool enable;
int ret;
if (ivpu_hw_btrs_dct_get_request(vdev, &enable))
return;
if (enable)
ret = ivpu_pm_dct_enable(vdev, DCT_DEFAULT_ACTIVE_PERCENT);
else
ret = ivpu_pm_dct_disable(vdev);
if (!ret)
ivpu_hw_btrs_dct_set_status(vdev, enable, vdev->pm->dct_active_percent);
}


@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020-2023 Intel Corporation
* Copyright (C) 2020-2024 Intel Corporation
*/
#ifndef __IVPU_PM_H__
@@ -19,7 +19,7 @@ struct ivpu_pm_info {
atomic_t reset_counter;
atomic_t reset_pending;
bool is_warmboot;
u32 suspend_reschedule_counter;
u8 dct_active_percent;
};
void ivpu_pm_init(struct ivpu_device *vdev);
@@ -36,11 +36,15 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev);
void ivpu_pm_reset_done_cb(struct pci_dev *pdev);
int __must_check ivpu_rpm_get(struct ivpu_device *vdev);
int __must_check ivpu_rpm_get_if_active(struct ivpu_device *vdev);
void ivpu_rpm_put(struct ivpu_device *vdev);
void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason);
void ivpu_start_job_timeout_detection(struct ivpu_device *vdev);
void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev);
int ivpu_pm_dct_init(struct ivpu_device *vdev);
int ivpu_pm_dct_enable(struct ivpu_device *vdev, u8 active_percent);
int ivpu_pm_dct_disable(struct ivpu_device *vdev);
void ivpu_pm_dct_irq_thread_handler(struct ivpu_device *vdev);
#endif /* __IVPU_PM_H__ */


@@ -0,0 +1,58 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2024 Intel Corporation
*/
#include <linux/device.h>
#include <linux/err.h>
#include "ivpu_hw.h"
#include "ivpu_sysfs.h"
/*
* npu_busy_time_us is the time that the device spent executing jobs.
* The time is counted when and only when there are jobs submitted to firmware.
*
* This time can be used to measure the utilization of the NPU, either by taking
* the npu_busy_time_us difference between two timepoints (i.e. measuring the time
* that the NPU was active during some workload) or by monitoring the utilization
* percentage through periodic reads of npu_busy_time_us.
*
* When reading the value periodically, it shouldn't be read too often as it may have
* an impact on job submission performance. Recommended period is 1 second.
*/
static ssize_t
npu_busy_time_us_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct ivpu_device *vdev = to_ivpu_device(drm);
ktime_t total, now = 0;
xa_lock(&vdev->submitted_jobs_xa);
total = vdev->busy_time;
if (!xa_empty(&vdev->submitted_jobs_xa))
now = ktime_sub(ktime_get(), vdev->busy_start_ts);
xa_unlock(&vdev->submitted_jobs_xa);
return sysfs_emit(buf, "%lld\n", ktime_to_us(ktime_add(total, now)));
}
static DEVICE_ATTR_RO(npu_busy_time_us);
static struct attribute *ivpu_dev_attrs[] = {
&dev_attr_npu_busy_time_us.attr,
NULL,
};
static struct attribute_group ivpu_dev_attr_group = {
.attrs = ivpu_dev_attrs,
};
void ivpu_sysfs_init(struct ivpu_device *vdev)
{
int ret;
ret = devm_device_add_group(vdev->drm.dev, &ivpu_dev_attr_group);
if (ret)
ivpu_warn(vdev, "Failed to add group to device, ret %d\n", ret);
}
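Following the comment above, utilization over a window falls out of two reads one second apart. A minimal userspace sketch (the PCI device path is an assumption):

	#include <stdio.h>
	#include <unistd.h>

	static long long read_busy_us(const char *path)
	{
		long long val = 0;
		FILE *f = fopen(path, "r");

		if (f) {
			fscanf(f, "%lld", &val);
			fclose(f);
		}
		return val;
	}

	int main(void)
	{
		const char *path = "/sys/bus/pci/devices/0000:00:0b.0/npu_busy_time_us";
		long long before = read_busy_us(path);

		sleep(1);
		/* busy microseconds over a 1000000 us window -> percent */
		printf("NPU utilization: %.1f%%\n", (read_busy_us(path) - before) / 10000.0);
		return 0;
	}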


@@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2024 Intel Corporation
*/
#ifndef __IVPU_SYSFS_H__
#define __IVPU_SYSFS_H__
#include "ivpu_drv.h"
void ivpu_sysfs_init(struct ivpu_device *vdev);
#endif /* __IVPU_SYSFS_H__ */


@@ -27,7 +27,7 @@
* Minor version changes when API backward compatibility is preserved.
* Resets to 0 if Major version is incremented.
*/
#define VPU_BOOT_API_VER_MINOR 22
#define VPU_BOOT_API_VER_MINOR 24
/*
* API header changed (field names, documentation, formatting) but API itself has not been changed
@@ -80,6 +80,11 @@ struct vpu_firmware_header {
u32 preemption_buffer_2_size;
/* Space reserved for future preemption-related fields. */
u32 preemption_reserved[6];
/* FW image read only section start address, 4KB aligned */
u64 ro_section_start_address;
/* FW image read only section size, 4KB aligned */
u32 ro_section_size;
u32 reserved;
};
/*
@@ -333,7 +338,14 @@ struct vpu_boot_params {
* The KMD is required to update this value on every VPU reset.
*/
u64 system_time_us;
u32 pad4[18];
u32 pad4[2];
/*
* The delta between device monotonic time and the current value of the
* HW timestamp register, in ticks. Written by the firmware during boot.
* Can be used by the KMD to calculate device time.
*/
u64 device_time_delta_ticks;
u32 pad7[14];
/* Warm boot information: 0x400 - 0x43F */
u32 warm_boot_sections_count;
u32 warm_boot_start_address_reference;
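Reading the device_time_delta_ticks comment literally (delta = device monotonic time - HW timestamp, in ticks), converting a raw timestamp is a single addition — a sketch under that interpretation (the sign convention is an assumption):

	u64 device_time_ticks = hw_timestamp_ticks + boot_params->device_time_delta_ticks;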


@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright (c) 2020-2023, Intel Corporation.
* Copyright (c) 2020-2024, Intel Corporation.
*/
/**
@ -22,12 +22,12 @@
/*
* Minor version changes when API backward compatibility is preserved.
*/
#define VPU_JSM_API_VER_MINOR 15
#define VPU_JSM_API_VER_MINOR 16
/*
* API header changed (field names, documentation, formatting) but API itself has not been changed
*/
#define VPU_JSM_API_VER_PATCH 6
#define VPU_JSM_API_VER_PATCH 0
/*
* Index in the API version table
@ -868,6 +868,14 @@ struct vpu_ipc_msg_payload_hws_set_scheduling_log {
* is generated when an event log is written to this index.
*/
u64 notify_index;
/*
* Enable extra events to be output to log for debug of scheduling algorithm.
* Interpreted by VPU as a boolean to enable or disable, expected values are
* 0 and 1.
*/
u32 enable_extra_events;
/* Zero Padding */
u32 reserved_0;
};
/*

View File

@ -802,4 +802,5 @@ module_exit(agp_amd64_cleanup);
MODULE_AUTHOR("Dave Jones, Andi Kleen");
module_param(agp_try_unsupported, bool, 0);
MODULE_DESCRIPTION("GART driver for the AMD Opteron/Athlon64 on-CPU northbridge");
MODULE_LICENSE("GPL");

View File

@ -12,7 +12,7 @@
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
#include <drm/intel-gtt.h>
#include <drm/intel/intel-gtt.h>
static int intel_fetch_size(void)
{
@ -920,4 +920,5 @@ module_init(agp_intel_init);
module_exit(agp_intel_cleanup);
MODULE_AUTHOR("Dave Jones, Various @Intel");
MODULE_DESCRIPTION("Intel AGPGART routines");
MODULE_LICENSE("GPL and additional rights");

View File

@ -25,7 +25,7 @@
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
#include <drm/intel-gtt.h>
#include <drm/intel/intel-gtt.h>
#include <asm/set_memory.h>
/*
@ -1461,4 +1461,5 @@ void intel_gmch_remove(void)
EXPORT_SYMBOL(intel_gmch_remove);
MODULE_AUTHOR("Dave Jones, Various @Intel");
MODULE_DESCRIPTION("Intel GTT (Graphics Translation Table) routines");
MODULE_LICENSE("GPL and additional rights");

View File

@ -433,4 +433,5 @@ module_param(agp_sis_force_delay, bool, 0);
MODULE_PARM_DESC(agp_sis_force_delay,"forces sis delay hack");
module_param(agp_sis_agp_spec, int, 0);
MODULE_PARM_DESC(agp_sis_agp_spec,"0=force sis init, 1=force generic agp3 init, default: autodetect");
MODULE_DESCRIPTION("SiS AGPGART routines");
MODULE_LICENSE("GPL and additional rights");

View File

@ -575,5 +575,6 @@ static void __exit agp_via_cleanup(void)
module_init(agp_via_init);
module_exit(agp_via_cleanup);
MODULE_DESCRIPTION("VIA AGPGART routines");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dave Jones");

View File

@ -70,7 +70,7 @@ static void dma_fence_array_cb_func(struct dma_fence *f,
static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
{
struct dma_fence_array *array = to_dma_fence_array(fence);
struct dma_fence_array_cb *cb = (void *)(&array[1]);
struct dma_fence_array_cb *cb = array->callbacks;
unsigned i;
for (i = 0; i < array->num_fences; ++i) {
@ -168,22 +168,20 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
bool signal_on_any)
{
struct dma_fence_array *array;
size_t size = sizeof(*array);
WARN_ON(!num_fences || !fences);
/* Allocate the callback structures behind the array. */
size += num_fences * sizeof(struct dma_fence_array_cb);
array = kzalloc(size, GFP_KERNEL);
array = kzalloc(struct_size(array, callbacks, num_fences), GFP_KERNEL);
if (!array)
return NULL;
array->num_fences = num_fences;
spin_lock_init(&array->lock);
dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock,
context, seqno);
init_irq_work(&array->work, irq_dma_fence_array_work);
array->num_fences = num_fences;
atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
array->fences = fences;
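The allocation rewrite above pairs a flexible array member (callbacks[]) with struct_size(), which computes sizeof(*array) + num * sizeof(element) and saturates on arithmetic overflow, so an oversized request fails cleanly instead of yielding an undersized buffer. A minimal sketch of the pattern with an illustrative struct (not the real dma_fence_array layout):

/* Illustrative flexible-array container; one kzalloc() covers both the
 * header and the trailing element storage. */
struct demo_array {
        unsigned int num_items;
        struct dma_fence_array_cb items[];
};

static struct demo_array *demo_array_alloc(unsigned int n)
{
        /* struct_size(a, items, n) == sizeof(*a) + n * sizeof(a->items[0]),
         * saturating to SIZE_MAX on overflow so kzalloc() returns NULL. */
        struct demo_array *a = kzalloc(struct_size(a, items, n), GFP_KERNEL);

        if (a)
                a->num_items = n;
        return a;
}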

View File

@ -50,8 +50,8 @@ static struct class *dma_heap_class;
static DEFINE_XARRAY_ALLOC(dma_heap_minors);
static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
unsigned int fd_flags,
unsigned int heap_flags)
u32 fd_flags,
u64 heap_flags)
{
struct dma_buf *dmabuf;
int fd;
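The narrower fd_flags and wider heap_flags align the in-kernel prototype with the dma-heap UAPI, whose allocation ioctl already carries a 32-bit fd_flags and a 64-bit heap_flags. A sketch of that layout, as found in the dma-heap UAPI header:

/* UAPI allocation descriptor the in-kernel types now mirror. */
struct dma_heap_allocation_data {
        __u64 len;
        __u32 fd;
        __u32 fd_flags;
        __u64 heap_flags;
};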

View File

@ -186,6 +186,13 @@ int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences)
dma_resv_assert_held(obj);
/* Driver and component code should never call this function with
* num_fences=0. If they do it usually points to bugs when calculating
* the number of needed fences dynamically.
*/
if (WARN_ON(!num_fences))
return -EINVAL;
old = dma_resv_fences_list(obj);
if (old && old->max_fences) {
if ((old->num_fences + num_fences) <= old->max_fences)

View File

@ -274,8 +274,8 @@ static const struct dma_buf_ops cma_heap_buf_ops = {
static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
unsigned long len,
unsigned long fd_flags,
unsigned long heap_flags)
u32 fd_flags,
u64 heap_flags)
{
struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
struct cma_heap_buffer *buffer;

View File

@ -333,8 +333,8 @@ static struct page *alloc_largest_available(unsigned long size,
static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
unsigned long len,
unsigned long fd_flags,
unsigned long heap_flags)
u32 fd_flags,
u64 heap_flags)
{
struct system_heap_buffer *buffer;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

View File

@ -79,6 +79,7 @@ config DRM_KUNIT_TEST
depends on DRM && KUNIT && MMU
select DRM_BUDDY
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDMI_STATE_HELPER
select DRM_DISPLAY_HELPER
select DRM_EXEC
select DRM_EXPORT_FOR_TESTS if m
@ -106,7 +107,7 @@ config DRM_KMS_HELPER
config DRM_PANIC
bool "Display a user-friendly message when a kernel panic occurs"
depends on DRM && !FRAMEBUFFER_CONSOLE
depends on DRM && !(FRAMEBUFFER_CONSOLE && VT_CONSOLE)
select DRM_KMS_HELPER
select FONT_SUPPORT
help
@ -136,6 +137,19 @@ config DRM_PANIC_DEBUG
This is unsafe and should not be enabled on a production build.
If in doubt, say "N".
config DRM_PANIC_SCREEN
string "Panic screen formater"
default "user"
depends on DRM_PANIC
help
This option lets you choose what will be displayed when a kernel
panic occurs. You can choose between "user", a short message telling
the user to reboot the system, or "kmsg", which will display the last
lines of kmsg.
This can also be overridden by the drm.panic_screen=xxxx kernel
parameter or by writing to the /sys/module/drm/parameters/panic_screen
sysfs entry.
Default is "user".
config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
bool "Enable refcount backtrace history in the DP MST helpers"
depends on STACKTRACE_SUPPORT

View File

@ -108,6 +108,7 @@ drm_dma_helper-$(CONFIG_DRM_KMS_HELPER) += drm_fb_dma_helper.o
obj-$(CONFIG_DRM_GEM_DMA_HELPER) += drm_dma_helper.o
drm_shmem_helper-y := drm_gem_shmem_helper.o
drm_shmem_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fbdev_shmem.o
obj-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_shmem_helper.o
drm_suballoc_helper-y := drm_suballoc.o
@ -117,6 +118,7 @@ drm_vram_helper-y := drm_gem_vram_helper.o
obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o
drm_ttm_helper-y := drm_gem_ttm_helper.o
drm_ttm_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fbdev_ttm.o
obj-$(CONFIG_DRM_TTM_HELPER) += drm_ttm_helper.o
#
@ -142,9 +144,7 @@ drm_kms_helper-y := \
drm_self_refresh_helper.o \
drm_simple_kms_helper.o
drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += \
drm_fbdev_generic.o \
drm_fb_helper.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
#

View File

@ -246,22 +246,6 @@ amdgpu_connector_find_encoder(struct drm_connector *connector,
return NULL;
}
struct edid *amdgpu_connector_edid(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct drm_property_blob *edid_blob = connector->edid_blob_ptr;
if (amdgpu_connector->edid) {
return amdgpu_connector->edid;
} else if (edid_blob) {
struct edid *edid = kmemdup(edid_blob->data, edid_blob->length, GFP_KERNEL);
if (edid)
amdgpu_connector->edid = edid;
}
return amdgpu_connector->edid;
}
static struct edid *
amdgpu_connector_get_hardcoded_edid(struct amdgpu_device *adev)
{

View File

@ -24,7 +24,6 @@
#ifndef __AMDGPU_CONNECTORS_H__
#define __AMDGPU_CONNECTORS_H__
struct edid *amdgpu_connector_edid(struct drm_connector *connector);
void amdgpu_connector_hotplug(struct drm_connector *connector);
int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector);
u16 amdgpu_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector);

View File

@ -24,7 +24,7 @@
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_fbdev_ttm.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_pciids.h>
@ -2346,9 +2346,9 @@ retry_init:
!list_empty(&adev_to_drm(adev)->mode_config.connector_list)) {
/* select 8 bpp console on low vram cards */
if (adev->gmc.real_vram_size <= (32*1024*1024))
drm_fbdev_generic_setup(adev_to_drm(adev), 8);
drm_fbdev_ttm_setup(adev_to_drm(adev), 8);
else
drm_fbdev_generic_setup(adev_to_drm(adev), 32);
drm_fbdev_ttm_setup(adev_to_drm(adev), 32);
}
ret = amdgpu_debugfs_init(adev);

View File

@ -65,9 +65,7 @@ static enum hrtimer_restart amdgpu_vkms_vblank_simulate(struct hrtimer *timer)
static int amdgpu_vkms_enable_vblank(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = drm_crtc_index(crtc);
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
struct amdgpu_vkms_output *out = drm_crtc_to_amdgpu_vkms_output(crtc);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
@ -91,10 +89,8 @@ static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
ktime_t *vblank_time,
bool in_vblank_irq)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = crtc->index;
struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc);
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
if (!READ_ONCE(vblank->enabled)) {
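Both hunks swap the open-coded &dev->vblank[pipe] lookup for the drm_crtc_vblank_crtc() helper. Presumably the helper encapsulates exactly that indexing; a sketch of the equivalent lookup (see drm_vblank.h for the canonical definition):

/* Sketch of what drm_crtc_vblank_crtc() resolves to. */
static inline struct drm_vblank_crtc *demo_crtc_vblank(struct drm_crtc *crtc)
{
        return &crtc->dev->vblank[drm_crtc_index(crtc)];
}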

View File

@ -1299,7 +1299,7 @@ static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder
return;
}
sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
sad_count = drm_edid_to_speaker_allocation(amdgpu_connector->edid, &sadb);
if (sad_count < 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
sad_count = 0;
@ -1369,7 +1369,7 @@ static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
return;
}
sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
sad_count = drm_edid_to_sad(amdgpu_connector->edid, &sads);
if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
if (sad_count <= 0)

View File

@ -1331,7 +1331,7 @@ static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder
return;
}
sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
sad_count = drm_edid_to_speaker_allocation(amdgpu_connector->edid, &sadb);
if (sad_count < 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
sad_count = 0;
@ -1401,7 +1401,7 @@ static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
return;
}
sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
sad_count = drm_edid_to_sad(amdgpu_connector->edid, &sads);
if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
if (sad_count <= 0)

View File

@ -1217,7 +1217,7 @@ static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
return;
}
sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
sad_count = drm_edid_to_speaker_allocation(amdgpu_connector->edid, &sadb);
if (sad_count < 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
sad_count = 0;
@ -1292,7 +1292,7 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
return;
}
sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
sad_count = drm_edid_to_sad(amdgpu_connector->edid, &sads);
if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
if (sad_count <= 0)

View File

@ -1272,7 +1272,7 @@ static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
return;
}
sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
sad_count = drm_edid_to_speaker_allocation(amdgpu_connector->edid, &sadb);
if (sad_count < 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
sad_count = 0;
@ -1340,7 +1340,7 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
return;
}
sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
sad_count = drm_edid_to_sad(amdgpu_connector->edid, &sads);
if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
if (sad_count <= 0)

View File

@ -534,7 +534,7 @@ static void dm_vupdate_high_irq(void *interrupt_params)
if (acrtc) {
vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
drm_dev = acrtc->base.dev;
vblank = &drm_dev->vblank[acrtc->base.index];
vblank = drm_crtc_vblank_crtc(&acrtc->base);
previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
frame_duration_ns = vblank->time - previous_timestamp;

View File

@ -5,6 +5,7 @@
*
*/
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
@ -294,7 +295,6 @@ komeda_crtc_flush_and_wait_for_flip_done(struct komeda_crtc *kcrtc,
struct komeda_dev *mdev = kcrtc->master->mdev;
struct completion *flip_done;
struct completion temp;
int timeout;
/* if caller doesn't send a flip_done, use a private flip_done */
if (input_flip_done) {
@ -308,8 +308,7 @@ komeda_crtc_flush_and_wait_for_flip_done(struct komeda_crtc *kcrtc,
mdev->funcs->flush(mdev, kcrtc->master->id, 0);
/* wait for the flip to take effect. */
timeout = wait_for_completion_timeout(flip_done, HZ);
if (timeout == 0) {
if (wait_for_completion_timeout(flip_done, HZ) == 0) {
DRM_ERROR("wait pipe%d flip done timeout\n", kcrtc->master->id);
if (!input_flip_done) {
unsigned long flags;
@ -610,12 +609,34 @@ get_crtc_primary(struct komeda_kms_dev *kms, struct komeda_crtc *crtc)
return NULL;
}
static int komeda_attach_bridge(struct device *dev,
struct komeda_pipeline *pipe,
struct drm_encoder *encoder)
{
struct drm_bridge *bridge;
int err;
bridge = devm_drm_of_get_bridge(dev, pipe->of_node,
KOMEDA_OF_PORT_OUTPUT, 0);
if (IS_ERR(bridge))
return dev_err_probe(dev, PTR_ERR(bridge), "remote bridge not found for pipe: %s\n",
of_node_full_name(pipe->of_node));
err = drm_bridge_attach(encoder, bridge, NULL, 0);
if (err)
dev_err(dev, "bridge_attach() failed for pipe: %s\n",
of_node_full_name(pipe->of_node));
return err;
}
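Besides factoring out the bridge lookup, the helper switches the failure path to dev_err_probe(), which logs the message and returns the error in one statement, while staying quiet for -EPROBE_DEFER (the deferral reason is recorded instead), avoiding log spam on repeated probe attempts. The idiom, in short:

/* dev_err_probe() logs (except for -EPROBE_DEFER) and returns err. */
if (IS_ERR(bridge))
        return dev_err_probe(dev, PTR_ERR(bridge),
                             "remote bridge not found\n");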
static int komeda_crtc_add(struct komeda_kms_dev *kms,
struct komeda_crtc *kcrtc)
{
struct drm_crtc *crtc = &kcrtc->base;
struct drm_device *base = &kms->base;
struct drm_bridge *bridge;
struct komeda_pipeline *pipe = kcrtc->master;
struct drm_encoder *encoder = &kcrtc->encoder;
int err;
err = drm_crtc_init_with_planes(base, crtc,
@ -626,27 +647,27 @@ static int komeda_crtc_add(struct komeda_kms_dev *kms,
drm_crtc_helper_add(crtc, &komeda_crtc_helper_funcs);
crtc->port = kcrtc->master->of_output_port;
crtc->port = pipe->of_output_port;
/* Construct an encoder for each pipeline and attach it to the remote
* bridge
*/
kcrtc->encoder.possible_crtcs = drm_crtc_mask(crtc);
err = drm_simple_encoder_init(base, &kcrtc->encoder,
DRM_MODE_ENCODER_TMDS);
err = drm_simple_encoder_init(base, encoder, DRM_MODE_ENCODER_TMDS);
if (err)
return err;
bridge = devm_drm_of_get_bridge(base->dev, kcrtc->master->of_node,
KOMEDA_OF_PORT_OUTPUT, 0);
if (IS_ERR(bridge))
return PTR_ERR(bridge);
err = drm_bridge_attach(&kcrtc->encoder, bridge, NULL, 0);
if (pipe->of_output_links[0]) {
err = komeda_attach_bridge(base->dev, pipe, encoder);
if (err)
return err;
}
drm_crtc_enable_color_mgmt(crtc, 0, true, KOMEDA_COLOR_LUT_SIZE);
return err;
komeda_pipeline_dump(pipe);
return 0;
}
int komeda_kms_add_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev)

View File

@ -9,7 +9,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
#include "komeda_dev.h"
@ -59,6 +59,10 @@ static int komeda_platform_probe(struct platform_device *pdev)
struct komeda_drv *mdrv;
int err;
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
if (err)
return dev_err_probe(dev, err, "DMA mask error\n");
mdrv = devm_kzalloc(dev, sizeof(*mdrv), GFP_KERNEL);
if (!mdrv)
return -ENOMEM;
@ -80,7 +84,7 @@ static int komeda_platform_probe(struct platform_device *pdev)
}
dev_set_drvdata(dev, mdrv);
drm_fbdev_generic_setup(&mdrv->kms->base, 32);
drm_fbdev_dma_setup(&mdrv->kms->base, 32);
return 0;

View File

@ -191,5 +191,6 @@ void komeda_crtc_flush_and_wait_for_flip_done(struct komeda_crtc *kcrtc,
struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev);
void komeda_kms_detach(struct komeda_kms_dev *kms);
void komeda_kms_shutdown(struct komeda_kms_dev *kms);
void komeda_pipeline_dump(struct komeda_pipeline *pipe);
#endif /*_KOMEDA_KMS_H_*/

View File

@ -10,6 +10,7 @@
#include <drm/drm_print.h>
#include "komeda_dev.h"
#include "komeda_kms.h"
#include "komeda_pipeline.h"
/** komeda_pipeline_add - Add a pipeline to &komeda_dev */
@ -247,7 +248,7 @@ static void komeda_component_dump(struct komeda_component *c)
c->max_active_outputs, c->supported_outputs);
}
static void komeda_pipeline_dump(struct komeda_pipeline *pipe)
void komeda_pipeline_dump(struct komeda_pipeline *pipe)
{
struct komeda_component *c;
int id;
@ -351,7 +352,6 @@ int komeda_assemble_pipelines(struct komeda_dev *mdev)
pipe = mdev->pipelines[i];
komeda_pipeline_assemble(pipe);
komeda_pipeline_dump(pipe);
}
return 0;

View File

@ -33,7 +33,7 @@
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
@ -360,7 +360,7 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
drm_fbdev_generic_setup(drm, 32);
drm_fbdev_shmem_setup(drm, 32);
return 0;
}

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_ATMEL_HLCDC
tristate "DRM Support for ATMEL HLCDC Display Controller"
depends on DRM && OF && COMMON_CLK && MFD_ATMEL_HLCDC && ARM
depends on DRM && OF && COMMON_CLK && ((MFD_ATMEL_HLCDC && ARM) || COMPILE_TEST)
select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_PANEL

View File

@ -30,10 +30,12 @@
*
* @base: base CRTC state
* @output_mode: RGBXXX output mode
* @dpi: output DPI mode
*/
struct atmel_hlcdc_crtc_state {
struct drm_crtc_state base;
unsigned int output_mode;
u8 dpi;
};
static inline struct atmel_hlcdc_crtc_state *
@ -164,18 +166,24 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
state = drm_crtc_state_to_atmel_hlcdc_crtc_state(c->state);
cfg = state->output_mode << 8;
if (adj->flags & DRM_MODE_FLAG_NVSYNC)
cfg |= ATMEL_HLCDC_VSPOL;
if (!crtc->dc->desc->is_xlcdc) {
if (adj->flags & DRM_MODE_FLAG_NVSYNC)
cfg |= ATMEL_HLCDC_VSPOL;
if (adj->flags & DRM_MODE_FLAG_NHSYNC)
cfg |= ATMEL_HLCDC_HSPOL;
if (adj->flags & DRM_MODE_FLAG_NHSYNC)
cfg |= ATMEL_HLCDC_HSPOL;
} else {
cfg |= state->dpi << 11;
}
regmap_update_bits(regmap, ATMEL_HLCDC_CFG(5),
ATMEL_HLCDC_HSPOL | ATMEL_HLCDC_VSPOL |
ATMEL_HLCDC_VSPDLYS | ATMEL_HLCDC_VSPDLYE |
ATMEL_HLCDC_DISPPOL | ATMEL_HLCDC_DISPDLY |
ATMEL_HLCDC_VSPSU | ATMEL_HLCDC_VSPHO |
ATMEL_HLCDC_GUARDTIME_MASK | ATMEL_HLCDC_MODE_MASK,
ATMEL_HLCDC_GUARDTIME_MASK |
(crtc->dc->desc->is_xlcdc ? ATMEL_XLCDC_MODE_MASK |
ATMEL_XLCDC_DPI : ATMEL_HLCDC_MODE_MASK),
cfg);
clk_disable_unprepare(crtc->dc->hlcdc->sys_clk);
@ -202,20 +210,37 @@ static void atmel_hlcdc_crtc_atomic_disable(struct drm_crtc *c,
pm_runtime_get_sync(dev->dev);
if (crtc->dc->desc->is_xlcdc) {
regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_XLCDC_CM);
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
!(status & ATMEL_XLCDC_CM),
10, 1000))
dev_warn(dev->dev, "Atmel LCDC status register CMSTS timeout\n");
regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_XLCDC_SD);
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
status & ATMEL_XLCDC_SD,
10, 1000))
dev_warn(dev->dev, "Atmel LCDC status register SDSTS timeout\n");
}
regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_DISP);
while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
(status & ATMEL_HLCDC_DISP))
cpu_relax();
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
!(status & ATMEL_HLCDC_DISP),
10, 1000))
dev_warn(dev->dev, "Atmel LCDC status register DISPSTS timeout\n");
regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_SYNC);
while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
(status & ATMEL_HLCDC_SYNC))
cpu_relax();
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
!(status & ATMEL_HLCDC_SYNC),
10, 1000))
dev_warn(dev->dev, "Atmel LCDC status register LCDSTS timeout\n");
regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_PIXEL_CLK);
while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
(status & ATMEL_HLCDC_PIXEL_CLK))
cpu_relax();
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
!(status & ATMEL_HLCDC_PIXEL_CLK),
10, 1000))
dev_warn(dev->dev, "Atmel LCDC status register CLKSTS timeout\n");
clk_disable_unprepare(crtc->dc->hlcdc->sys_clk);
pinctrl_pm_select_sleep_state(dev->dev);
@ -241,30 +266,95 @@ static void atmel_hlcdc_crtc_atomic_enable(struct drm_crtc *c,
clk_prepare_enable(crtc->dc->hlcdc->sys_clk);
regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_PIXEL_CLK);
while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
!(status & ATMEL_HLCDC_PIXEL_CLK))
cpu_relax();
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
status & ATMEL_HLCDC_PIXEL_CLK,
10, 1000))
dev_warn(dev->dev, "Atmel LCDC status register CLKSTS timeout\n");
regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_SYNC);
while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
!(status & ATMEL_HLCDC_SYNC))
cpu_relax();
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
status & ATMEL_HLCDC_SYNC,
10, 1000))
dev_warn(dev->dev, "Atmel LCDC status register LCDSTS timeout\n");
regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_DISP);
while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
!(status & ATMEL_HLCDC_DISP))
cpu_relax();
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
status & ATMEL_HLCDC_DISP,
10, 1000))
dev_warn(dev->dev, "Atmel LCDC status register DISPSTS timeout\n");
if (crtc->dc->desc->is_xlcdc) {
regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_XLCDC_CM);
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
status & ATMEL_XLCDC_CM,
10, 1000))
dev_warn(dev->dev, "Atmel LCDC status register CMSTS timeout\n");
regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_XLCDC_SD);
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
!(status & ATMEL_XLCDC_SD),
10, 1000))
dev_warn(dev->dev, "Atmel LCDC status register SDSTS timeout\n");
}
pm_runtime_put_sync(dev->dev);
}
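The enable/disable paths above replace unbounded cpu_relax() spins with regmap_read_poll_timeout(map, reg, val, cond, sleep_us, timeout_us), which re-reads reg into val until cond holds, sleeping roughly sleep_us between reads, and returns -ETIMEDOUT once timeout_us elapses. One of the waits, before and after, for comparison:

/* Old pattern: spin until the status bit rises, potentially forever. */
while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
       !(status & ATMEL_HLCDC_PIXEL_CLK))
        cpu_relax();

/* New pattern: bounded poll, ~10us between reads, -ETIMEDOUT after 1ms. */
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
                             status & ATMEL_HLCDC_PIXEL_CLK,
                             10, 1000))
        dev_warn(dev->dev, "Atmel LCDC status register CLKSTS timeout\n");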
#define ATMEL_HLCDC_RGB444_OUTPUT BIT(0)
#define ATMEL_HLCDC_RGB565_OUTPUT BIT(1)
#define ATMEL_HLCDC_RGB666_OUTPUT BIT(2)
#define ATMEL_HLCDC_RGB888_OUTPUT BIT(3)
#define ATMEL_HLCDC_OUTPUT_MODE_MASK GENMASK(3, 0)
#define ATMEL_HLCDC_RGB444_OUTPUT BIT(0)
#define ATMEL_HLCDC_RGB565_OUTPUT BIT(1)
#define ATMEL_HLCDC_RGB666_OUTPUT BIT(2)
#define ATMEL_HLCDC_RGB888_OUTPUT BIT(3)
#define ATMEL_HLCDC_DPI_RGB565C1_OUTPUT BIT(4)
#define ATMEL_HLCDC_DPI_RGB565C2_OUTPUT BIT(5)
#define ATMEL_HLCDC_DPI_RGB565C3_OUTPUT BIT(6)
#define ATMEL_HLCDC_DPI_RGB666C1_OUTPUT BIT(7)
#define ATMEL_HLCDC_DPI_RGB666C2_OUTPUT BIT(8)
#define ATMEL_HLCDC_DPI_RGB888_OUTPUT BIT(9)
#define ATMEL_HLCDC_OUTPUT_MODE_MASK GENMASK(3, 0)
#define ATMEL_XLCDC_OUTPUT_MODE_MASK GENMASK(9, 0)
static int atmel_xlcdc_connector_output_dsi(struct drm_encoder *encoder,
struct drm_display_info *info)
{
int j;
unsigned int supported_fmts = 0;
switch (atmel_hlcdc_encoder_get_bus_fmt(encoder)) {
case 0:
break;
case MEDIA_BUS_FMT_RGB565_1X16:
return ATMEL_HLCDC_DPI_RGB565C1_OUTPUT;
case MEDIA_BUS_FMT_RGB666_1X18:
return ATMEL_HLCDC_DPI_RGB666C1_OUTPUT;
case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
return ATMEL_HLCDC_DPI_RGB666C2_OUTPUT;
case MEDIA_BUS_FMT_RGB888_1X24:
return ATMEL_HLCDC_DPI_RGB888_OUTPUT;
default:
return -EINVAL;
}
for (j = 0; j < info->num_bus_formats; j++) {
switch (info->bus_formats[j]) {
case MEDIA_BUS_FMT_RGB565_1X16:
supported_fmts |= ATMEL_HLCDC_DPI_RGB565C1_OUTPUT;
break;
case MEDIA_BUS_FMT_RGB666_1X18:
supported_fmts |= ATMEL_HLCDC_DPI_RGB666C1_OUTPUT;
break;
case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
supported_fmts |= ATMEL_HLCDC_DPI_RGB666C2_OUTPUT;
break;
case MEDIA_BUS_FMT_RGB888_1X24:
supported_fmts |= ATMEL_HLCDC_DPI_RGB888_OUTPUT;
break;
default:
break;
}
}
return supported_fmts;
}
static int atmel_hlcdc_connector_output_mode(struct drm_connector_state *state)
{
@ -277,6 +367,13 @@ static int atmel_hlcdc_connector_output_mode(struct drm_connector_state *state)
encoder = state->best_encoder;
if (!encoder)
encoder = connector->encoder;
/*
* atmel-hlcdc supports DSI formats on the DSI video pipeline
* when the connector driver component sets the
* DRM_MODE_ENCODER_DSI encoder type.
*/
if (encoder->encoder_type == DRM_MODE_ENCODER_DSI)
return atmel_xlcdc_connector_output_dsi(encoder, info);
switch (atmel_hlcdc_encoder_get_bus_fmt(encoder)) {
case 0:
@ -317,7 +414,7 @@ static int atmel_hlcdc_connector_output_mode(struct drm_connector_state *state)
static int atmel_hlcdc_crtc_select_output_mode(struct drm_crtc_state *state)
{
unsigned int output_fmts = ATMEL_HLCDC_OUTPUT_MODE_MASK;
unsigned int output_fmts;
struct atmel_hlcdc_crtc_state *hstate;
struct drm_connector_state *cstate;
struct drm_connector *connector;
@ -325,6 +422,8 @@ static int atmel_hlcdc_crtc_select_output_mode(struct drm_crtc_state *state)
int i;
crtc = drm_crtc_to_atmel_hlcdc_crtc(state->crtc);
output_fmts = crtc->dc->desc->is_xlcdc ? ATMEL_XLCDC_OUTPUT_MODE_MASK :
ATMEL_HLCDC_OUTPUT_MODE_MASK;
for_each_new_connector_in_state(state->state, connector, cstate, i) {
unsigned int supported_fmts = 0;
@ -345,7 +444,15 @@ static int atmel_hlcdc_crtc_select_output_mode(struct drm_crtc_state *state)
hstate = drm_crtc_state_to_atmel_hlcdc_crtc_state(state);
hstate->output_mode = fls(output_fmts) - 1;
if (crtc->dc->desc->is_xlcdc) {
/* check if MIPI DPI bit needs to be set */
if (fls(output_fmts) > 3) {
hstate->output_mode -= 4;
hstate->dpi = 1;
} else {
hstate->dpi = 0;
}
}
return 0;
}
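A quick trace of the XLCDC branch above: a DSI connector reporting MEDIA_BUS_FMT_RGB888_1X24 selects ATMEL_HLCDC_DPI_RGB888_OUTPUT, so:

/* Worked example of the mode selection above:
 * output_fmts = BIT(9)        (ATMEL_HLCDC_DPI_RGB888_OUTPUT)
 * fls(BIT(9)) = 10        ->  output_mode = 10 - 1 = 9
 * fls(output_fmts) > 3    ->  DPI format: output_mode = 9 - 4 = 5,
 *                             dpi = 1 (drives the CFG(5) DPI bit)
 */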
@ -449,6 +556,7 @@ static struct drm_crtc_state *
atmel_hlcdc_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct atmel_hlcdc_crtc_state *state, *cur;
struct atmel_hlcdc_crtc *c = drm_crtc_to_atmel_hlcdc_crtc(crtc);
if (WARN_ON(!crtc->state))
return NULL;
@ -460,6 +568,8 @@ atmel_hlcdc_crtc_duplicate_state(struct drm_crtc *crtc)
cur = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state);
state->output_mode = cur->output_mode;
if (c->dc->desc->is_xlcdc)
state->dpi = cur->dpi;
return &state->base;
}

View File

@ -58,6 +58,7 @@ static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_at91sam9n12 = {
.conflicting_output_formats = true,
.nlayers = ARRAY_SIZE(atmel_hlcdc_at91sam9n12_layers),
.layers = atmel_hlcdc_at91sam9n12_layers,
.ops = &atmel_hlcdc_ops,
};
static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9x5_layers[] = {
@ -151,6 +152,7 @@ static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_at91sam9x5 = {
.conflicting_output_formats = true,
.nlayers = ARRAY_SIZE(atmel_hlcdc_at91sam9x5_layers),
.layers = atmel_hlcdc_at91sam9x5_layers,
.ops = &atmel_hlcdc_ops,
};
static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d3_layers[] = {
@ -269,6 +271,7 @@ static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_sama5d3 = {
.conflicting_output_formats = true,
.nlayers = ARRAY_SIZE(atmel_hlcdc_sama5d3_layers),
.layers = atmel_hlcdc_sama5d3_layers,
.ops = &atmel_hlcdc_ops,
};
static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d4_layers[] = {
@ -364,6 +367,7 @@ static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_sama5d4 = {
.max_hpw = 0x3ff,
.nlayers = ARRAY_SIZE(atmel_hlcdc_sama5d4_layers),
.layers = atmel_hlcdc_sama5d4_layers,
.ops = &atmel_hlcdc_ops,
};
static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sam9x60_layers[] = {
@ -460,6 +464,103 @@ static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_sam9x60 = {
.fixed_clksrc = true,
.nlayers = ARRAY_SIZE(atmel_hlcdc_sam9x60_layers),
.layers = atmel_hlcdc_sam9x60_layers,
.ops = &atmel_hlcdc_ops,
};
static const struct atmel_hlcdc_layer_desc atmel_xlcdc_sam9x75_layers[] = {
{
.name = "base",
.formats = &atmel_hlcdc_plane_rgb_formats,
.regs_offset = 0x60,
.id = 0,
.type = ATMEL_HLCDC_BASE_LAYER,
.cfgs_offset = 0x1c,
.layout = {
.xstride = { 2 },
.default_color = 3,
.general_config = 4,
.disc_pos = 5,
.disc_size = 6,
},
.clut_offset = 0x700,
},
{
.name = "overlay1",
.formats = &atmel_hlcdc_plane_rgb_formats,
.regs_offset = 0x160,
.id = 1,
.type = ATMEL_HLCDC_OVERLAY_LAYER,
.cfgs_offset = 0x1c,
.layout = {
.pos = 2,
.size = 3,
.xstride = { 4 },
.pstride = { 5 },
.default_color = 6,
.chroma_key = 7,
.chroma_key_mask = 8,
.general_config = 9,
},
.clut_offset = 0xb00,
},
{
.name = "overlay2",
.formats = &atmel_hlcdc_plane_rgb_formats,
.regs_offset = 0x260,
.id = 2,
.type = ATMEL_HLCDC_OVERLAY_LAYER,
.cfgs_offset = 0x1c,
.layout = {
.pos = 2,
.size = 3,
.xstride = { 4 },
.pstride = { 5 },
.default_color = 6,
.chroma_key = 7,
.chroma_key_mask = 8,
.general_config = 9,
},
.clut_offset = 0xf00,
},
{
.name = "high-end-overlay",
.formats = &atmel_hlcdc_plane_rgb_and_yuv_formats,
.regs_offset = 0x360,
.id = 3,
.type = ATMEL_HLCDC_OVERLAY_LAYER,
.cfgs_offset = 0x30,
.layout = {
.pos = 2,
.size = 3,
.memsize = 4,
.xstride = { 5, 7 },
.pstride = { 6, 8 },
.default_color = 9,
.chroma_key = 10,
.chroma_key_mask = 11,
.general_config = 12,
.csc = 16,
.scaler_config = 23,
.vxs_config = 30,
.hxs_config = 31,
},
.clut_offset = 0x1300,
},
};
static const struct atmel_hlcdc_dc_desc atmel_xlcdc_dc_sam9x75 = {
.min_width = 0,
.min_height = 0,
.max_width = 2048,
.max_height = 2048,
.max_spw = 0x3ff,
.max_vpw = 0x3ff,
.max_hpw = 0x3ff,
.fixed_clksrc = true,
.is_xlcdc = true,
.nlayers = ARRAY_SIZE(atmel_xlcdc_sam9x75_layers),
.layers = atmel_xlcdc_sam9x75_layers,
.ops = &atmel_xlcdc_ops,
};
static const struct of_device_id atmel_hlcdc_of_match[] = {
@ -487,6 +588,10 @@ static const struct of_device_id atmel_hlcdc_of_match[] = {
.compatible = "microchip,sam9x60-hlcdc",
.data = &atmel_hlcdc_dc_sam9x60,
},
{
.compatible = "microchip,sam9x75-xlcdc",
.data = &atmel_xlcdc_dc_sam9x75,
},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, atmel_hlcdc_of_match);

View File

@ -15,6 +15,7 @@
#include <drm/drm_plane.h>
/* LCD controller common registers */
#define ATMEL_HLCDC_LAYER_CHER 0x0
#define ATMEL_HLCDC_LAYER_CHDR 0x4
#define ATMEL_HLCDC_LAYER_CHSR 0x8
@ -128,6 +129,47 @@
#define ATMEL_HLCDC_MAX_LAYERS 6
/* XLCDC controller specific registers */
#define ATMEL_XLCDC_LAYER_ENR 0x10
#define ATMEL_XLCDC_LAYER_EN BIT(0)
#define ATMEL_XLCDC_LAYER_IER 0x0
#define ATMEL_XLCDC_LAYER_IDR 0x4
#define ATMEL_XLCDC_LAYER_ISR 0xc
#define ATMEL_XLCDC_LAYER_OVR_IRQ(p) BIT(2 + (8 * (p)))
#define ATMEL_XLCDC_LAYER_PLANE_ADDR(p) (((p) * 0x4) + 0x18)
#define ATMEL_XLCDC_LAYER_DMA_CFG 0
#define ATMEL_XLCDC_LAYER_DMA BIT(0)
#define ATMEL_XLCDC_LAYER_REP BIT(1)
#define ATMEL_XLCDC_LAYER_DISCEN BIT(4)
#define ATMEL_XLCDC_LAYER_SFACTC_A0_MULT_AS (4 << 6)
#define ATMEL_XLCDC_LAYER_SFACTA_ONE BIT(9)
#define ATMEL_XLCDC_LAYER_DFACTC_M_A0_MULT_AS (6 << 11)
#define ATMEL_XLCDC_LAYER_DFACTA_ONE BIT(14)
#define ATMEL_XLCDC_LAYER_A0_SHIFT 16
#define ATMEL_XLCDC_LAYER_A0(x) \
((x) << ATMEL_XLCDC_LAYER_A0_SHIFT)
#define ATMEL_XLCDC_LAYER_VSCALER_LUMA_ENABLE BIT(0)
#define ATMEL_XLCDC_LAYER_VSCALER_CHROMA_ENABLE BIT(1)
#define ATMEL_XLCDC_LAYER_HSCALER_LUMA_ENABLE BIT(4)
#define ATMEL_XLCDC_LAYER_HSCALER_CHROMA_ENABLE BIT(5)
#define ATMEL_XLCDC_LAYER_VXSYCFG_ONE BIT(0)
#define ATMEL_XLCDC_LAYER_VXSYTAP2_ENABLE BIT(4)
#define ATMEL_XLCDC_LAYER_VXSCCFG_ONE BIT(16)
#define ATMEL_XLCDC_LAYER_VXSCTAP2_ENABLE BIT(20)
#define ATMEL_XLCDC_LAYER_HXSYCFG_ONE BIT(0)
#define ATMEL_XLCDC_LAYER_HXSYTAP2_ENABLE BIT(4)
#define ATMEL_XLCDC_LAYER_HXSCCFG_ONE BIT(16)
#define ATMEL_XLCDC_LAYER_HXSCTAP2_ENABLE BIT(20)
/**
* Atmel HLCDC Layer registers layout structure
*
@ -156,6 +198,8 @@
* @disc_pos: discard area position register
* @disc_size: discard area size register
* @csc: color space conversion register
* @vxs_config: vertical scalar filter taps control register
* @hxs_config: horizontal scalar filter taps control register
*/
struct atmel_hlcdc_layer_cfg_layout {
int xstride[ATMEL_HLCDC_LAYER_MAX_PLANES];
@ -175,6 +219,8 @@ struct atmel_hlcdc_layer_cfg_layout {
int disc_pos;
int disc_size;
int csc;
int vxs_config;
int hxs_config;
};
/**
@ -288,6 +334,64 @@ atmel_hlcdc_layer_to_plane(struct atmel_hlcdc_layer *layer)
return container_of(layer, struct atmel_hlcdc_plane, layer);
}
/**
* struct atmel_hlcdc_dc - Atmel HLCDC Display Controller.
* @desc: HLCDC Display Controller description
* @dscrpool: DMA coherent pool used to allocate DMA descriptors
* @hlcdc: pointer to the atmel_hlcdc structure provided by the MFD device
* @crtc: CRTC provided by the display controller
* @layers: active HLCDC layers
* @suspend: used to store the HLCDC state when entering suspend
* @suspend.imr: used to read/write LCDC Interrupt Mask Register
* @suspend.state: Atomic commit structure
*/
struct atmel_hlcdc_dc {
const struct atmel_hlcdc_dc_desc *desc;
struct dma_pool *dscrpool;
struct atmel_hlcdc *hlcdc;
struct drm_crtc *crtc;
struct atmel_hlcdc_layer *layers[ATMEL_HLCDC_MAX_LAYERS];
struct {
u32 imr;
struct drm_atomic_state *state;
} suspend;
};
struct atmel_hlcdc_plane_state;
/**
* struct atmel_lcdc_dc_ops - describes atmel_lcdc ops group
* to differentiate HLCDC and XLCDC IP code support
* @plane_setup_scaler: update the vertical and horizontal scaling factors
* @lcdc_update_buffers: update each LCDC layer's DMA registers
* @lcdc_atomic_disable: disable LCDC interrupts and layers
* @lcdc_update_general_settings: update each LCDC layer's general
* configuration register
* @lcdc_atomic_update: enable the LCDC layers and interrupts
* @lcdc_csc_init: update the color space conversion coefficients of the
* high-end overlay registers
* @lcdc_irq_dbg: raise an alert in case of an interrupt overrun in any
* LCDC layer
*/
struct atmel_lcdc_dc_ops {
void (*plane_setup_scaler)(struct atmel_hlcdc_plane *plane,
struct atmel_hlcdc_plane_state *state);
void (*lcdc_update_buffers)(struct atmel_hlcdc_plane *plane,
struct atmel_hlcdc_plane_state *state,
u32 sr, int i);
void (*lcdc_atomic_disable)(struct atmel_hlcdc_plane *plane);
void (*lcdc_update_general_settings)(struct atmel_hlcdc_plane *plane,
struct atmel_hlcdc_plane_state *state);
void (*lcdc_atomic_update)(struct atmel_hlcdc_plane *plane,
struct atmel_hlcdc_dc *dc);
void (*lcdc_csc_init)(struct atmel_hlcdc_plane *plane,
const struct atmel_hlcdc_layer_desc *desc);
void (*lcdc_irq_dbg)(struct atmel_hlcdc_plane *plane,
const struct atmel_hlcdc_layer_desc *desc);
};
extern const struct atmel_lcdc_dc_ops atmel_hlcdc_ops;
extern const struct atmel_lcdc_dc_ops atmel_xlcdc_ops;
/**
* Atmel HLCDC Display Controller description structure.
*
@ -304,8 +408,10 @@ atmel_hlcdc_layer_to_plane(struct atmel_hlcdc_layer *layer)
* @conflicting_output_formats: true if RGBXXX output formats conflict with
* each other.
* @fixed_clksrc: true if clock source is fixed
* @is_xlcdc: true if XLCDC IP is supported
* @layers: a layer description table describing available layers
* @nlayers: layer description table size
* @ops: atmel lcdc dc ops
*/
struct atmel_hlcdc_dc_desc {
int min_width;
@ -317,32 +423,10 @@ struct atmel_hlcdc_dc_desc {
int max_hpw;
bool conflicting_output_formats;
bool fixed_clksrc;
bool is_xlcdc;
const struct atmel_hlcdc_layer_desc *layers;
int nlayers;
};
/**
* Atmel HLCDC Display Controller.
*
* @desc: HLCDC Display Controller description
* @dscrpool: DMA coherent pool used to allocate DMA descriptors
* @hlcdc: pointer to the atmel_hlcdc structure provided by the MFD device
* @fbdev: framebuffer device attached to the Display Controller
* @crtc: CRTC provided by the display controller
* @planes: instantiated planes
* @layers: active HLCDC layers
* @suspend: used to store the HLCDC state when entering suspend
*/
struct atmel_hlcdc_dc {
const struct atmel_hlcdc_dc_desc *desc;
struct dma_pool *dscrpool;
struct atmel_hlcdc *hlcdc;
struct drm_crtc *crtc;
struct atmel_hlcdc_layer *layers[ATMEL_HLCDC_MAX_LAYERS];
struct {
u32 imr;
struct drm_atomic_state *state;
} suspend;
const struct atmel_lcdc_dc_ops *ops;
};
extern struct atmel_hlcdc_formats atmel_hlcdc_plane_rgb_formats;

View File

@ -282,8 +282,9 @@ atmel_hlcdc_plane_scaler_set_phicoeff(struct atmel_hlcdc_plane *plane,
coeff_tab[i]);
}
static void atmel_hlcdc_plane_setup_scaler(struct atmel_hlcdc_plane *plane,
struct atmel_hlcdc_plane_state *state)
static
void atmel_hlcdc_plane_setup_scaler(struct atmel_hlcdc_plane *plane,
struct atmel_hlcdc_plane_state *state)
{
const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc;
u32 xfactor, yfactor;
@ -330,11 +331,61 @@ static void atmel_hlcdc_plane_setup_scaler(struct atmel_hlcdc_plane *plane,
yfactor));
}
static
void atmel_xlcdc_plane_setup_scaler(struct atmel_hlcdc_plane *plane,
struct atmel_hlcdc_plane_state *state)
{
const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc;
u32 xfactor, yfactor;
if (!desc->layout.scaler_config)
return;
if (state->crtc_w == state->src_w && state->crtc_h == state->src_h) {
atmel_hlcdc_layer_write_cfg(&plane->layer,
desc->layout.scaler_config, 0);
return;
}
/* xfactor = round[(2^20 * XMEMSIZE) / XSIZE] */
xfactor = (u32)(((1 << 20) * state->src_w) / state->crtc_w);
/* yfactor = round[(2^20 * YMEMSIZE) / YSIZE] */
yfactor = (u32)(((1 << 20) * state->src_h) / state->crtc_h);
atmel_hlcdc_layer_write_cfg(&plane->layer, desc->layout.scaler_config,
ATMEL_XLCDC_LAYER_VSCALER_LUMA_ENABLE |
ATMEL_XLCDC_LAYER_VSCALER_CHROMA_ENABLE |
ATMEL_XLCDC_LAYER_HSCALER_LUMA_ENABLE |
ATMEL_XLCDC_LAYER_HSCALER_CHROMA_ENABLE);
atmel_hlcdc_layer_write_cfg(&plane->layer, desc->layout.scaler_config + 1,
yfactor);
atmel_hlcdc_layer_write_cfg(&plane->layer, desc->layout.scaler_config + 3,
xfactor);
/*
* With YCbCr 4:2:2 and YCbCr 4:2:0 window resampling, the configuration
* registers LCDC_HEOCFG25.VXSCFACT and LCDC_HEOCFG27.HXSCFACT are half
* the value of yfactor and xfactor.
*/
if (state->base.fb->format->format == DRM_FORMAT_YUV420) {
yfactor /= 2;
xfactor /= 2;
}
atmel_hlcdc_layer_write_cfg(&plane->layer, desc->layout.scaler_config + 2,
yfactor);
atmel_hlcdc_layer_write_cfg(&plane->layer, desc->layout.scaler_config + 4,
xfactor);
}
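For a concrete feel of the x.20 fixed-point factors computed above:

/* Worked example: upscaling a 640-pixel-wide source to a 1280-pixel window.
 * xfactor = (1 << 20) * 640 / 1280 = 0x80000  (ratio 0.5 in x.20 format)
 * For DRM_FORMAT_YUV420, the chroma factor written afterwards is then
 * xfactor / 2 = 0x40000.
 */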
static void
atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
struct atmel_hlcdc_plane_state *state)
{
const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc;
struct atmel_hlcdc_dc *dc = plane->base.dev->dev_private;
if (desc->layout.size)
atmel_hlcdc_layer_write_cfg(&plane->layer, desc->layout.size,
@ -352,12 +403,12 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
ATMEL_HLCDC_LAYER_POS(state->crtc_x,
state->crtc_y));
atmel_hlcdc_plane_setup_scaler(plane, state);
dc->desc->ops->plane_setup_scaler(plane, state);
}
static void
atmel_hlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane,
struct atmel_hlcdc_plane_state *state)
static
void atmel_hlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane,
struct atmel_hlcdc_plane_state *state)
{
unsigned int cfg = ATMEL_HLCDC_LAYER_DMA_BLEN_INCR16 | state->ahb_id;
const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc;
@ -393,6 +444,40 @@ atmel_hlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane,
cfg);
}
static
void atmel_xlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane,
struct atmel_hlcdc_plane_state *state)
{
const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc;
const struct drm_format_info *format = state->base.fb->format;
unsigned int cfg;
atmel_hlcdc_layer_write_cfg(&plane->layer, ATMEL_XLCDC_LAYER_DMA_CFG,
ATMEL_HLCDC_LAYER_DMA_BLEN_INCR16 | state->ahb_id);
cfg = ATMEL_XLCDC_LAYER_DMA | ATMEL_XLCDC_LAYER_REP;
if (plane->base.type != DRM_PLANE_TYPE_PRIMARY) {
/*
* Alpha Blending bits specific to SAM9X7 SoC
*/
cfg |= ATMEL_XLCDC_LAYER_SFACTC_A0_MULT_AS |
ATMEL_XLCDC_LAYER_SFACTA_ONE |
ATMEL_XLCDC_LAYER_DFACTC_M_A0_MULT_AS |
ATMEL_XLCDC_LAYER_DFACTA_ONE;
if (format->has_alpha)
cfg |= ATMEL_XLCDC_LAYER_A0(0xff);
else
cfg |= ATMEL_XLCDC_LAYER_A0(state->base.alpha);
}
if (state->disc_h && state->disc_w)
cfg |= ATMEL_XLCDC_LAYER_DISCEN;
atmel_hlcdc_layer_write_cfg(&plane->layer, desc->layout.general_config,
cfg);
}
static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
struct atmel_hlcdc_plane_state *state)
{
@ -437,36 +522,55 @@ static void atmel_hlcdc_plane_update_clut(struct atmel_hlcdc_plane *plane,
}
}
static void atmel_hlcdc_update_buffers(struct atmel_hlcdc_plane *plane,
struct atmel_hlcdc_plane_state *state,
u32 sr, int i)
{
atmel_hlcdc_layer_write_reg(&plane->layer,
ATMEL_HLCDC_LAYER_PLANE_HEAD(i),
state->dscrs[i]->self);
if (sr & ATMEL_HLCDC_LAYER_EN)
return;
atmel_hlcdc_layer_write_reg(&plane->layer,
ATMEL_HLCDC_LAYER_PLANE_ADDR(i),
state->dscrs[i]->addr);
atmel_hlcdc_layer_write_reg(&plane->layer,
ATMEL_HLCDC_LAYER_PLANE_CTRL(i),
state->dscrs[i]->ctrl);
atmel_hlcdc_layer_write_reg(&plane->layer,
ATMEL_HLCDC_LAYER_PLANE_NEXT(i),
state->dscrs[i]->self);
}
static void atmel_xlcdc_update_buffers(struct atmel_hlcdc_plane *plane,
struct atmel_hlcdc_plane_state *state,
u32 sr, int i)
{
atmel_hlcdc_layer_write_reg(&plane->layer,
ATMEL_XLCDC_LAYER_PLANE_ADDR(i),
state->dscrs[i]->addr);
}
static void atmel_hlcdc_plane_update_buffers(struct atmel_hlcdc_plane *plane,
struct atmel_hlcdc_plane_state *state)
struct atmel_hlcdc_plane_state *state)
{
const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc;
struct atmel_hlcdc_dc *dc = plane->base.dev->dev_private;
struct drm_framebuffer *fb = state->base.fb;
u32 sr;
int i;
sr = atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_HLCDC_LAYER_CHSR);
if (!dc->desc->is_xlcdc)
sr = atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_HLCDC_LAYER_CHSR);
for (i = 0; i < state->nplanes; i++) {
struct drm_gem_dma_object *gem = drm_fb_dma_get_gem_obj(fb, i);
state->dscrs[i]->addr = gem->dma_addr + state->offsets[i];
atmel_hlcdc_layer_write_reg(&plane->layer,
ATMEL_HLCDC_LAYER_PLANE_HEAD(i),
state->dscrs[i]->self);
if (!(sr & ATMEL_HLCDC_LAYER_EN)) {
atmel_hlcdc_layer_write_reg(&plane->layer,
ATMEL_HLCDC_LAYER_PLANE_ADDR(i),
state->dscrs[i]->addr);
atmel_hlcdc_layer_write_reg(&plane->layer,
ATMEL_HLCDC_LAYER_PLANE_CTRL(i),
state->dscrs[i]->ctrl);
atmel_hlcdc_layer_write_reg(&plane->layer,
ATMEL_HLCDC_LAYER_PLANE_NEXT(i),
state->dscrs[i]->self);
}
dc->desc->ops->lcdc_update_buffers(plane, state, sr, i);
if (desc->layout.xstride[i])
atmel_hlcdc_layer_write_cfg(&plane->layer,
@ -712,11 +816,8 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
return 0;
}
static void atmel_hlcdc_plane_atomic_disable(struct drm_plane *p,
struct drm_atomic_state *state)
static void atmel_hlcdc_atomic_disable(struct atmel_hlcdc_plane *plane)
{
struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
/* Disable interrupts */
atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_HLCDC_LAYER_IDR,
0xffffffff);
@ -731,31 +832,34 @@ static void atmel_hlcdc_plane_atomic_disable(struct drm_plane *p,
atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_HLCDC_LAYER_ISR);
}
static void atmel_hlcdc_plane_atomic_update(struct drm_plane *p,
struct drm_atomic_state *state)
static void atmel_xlcdc_atomic_disable(struct atmel_hlcdc_plane *plane)
{
/* Disable interrupts */
atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_XLCDC_LAYER_IDR,
0xffffffff);
/* Disable the layer */
atmel_hlcdc_layer_write_reg(&plane->layer,
ATMEL_XLCDC_LAYER_ENR, 0);
/* Clear all pending interrupts */
atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_XLCDC_LAYER_ISR);
}
static void atmel_hlcdc_plane_atomic_disable(struct drm_plane *p,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_s = drm_atomic_get_new_plane_state(state,
p);
struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
struct atmel_hlcdc_plane_state *hstate =
drm_plane_state_to_atmel_hlcdc_plane_state(new_s);
struct atmel_hlcdc_dc *dc = plane->base.dev->dev_private;
dc->desc->ops->lcdc_atomic_disable(plane);
}
static void atmel_hlcdc_atomic_update(struct atmel_hlcdc_plane *plane,
struct atmel_hlcdc_dc *dc)
{
u32 sr;
if (!new_s->crtc || !new_s->fb)
return;
if (!hstate->base.visible) {
atmel_hlcdc_plane_atomic_disable(p, state);
return;
}
atmel_hlcdc_plane_update_pos_and_size(plane, hstate);
atmel_hlcdc_plane_update_general_settings(plane, hstate);
atmel_hlcdc_plane_update_format(plane, hstate);
atmel_hlcdc_plane_update_clut(plane, hstate);
atmel_hlcdc_plane_update_buffers(plane, hstate);
atmel_hlcdc_plane_update_disc_area(plane, hstate);
/* Enable the overrun interrupts. */
atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_HLCDC_LAYER_IER,
ATMEL_HLCDC_LAYER_OVR_IRQ(0) |
@ -765,14 +869,129 @@ static void atmel_hlcdc_plane_atomic_update(struct drm_plane *p,
/* Apply the new config at the next SOF event. */
sr = atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_HLCDC_LAYER_CHSR);
atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_HLCDC_LAYER_CHER,
ATMEL_HLCDC_LAYER_UPDATE |
(sr & ATMEL_HLCDC_LAYER_EN ?
ATMEL_HLCDC_LAYER_A2Q : ATMEL_HLCDC_LAYER_EN));
ATMEL_HLCDC_LAYER_UPDATE |
(sr & ATMEL_HLCDC_LAYER_EN ?
ATMEL_HLCDC_LAYER_A2Q : ATMEL_HLCDC_LAYER_EN));
}
static void atmel_xlcdc_atomic_update(struct atmel_hlcdc_plane *plane,
struct atmel_hlcdc_dc *dc)
{
/* Enable the overrun interrupts. */
atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_XLCDC_LAYER_IER,
ATMEL_XLCDC_LAYER_OVR_IRQ(0) |
ATMEL_XLCDC_LAYER_OVR_IRQ(1) |
ATMEL_XLCDC_LAYER_OVR_IRQ(2));
atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_XLCDC_LAYER_ENR,
ATMEL_XLCDC_LAYER_EN);
/*
* On SAM9X7, updating XLCDC_xxxCFGx, XLCDC_xxxFBA and XLCDC_xxxEN
* (where xxx indicates each layer) requires writing one to the
* Update Attribute field of each layer in the LCDC_ATTRE register.
*/
regmap_write(dc->hlcdc->regmap, ATMEL_XLCDC_ATTRE, ATMEL_XLCDC_BASE_UPDATE |
ATMEL_XLCDC_OVR1_UPDATE | ATMEL_XLCDC_OVR3_UPDATE |
ATMEL_XLCDC_HEO_UPDATE);
}
static void atmel_hlcdc_plane_atomic_update(struct drm_plane *p,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_s = drm_atomic_get_new_plane_state(state,
p);
struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
struct atmel_hlcdc_plane_state *hstate =
drm_plane_state_to_atmel_hlcdc_plane_state(new_s);
struct atmel_hlcdc_dc *dc = p->dev->dev_private;
if (!new_s->crtc || !new_s->fb)
return;
if (!hstate->base.visible) {
atmel_hlcdc_plane_atomic_disable(p, state);
return;
}
atmel_hlcdc_plane_update_pos_and_size(plane, hstate);
dc->desc->ops->lcdc_update_general_settings(plane, hstate);
atmel_hlcdc_plane_update_format(plane, hstate);
atmel_hlcdc_plane_update_clut(plane, hstate);
atmel_hlcdc_plane_update_buffers(plane, hstate);
atmel_hlcdc_plane_update_disc_area(plane, hstate);
dc->desc->ops->lcdc_atomic_update(plane, dc);
}
static void atmel_hlcdc_csc_init(struct atmel_hlcdc_plane *plane,
const struct atmel_hlcdc_layer_desc *desc)
{
/*
* TODO: declare a "yuv-to-rgb-conv-factors" property to let
* userspace modify these factors (using a BLOB property?).
*/
static const u32 hlcdc_csc_coeffs[] = {
0x4c900091,
0x7a5f5090,
0x40040890
};
for (int i = 0; i < ARRAY_SIZE(hlcdc_csc_coeffs); i++) {
atmel_hlcdc_layer_write_cfg(&plane->layer,
desc->layout.csc + i,
hlcdc_csc_coeffs[i]);
}
}
static void atmel_xlcdc_csc_init(struct atmel_hlcdc_plane *plane,
const struct atmel_hlcdc_layer_desc *desc)
{
/*
* yuv-to-rgb-conv-factors are now defined in registers LCDC_HEOCFG16 to
* LCDC_HEOCFG21 on SAM9X7.
*/
static const u32 xlcdc_csc_coeffs[] = {
0x00000488,
0x00000648,
0x1EA00480,
0x00001D28,
0x08100480,
0x00000000,
0x00000007
};
for (int i = 0; i < ARRAY_SIZE(xlcdc_csc_coeffs); i++) {
atmel_hlcdc_layer_write_cfg(&plane->layer,
desc->layout.csc + i,
xlcdc_csc_coeffs[i]);
}
if (desc->layout.vxs_config && desc->layout.hxs_config) {
/*
* Updating vxs_config and hxs_config fixes the green color
* issue seen with the SAM9X7 EGT video player app.
*/
atmel_hlcdc_layer_write_cfg(&plane->layer,
desc->layout.vxs_config,
ATMEL_XLCDC_LAYER_VXSYCFG_ONE |
ATMEL_XLCDC_LAYER_VXSYTAP2_ENABLE |
ATMEL_XLCDC_LAYER_VXSCCFG_ONE |
ATMEL_XLCDC_LAYER_VXSCTAP2_ENABLE);
atmel_hlcdc_layer_write_cfg(&plane->layer,
desc->layout.hxs_config,
ATMEL_XLCDC_LAYER_HXSYCFG_ONE |
ATMEL_XLCDC_LAYER_HXSYTAP2_ENABLE |
ATMEL_XLCDC_LAYER_HXSCCFG_ONE |
ATMEL_XLCDC_LAYER_HXSCTAP2_ENABLE);
}
}
static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane)
{
const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc;
struct atmel_hlcdc_dc *dc = plane->base.dev->dev_private;
if (desc->type == ATMEL_HLCDC_OVERLAY_LAYER ||
desc->type == ATMEL_HLCDC_CURSOR_LAYER) {
@ -796,31 +1015,16 @@ static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane)
return ret;
}
if (desc->layout.csc) {
/*
* TODO: declare a "yuv-to-rgb-conv-factors" property to let
* userspace modify these factors (using a BLOB property?).
*/
atmel_hlcdc_layer_write_cfg(&plane->layer,
desc->layout.csc,
0x4c900091);
atmel_hlcdc_layer_write_cfg(&plane->layer,
desc->layout.csc + 1,
0x7a5f5090);
atmel_hlcdc_layer_write_cfg(&plane->layer,
desc->layout.csc + 2,
0x40040890);
}
if (desc->layout.csc)
dc->desc->ops->lcdc_csc_init(plane, desc);
return 0;
}
void atmel_hlcdc_plane_irq(struct atmel_hlcdc_plane *plane)
static void atmel_hlcdc_irq_dbg(struct atmel_hlcdc_plane *plane,
const struct atmel_hlcdc_layer_desc *desc)
{
const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc;
u32 isr;
isr = atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_HLCDC_LAYER_ISR);
u32 isr = atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_HLCDC_LAYER_ISR);
/*
* There's not much we can do in case of overrun except informing
@ -834,6 +1038,51 @@ void atmel_hlcdc_plane_irq(struct atmel_hlcdc_plane *plane)
desc->name);
}
static void atmel_xlcdc_irq_dbg(struct atmel_hlcdc_plane *plane,
const struct atmel_hlcdc_layer_desc *desc)
{
u32 isr = atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_XLCDC_LAYER_ISR);
/*
* There's not much we can do in case of overrun except informing
* the user. However, we are in interrupt context here, hence the
* use of dev_dbg().
*/
if (isr &
(ATMEL_XLCDC_LAYER_OVR_IRQ(0) | ATMEL_XLCDC_LAYER_OVR_IRQ(1) |
ATMEL_XLCDC_LAYER_OVR_IRQ(2)))
dev_dbg(plane->base.dev->dev, "overrun on plane %s\n",
desc->name);
}
void atmel_hlcdc_plane_irq(struct atmel_hlcdc_plane *plane)
{
const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc;
struct atmel_hlcdc_dc *dc = plane->base.dev->dev_private;
dc->desc->ops->lcdc_irq_dbg(plane, desc);
}
const struct atmel_lcdc_dc_ops atmel_hlcdc_ops = {
.plane_setup_scaler = atmel_hlcdc_plane_setup_scaler,
.lcdc_update_buffers = atmel_hlcdc_update_buffers,
.lcdc_atomic_disable = atmel_hlcdc_atomic_disable,
.lcdc_update_general_settings = atmel_hlcdc_plane_update_general_settings,
.lcdc_atomic_update = atmel_hlcdc_atomic_update,
.lcdc_csc_init = atmel_hlcdc_csc_init,
.lcdc_irq_dbg = atmel_hlcdc_irq_dbg,
};
const struct atmel_lcdc_dc_ops atmel_xlcdc_ops = {
.plane_setup_scaler = atmel_xlcdc_plane_setup_scaler,
.lcdc_update_buffers = atmel_xlcdc_update_buffers,
.lcdc_atomic_disable = atmel_xlcdc_atomic_disable,
.lcdc_update_general_settings = atmel_xlcdc_plane_update_general_settings,
.lcdc_atomic_update = atmel_xlcdc_atomic_update,
.lcdc_csc_init = atmel_xlcdc_csc_init,
.lcdc_irq_dbg = atmel_xlcdc_irq_dbg,
};
static const struct drm_plane_helper_funcs atmel_hlcdc_layer_plane_helper_funcs = {
.atomic_check = atmel_hlcdc_plane_atomic_check,
.atomic_update = atmel_hlcdc_plane_atomic_update,

View File

@ -877,11 +877,6 @@ static int adv7511_connector_init(struct adv7511 *adv)
struct drm_bridge *bridge = &adv->bridge;
int ret;
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
}
if (adv->i2c_main->irq)
adv->connector.polled = DRM_CONNECTOR_POLL_HPD;
else

View File

@ -47,7 +47,7 @@ struct anx6345 {
struct drm_dp_aux aux;
struct drm_bridge bridge;
struct i2c_client *client;
struct edid *edid;
const struct drm_edid *drm_edid;
struct drm_connector connector;
struct drm_panel *panel;
struct regulator *dvdd12;
@ -458,7 +458,7 @@ static int anx6345_get_modes(struct drm_connector *connector)
mutex_lock(&anx6345->lock);
if (!anx6345->edid) {
if (!anx6345->drm_edid) {
if (!anx6345->powered) {
anx6345_poweron(anx6345);
power_off = true;
@ -470,19 +470,18 @@ static int anx6345_get_modes(struct drm_connector *connector)
goto unlock;
}
anx6345->edid = drm_get_edid(connector, &anx6345->aux.ddc);
if (!anx6345->edid)
anx6345->drm_edid = drm_edid_read_ddc(connector, &anx6345->aux.ddc);
if (!anx6345->drm_edid)
DRM_ERROR("Failed to read EDID from panel\n");
err = drm_connector_update_edid_property(connector,
anx6345->edid);
err = drm_edid_connector_update(connector, anx6345->drm_edid);
if (err) {
DRM_ERROR("Failed to update EDID property: %d\n", err);
goto unlock;
}
}
num_modes += drm_add_edid_modes(connector, anx6345->edid);
num_modes += drm_edid_connector_add_modes(connector);
/* Driver currently supports only 6bpc */
connector->display_info.bpc = 6;
@ -528,11 +527,6 @@ static int anx6345_bridge_attach(struct drm_bridge *bridge,
return -EINVAL;
}
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
}
/* Register aux channel */
anx6345->aux.name = "DP-AUX";
anx6345->aux.dev = &anx6345->client->dev;
@ -793,7 +787,7 @@ static void anx6345_i2c_remove(struct i2c_client *client)
unregister_i2c_dummy_clients(anx6345);
kfree(anx6345->edid);
drm_edid_free(anx6345->drm_edid);
mutex_destroy(&anx6345->lock);
}
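This driver (and anx78xx below) migrates from the raw struct edid API to struct drm_edid. The call sequence used in these hunks, condensed, with error handling trimmed and ddc standing for the bridge's AUX DDC adapter:

/* struct drm_edid lifecycle as used above (sketch). */
const struct drm_edid *drm_edid;
int num_modes;

drm_edid = drm_edid_read_ddc(connector, ddc);        /* read + allocate */
drm_edid_connector_update(connector, drm_edid);      /* update EDID property */
num_modes = drm_edid_connector_add_modes(connector); /* add probed modes */
/* ... and on unplug or driver remove: */
drm_edid_free(drm_edid);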

View File

@ -67,7 +67,7 @@ struct anx78xx {
struct drm_dp_aux aux;
struct drm_bridge bridge;
struct i2c_client *client;
struct edid *edid;
const struct drm_edid *drm_edid;
struct drm_connector connector;
struct anx78xx_platform_data pdata;
struct mutex lock;
@ -830,8 +830,8 @@ static int anx78xx_get_modes(struct drm_connector *connector)
if (WARN_ON(!anx78xx->powered))
return 0;
if (anx78xx->edid)
return drm_add_edid_modes(connector, anx78xx->edid);
if (anx78xx->drm_edid)
return drm_edid_connector_add_modes(connector);
mutex_lock(&anx78xx->lock);
@ -841,20 +841,21 @@ static int anx78xx_get_modes(struct drm_connector *connector)
goto unlock;
}
anx78xx->edid = drm_get_edid(connector, &anx78xx->aux.ddc);
if (!anx78xx->edid) {
anx78xx->drm_edid = drm_edid_read_ddc(connector, &anx78xx->aux.ddc);
err = drm_edid_connector_update(connector, anx78xx->drm_edid);
if (!anx78xx->drm_edid) {
DRM_ERROR("Failed to read EDID\n");
goto unlock;
}
err = drm_connector_update_edid_property(connector,
anx78xx->edid);
if (err) {
DRM_ERROR("Failed to update EDID property: %d\n", err);
goto unlock;
}
num_modes = drm_add_edid_modes(connector, anx78xx->edid);
num_modes = drm_edid_connector_add_modes(connector);
unlock:
mutex_unlock(&anx78xx->lock);
@ -897,11 +898,6 @@ static int anx78xx_bridge_attach(struct drm_bridge *bridge,
return -EINVAL;
}
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
}
/* Register aux channel */
anx78xx->aux.name = "DP-AUX";
anx78xx->aux.dev = &anx78xx->client->dev;
@ -1091,8 +1087,8 @@ static bool anx78xx_handle_common_int_4(struct anx78xx *anx78xx, u8 irq)
event = true;
anx78xx_poweroff(anx78xx);
/* Free cached EDID */
kfree(anx78xx->edid);
anx78xx->edid = NULL;
drm_edid_free(anx78xx->drm_edid);
anx78xx->drm_edid = NULL;
} else if (irq & SP_HPD_PLUG) {
DRM_DEBUG_KMS("IRQ: Hot plug detect - cable plug\n");
event = true;
@ -1363,7 +1359,7 @@ static void anx78xx_i2c_remove(struct i2c_client *client)
unregister_i2c_dummy_clients(anx78xx);
kfree(anx78xx->edid);
drm_edid_free(anx78xx->drm_edid);
}
static const struct of_device_id anx78xx_match_table[] = {

View File

@ -234,28 +234,6 @@ static int analogix_dp_training_pattern_dis(struct analogix_dp_device *dp)
return ret < 0 ? ret : 0;
}
static void
analogix_dp_set_lane_lane_pre_emphasis(struct analogix_dp_device *dp,
int pre_emphasis, int lane)
{
switch (lane) {
case 0:
analogix_dp_set_lane0_pre_emphasis(dp, pre_emphasis);
break;
case 1:
analogix_dp_set_lane1_pre_emphasis(dp, pre_emphasis);
break;
case 2:
analogix_dp_set_lane2_pre_emphasis(dp, pre_emphasis);
break;
case 3:
analogix_dp_set_lane3_pre_emphasis(dp, pre_emphasis);
break;
}
}
static int analogix_dp_link_start(struct analogix_dp_device *dp)
{
u8 buf[4];
@ -286,10 +264,12 @@ static int analogix_dp_link_start(struct analogix_dp_device *dp)
return retval;
}
/* Set TX pre-emphasis to minimum */
/* Set TX voltage-swing and pre-emphasis to minimum */
for (lane = 0; lane < lane_count; lane++)
analogix_dp_set_lane_lane_pre_emphasis(dp,
PRE_EMPHASIS_LEVEL_0, lane);
dp->link_train.training_lane[lane] =
DP_TRAIN_VOLTAGE_SWING_LEVEL_0 |
DP_TRAIN_PRE_EMPH_LEVEL_0;
analogix_dp_set_lane_link_training(dp);
/* Wait for PLL lock */
pll_tries = 0;
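
The new initialization programs the same byte into both the sink's TRAINING_LANEx_SET DPCD registers and the TX registers: the voltage-swing level lives in bits 1:0 and the pre-emphasis level in bits 4:3, which is all the DP_TRAIN_*_LEVEL_0 defines encode. A one-liner to make the packing explicit (helper name is illustrative):

	#include <linux/types.h>
	#include <drm/display/drm_dp.h>

	static u8 training_lane_set(u8 vswing, u8 preemph)
	{
		/* swing in bits 1:0, pre-emphasis in bits 4:3 */
		return (vswing << DP_TRAIN_VOLTAGE_SWING_SHIFT) |
		       (preemph << DP_TRAIN_PRE_EMPHASIS_SHIFT);
	}

	/* training_lane_set(0, 0) == DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0 */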
@ -384,54 +364,6 @@ static unsigned char analogix_dp_get_adjust_request_pre_emphasis(
return ((link_value >> shift) & 0xc) >> 2;
}
static void analogix_dp_set_lane_link_training(struct analogix_dp_device *dp,
u8 training_lane_set, int lane)
{
switch (lane) {
case 0:
analogix_dp_set_lane0_link_training(dp, training_lane_set);
break;
case 1:
analogix_dp_set_lane1_link_training(dp, training_lane_set);
break;
case 2:
analogix_dp_set_lane2_link_training(dp, training_lane_set);
break;
case 3:
analogix_dp_set_lane3_link_training(dp, training_lane_set);
break;
}
}
static unsigned int
analogix_dp_get_lane_link_training(struct analogix_dp_device *dp,
int lane)
{
u32 reg;
switch (lane) {
case 0:
reg = analogix_dp_get_lane0_link_training(dp);
break;
case 1:
reg = analogix_dp_get_lane1_link_training(dp);
break;
case 2:
reg = analogix_dp_get_lane2_link_training(dp);
break;
case 3:
reg = analogix_dp_get_lane3_link_training(dp);
break;
default:
WARN_ON(1);
return 0;
}
return reg;
}
static void analogix_dp_reduce_link_rate(struct analogix_dp_device *dp)
{
analogix_dp_training_pattern_dis(dp);
@ -478,11 +410,6 @@ static int analogix_dp_process_clock_recovery(struct analogix_dp_device *dp)
if (retval < 0)
return retval;
retval = drm_dp_dpcd_read(&dp->aux, DP_ADJUST_REQUEST_LANE0_1,
adjust_request, 2);
if (retval < 0)
return retval;
if (analogix_dp_clock_recovery_ok(link_status, lane_count) == 0) {
/* set training pattern 2 for EQ */
analogix_dp_set_training_pattern(dp, TRAINING_PTN2);
@ -495,38 +422,37 @@ static int analogix_dp_process_clock_recovery(struct analogix_dp_device *dp)
dev_dbg(dp->dev, "Link Training Clock Recovery success\n");
dp->link_train.lt_state = EQUALIZER_TRAINING;
} else {
for (lane = 0; lane < lane_count; lane++) {
training_lane = analogix_dp_get_lane_link_training(
dp, lane);
voltage_swing = analogix_dp_get_adjust_request_voltage(
adjust_request, lane);
pre_emphasis = analogix_dp_get_adjust_request_pre_emphasis(
adjust_request, lane);
if (DPCD_VOLTAGE_SWING_GET(training_lane) ==
voltage_swing &&
DPCD_PRE_EMPHASIS_GET(training_lane) ==
pre_emphasis)
dp->link_train.cr_loop[lane]++;
return 0;
}
if (dp->link_train.cr_loop[lane] == MAX_CR_LOOP ||
voltage_swing == VOLTAGE_LEVEL_3 ||
pre_emphasis == PRE_EMPHASIS_LEVEL_3) {
dev_err(dp->dev, "CR Max reached (%d,%d,%d)\n",
dp->link_train.cr_loop[lane],
voltage_swing, pre_emphasis);
analogix_dp_reduce_link_rate(dp);
return -EIO;
}
retval = drm_dp_dpcd_read(&dp->aux, DP_ADJUST_REQUEST_LANE0_1,
adjust_request, 2);
if (retval < 0)
return retval;
for (lane = 0; lane < lane_count; lane++) {
training_lane = analogix_dp_get_lane_link_training(dp, lane);
voltage_swing = analogix_dp_get_adjust_request_voltage(adjust_request, lane);
pre_emphasis = analogix_dp_get_adjust_request_pre_emphasis(adjust_request, lane);
if (DPCD_VOLTAGE_SWING_GET(training_lane) == voltage_swing &&
DPCD_PRE_EMPHASIS_GET(training_lane) == pre_emphasis)
dp->link_train.cr_loop[lane]++;
if (dp->link_train.cr_loop[lane] == MAX_CR_LOOP ||
voltage_swing == VOLTAGE_LEVEL_3 ||
pre_emphasis == PRE_EMPHASIS_LEVEL_3) {
dev_err(dp->dev, "CR Max reached (%d,%d,%d)\n",
dp->link_train.cr_loop[lane],
voltage_swing, pre_emphasis);
analogix_dp_reduce_link_rate(dp);
return -EIO;
}
}
analogix_dp_get_adjust_training_lane(dp, adjust_request);
for (lane = 0; lane < lane_count; lane++)
analogix_dp_set_lane_link_training(dp,
dp->link_train.training_lane[lane], lane);
analogix_dp_set_lane_link_training(dp);
retval = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET,
dp->link_train.training_lane, lane_count);
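
The restructured clock-recovery path defers the DP_ADJUST_REQUEST_LANE0_1 read until clock recovery has actually failed, counts how often the sink keeps requesting the currently programmed levels, and reduces the link rate once MAX_CR_LOOP or the maximum drive levels are reached. The adjust-request bytes pack two lanes each; a standalone sketch of the field extraction the get_adjust_request_* helpers perform (a rewrite for illustration, not the driver's exact code):

	#include <linux/types.h>

	/* bits 1:0 swing, 3:2 pre-emphasis for the even lane; 5:4 and 7:6 for the odd */
	static u8 adjust_request_voltage(const u8 adjust[2], int lane)
	{
		int shift = (lane & 1) * 4;

		return (adjust[lane / 2] >> shift) & 0x3;
	}

	static u8 adjust_request_pre_emphasis(const u8 adjust[2], int lane)
	{
		int shift = (lane & 1) * 4;

		return ((adjust[lane / 2] >> shift) & 0xc) >> 2;
	}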
@ -538,7 +464,7 @@ static int analogix_dp_process_clock_recovery(struct analogix_dp_device *dp)
static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
{
int lane, lane_count, retval;
int lane_count, retval;
u32 reg;
u8 link_align, link_status[2], adjust_request[2];
@ -598,9 +524,7 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
return -EIO;
}
for (lane = 0; lane < lane_count; lane++)
analogix_dp_set_lane_link_training(dp,
dp->link_train.training_lane[lane], lane);
analogix_dp_set_lane_link_training(dp);
retval = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET,
dp->link_train.training_lane, lane_count);
@ -712,7 +636,7 @@ static int analogix_dp_full_link_train(struct analogix_dp_device *dp,
static int analogix_dp_fast_link_train(struct analogix_dp_device *dp)
{
int i, ret;
int ret;
u8 link_align, link_status[2];
enum pll_status status;
@ -720,11 +644,7 @@ static int analogix_dp_fast_link_train(struct analogix_dp_device *dp)
analogix_dp_set_link_bandwidth(dp, dp->link_train.link_rate);
analogix_dp_set_lane_count(dp, dp->link_train.lane_count);
for (i = 0; i < dp->link_train.lane_count; i++) {
analogix_dp_set_lane_link_training(dp,
dp->link_train.training_lane[i], i);
}
analogix_dp_set_lane_link_training(dp);
ret = readx_poll_timeout(analogix_dp_get_pll_lock_status, dp, status,
status != PLL_UNLOCKED, 120,
@ -1108,7 +1028,7 @@ out:
static int analogix_dp_get_modes(struct drm_connector *connector)
{
struct analogix_dp_device *dp = to_dp(connector);
struct edid *edid;
const struct drm_edid *drm_edid;
int ret, num_modes = 0;
if (dp->plat_data->panel) {
@ -1120,12 +1040,13 @@ static int analogix_dp_get_modes(struct drm_connector *connector)
return 0;
}
edid = drm_get_edid(connector, &dp->aux.ddc);
if (edid) {
drm_connector_update_edid_property(&dp->connector,
edid);
num_modes += drm_add_edid_modes(&dp->connector, edid);
kfree(edid);
drm_edid = drm_edid_read_ddc(connector, &dp->aux.ddc);
drm_edid_connector_update(&dp->connector, drm_edid);
if (drm_edid) {
num_modes += drm_edid_connector_add_modes(&dp->connector);
drm_edid_free(drm_edid);
}
ret = analogix_dp_prepare_panel(dp, false, false);
@ -1228,11 +1149,6 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge,
return -EINVAL;
}
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
}
if (!dp->plat_data->skip_connector) {
connector = &dp->connector;
connector->polled = DRM_CONNECTOR_POLL_HPD;

View File

@ -213,26 +213,8 @@ void analogix_dp_enable_enhanced_mode(struct analogix_dp_device *dp,
bool enable);
void analogix_dp_set_training_pattern(struct analogix_dp_device *dp,
enum pattern_set pattern);
void analogix_dp_set_lane0_pre_emphasis(struct analogix_dp_device *dp,
u32 level);
void analogix_dp_set_lane1_pre_emphasis(struct analogix_dp_device *dp,
u32 level);
void analogix_dp_set_lane2_pre_emphasis(struct analogix_dp_device *dp,
u32 level);
void analogix_dp_set_lane3_pre_emphasis(struct analogix_dp_device *dp,
u32 level);
void analogix_dp_set_lane0_link_training(struct analogix_dp_device *dp,
u32 training_lane);
void analogix_dp_set_lane1_link_training(struct analogix_dp_device *dp,
u32 training_lane);
void analogix_dp_set_lane2_link_training(struct analogix_dp_device *dp,
u32 training_lane);
void analogix_dp_set_lane3_link_training(struct analogix_dp_device *dp,
u32 training_lane);
u32 analogix_dp_get_lane0_link_training(struct analogix_dp_device *dp);
u32 analogix_dp_get_lane1_link_training(struct analogix_dp_device *dp);
u32 analogix_dp_get_lane2_link_training(struct analogix_dp_device *dp);
u32 analogix_dp_get_lane3_link_training(struct analogix_dp_device *dp);
void analogix_dp_set_lane_link_training(struct analogix_dp_device *dp);
u32 analogix_dp_get_lane_link_training(struct analogix_dp_device *dp, u8 lane);
void analogix_dp_reset_macro(struct analogix_dp_device *dp);
void analogix_dp_init_video(struct analogix_dp_device *dp);

View File

@ -557,6 +557,20 @@ void analogix_dp_get_lane_count(struct analogix_dp_device *dp, u32 *count)
*count = reg;
}
void analogix_dp_set_lane_link_training(struct analogix_dp_device *dp)
{
u8 lane;
for (lane = 0; lane < dp->link_train.lane_count; lane++)
writel(dp->link_train.training_lane[lane],
dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL + 4 * lane);
}
u32 analogix_dp_get_lane_link_training(struct analogix_dp_device *dp, u8 lane)
{
return readl(dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL + 4 * lane);
}
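
The sixteen per-lane setters and getters collapse into the pair above because the LN0..LN3_LINK_TRAINING_CTL registers are laid out 4 bytes apart, so ANALOGIX_DP_LN0_LINK_TRAINING_CTL + 4 * lane addresses every lane. The idiom in isolation (base and stride values illustrative):

	#include <linux/io.h>
	#include <linux/types.h>

	#define LN_TRAINING_CTL(lane)	(0x30 + 4 * (lane))	/* illustrative offsets */

	static void lane_train_write(void __iomem *base, u8 lane, u32 val)
	{
		writel(val, base + LN_TRAINING_CTL(lane));
	}

	static u32 lane_train_read(void __iomem *base, u8 lane)
	{
		return readl(base + LN_TRAINING_CTL(lane));
	}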
void analogix_dp_enable_enhanced_mode(struct analogix_dp_device *dp,
bool enable)
{
@ -606,106 +620,6 @@ void analogix_dp_set_training_pattern(struct analogix_dp_device *dp,
}
}
void analogix_dp_set_lane0_pre_emphasis(struct analogix_dp_device *dp,
u32 level)
{
u32 reg;
reg = readl(dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL);
reg &= ~PRE_EMPHASIS_SET_MASK;
reg |= level << PRE_EMPHASIS_SET_SHIFT;
writel(reg, dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL);
}
void analogix_dp_set_lane1_pre_emphasis(struct analogix_dp_device *dp,
u32 level)
{
u32 reg;
reg = readl(dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL);
reg &= ~PRE_EMPHASIS_SET_MASK;
reg |= level << PRE_EMPHASIS_SET_SHIFT;
writel(reg, dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL);
}
void analogix_dp_set_lane2_pre_emphasis(struct analogix_dp_device *dp,
u32 level)
{
u32 reg;
reg = readl(dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL);
reg &= ~PRE_EMPHASIS_SET_MASK;
reg |= level << PRE_EMPHASIS_SET_SHIFT;
writel(reg, dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL);
}
void analogix_dp_set_lane3_pre_emphasis(struct analogix_dp_device *dp,
u32 level)
{
u32 reg;
reg = readl(dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL);
reg &= ~PRE_EMPHASIS_SET_MASK;
reg |= level << PRE_EMPHASIS_SET_SHIFT;
writel(reg, dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL);
}
void analogix_dp_set_lane0_link_training(struct analogix_dp_device *dp,
u32 training_lane)
{
u32 reg;
reg = training_lane;
writel(reg, dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL);
}
void analogix_dp_set_lane1_link_training(struct analogix_dp_device *dp,
u32 training_lane)
{
u32 reg;
reg = training_lane;
writel(reg, dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL);
}
void analogix_dp_set_lane2_link_training(struct analogix_dp_device *dp,
u32 training_lane)
{
u32 reg;
reg = training_lane;
writel(reg, dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL);
}
void analogix_dp_set_lane3_link_training(struct analogix_dp_device *dp,
u32 training_lane)
{
u32 reg;
reg = training_lane;
writel(reg, dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL);
}
u32 analogix_dp_get_lane0_link_training(struct analogix_dp_device *dp)
{
return readl(dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL);
}
u32 analogix_dp_get_lane1_link_training(struct analogix_dp_device *dp)
{
return readl(dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL);
}
u32 analogix_dp_get_lane2_link_training(struct analogix_dp_device *dp)
{
return readl(dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL);
}
u32 analogix_dp_get_lane3_link_training(struct analogix_dp_device *dp)
{
return readl(dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL);
}
void analogix_dp_reset_macro(struct analogix_dp_device *dp)
{
u32 reg;
@ -1027,7 +941,6 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
u32 status_reg;
u8 *buffer = msg->buffer;
unsigned int i;
int num_transferred = 0;
int ret;
/* Buffer size of AUX CH is 16 bytes */
@ -1079,7 +992,6 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
reg = buffer[i];
writel(reg, dp->reg_base + ANALOGIX_DP_BUF_DATA_0 +
4 * i);
num_transferred++;
}
}
@ -1127,7 +1039,6 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0 +
4 * i);
buffer[i] = (unsigned char)reg;
num_transferred++;
}
}
@ -1144,7 +1055,7 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
(msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_READ)
msg->reply = DP_AUX_NATIVE_REPLY_ACK;
return num_transferred > 0 ? num_transferred : -EBUSY;
return msg->size;
aux_error:
/* on an AUX error, reset the AUX channel */
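
With num_transferred gone, success is reported as msg->size: the transfer loop above moves msg->size bytes once the hardware signals completion, so the separate counter only duplicated that value. This matches the drm_dp_aux .transfer contract of returning the transferred byte count or a negative errno, with the sink's reply code left in msg->reply. Skeleton of that contract (hw_do_aux_xfer() is hypothetical):

	#include <drm/display/drm_dp_helper.h>

	static ssize_t my_aux_transfer(struct drm_dp_aux *aux,
				       struct drm_dp_aux_msg *msg)
	{
		int ret;

		ret = hw_do_aux_xfer(aux, msg);	/* hypothetical hardware transaction */
		if (ret < 0)
			return ret;		/* negative errno on failure */

		msg->reply = DP_AUX_NATIVE_REPLY_ACK;
		return msg->size;		/* bytes transferred on success */
	}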

View File

@ -464,9 +464,11 @@ static int anx7625_odfc_config(struct anx7625_data *ctx,
*/
static int anx7625_set_k_value(struct anx7625_data *ctx)
{
struct edid *edid = (struct edid *)ctx->slimport_edid_p.edid_raw_data;
struct drm_edid_product_id id;
if (edid->mfg_id[0] == IVO_MID0 && edid->mfg_id[1] == IVO_MID1)
drm_edid_get_product_id(ctx->cached_drm_edid, &id);
if (be16_to_cpu(id.manufacturer_name) == IVO_MID)
return anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
MIPI_DIGITAL_ADJ_1, 0x3B);
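
drm_edid_get_product_id() copies the EDID vendor/product block out of the cached struct drm_edid; manufacturer_name holds the packed big-endian PNP ID, so the old two-byte IVO_MID0/IVO_MID1 comparison collapses into a single be16_to_cpu() test against IVO_MID (0x26CF). Decoding that packing, for illustration (the helper is hypothetical; the 5-bits-per-letter layout is standard EDID):

	#include <linux/types.h>

	/* each letter is 5 bits, 1 == 'A'; bit 15 is reserved */
	static void pnp_id_decode(u16 mfg, char out[4])
	{
		out[0] = '@' + ((mfg >> 10) & 0x1f);
		out[1] = '@' + ((mfg >> 5) & 0x1f);
		out[2] = '@' + (mfg & 0x1f);
		out[3] = '\0';
	}

	/* pnp_id_decode(0x26CF, buf) yields "IVO", matching the IVO_MID check */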
@ -1526,7 +1528,8 @@ static int anx7625_wait_hpd_asserted(struct drm_dp_aux *aux,
static void anx7625_remove_edid(struct anx7625_data *ctx)
{
ctx->slimport_edid_p.edid_block_num = -1;
drm_edid_free(ctx->cached_drm_edid);
ctx->cached_drm_edid = NULL;
}
static void anx7625_dp_adjust_swing(struct anx7625_data *ctx)
@ -1787,27 +1790,32 @@ static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux,
static const struct drm_edid *anx7625_edid_read(struct anx7625_data *ctx)
{
struct device *dev = ctx->dev;
struct s_edid_data *p_edid = &ctx->slimport_edid_p;
u8 *edid_buf;
int edid_num;
if (ctx->slimport_edid_p.edid_block_num > 0)
if (ctx->cached_drm_edid)
goto out;
edid_buf = kmalloc(FOUR_BLOCK_SIZE, GFP_KERNEL);
if (!edid_buf)
return NULL;
pm_runtime_get_sync(dev);
_anx7625_hpd_polling(ctx, 5000 * 100);
edid_num = sp_tx_edid_read(ctx, p_edid->edid_raw_data);
edid_num = sp_tx_edid_read(ctx, edid_buf);
pm_runtime_put_sync(dev);
if (edid_num < 1) {
DRM_DEV_ERROR(dev, "Fail to read EDID: %d\n", edid_num);
kfree(edid_buf);
return NULL;
}
p_edid->edid_block_num = edid_num;
ctx->cached_drm_edid = drm_edid_alloc(edid_buf, FOUR_BLOCK_SIZE);
kfree(edid_buf);
out:
return drm_edid_alloc(ctx->slimport_edid_p.edid_raw_data,
FOUR_BLOCK_SIZE);
return drm_edid_dup(ctx->cached_drm_edid);
}
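
The rework caches a parsed struct drm_edid instead of a raw four-block buffer: the first successful read allocates the cache with drm_edid_alloc(), later calls hit the cache, and each caller receives an independent copy via drm_edid_dup() that it must free itself. The ownership pattern in miniature (my_ctx and read_hw_edid() are illustrative):

	#include <drm/drm_edid.h>

	static const struct drm_edid *my_edid_get(struct my_ctx *ctx)
	{
		if (!ctx->cached_drm_edid)
			ctx->cached_drm_edid = read_hw_edid(ctx);	/* fills the cache */

		/* drm_edid_dup(NULL) returns NULL, so the failure case falls through */
		return drm_edid_dup(ctx->cached_drm_edid);	/* caller must drm_edid_free() */
	}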
static enum drm_connector_status anx7625_sink_detect(struct anx7625_data *ctx)
@ -2193,11 +2201,6 @@ static int anx7625_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
if (!bridge->encoder) {
DRM_DEV_ERROR(dev, "Parent encoder object not found");
return -ENODEV;
}
ctx->aux.drm_dev = bridge->dev;
err = drm_dp_aux_register(&ctx->aux);
if (err) {
@ -2435,11 +2438,6 @@ static void anx7625_bridge_atomic_enable(struct drm_bridge *bridge,
dev_dbg(dev, "drm atomic enable\n");
if (!bridge->encoder) {
dev_err(dev, "Parent encoder object not found");
return;
}
connector = drm_atomic_get_new_connector_for_encoder(state->base.state,
bridge->encoder);
if (!connector)

View File

@ -286,8 +286,7 @@
#define MIPI_LANE_CTRL_10 0x0F
#define MIPI_DIGITAL_ADJ_1 0x1B
#define IVO_MID0 0x26
#define IVO_MID1 0xCF
#define IVO_MID 0x26CF
#define MIPI_PLL_M_NUM_23_16 0x1E
#define MIPI_PLL_M_NUM_15_8 0x1F
@ -417,11 +416,6 @@ enum audio_wd_len {
#define EDID_TRY_CNT 3
#define SUPPORT_PIXEL_CLOCK 300000
struct s_edid_data {
int edid_block_num;
u8 edid_raw_data[FOUR_BLOCK_SIZE];
};
/***************** Display End *****************/
#define MAX_LANES_SUPPORT 4
@ -466,7 +460,7 @@ struct anx7625_data {
struct anx7625_i2c_client i2c;
struct i2c_client *last_client;
struct timer_list hdcp_timer;
struct s_edid_data slimport_edid_p;
const struct drm_edid *cached_drm_edid;
struct device *codec_dev;
hdmi_codec_plugged_cb plugged_cb;
struct work_struct work;

View File

@ -1697,11 +1697,6 @@ static int cdns_mhdp_connector_init(struct cdns_mhdp_device *mhdp)
struct drm_bridge *bridge = &mhdp->bridge;
int ret;
if (!bridge->encoder) {
dev_err(mhdp->dev, "Parent encoder object not found");
return -ENODEV;
}
conn->polled = DRM_CONNECTOR_POLL_HPD;
ret = drm_connector_init(bridge->dev, conn, &cdns_mhdp_conn_funcs,

View File

@ -116,11 +116,6 @@ int ldb_bridge_attach_helper(struct drm_bridge *bridge,
return -EINVAL;
}
if (!bridge->encoder) {
DRM_DEV_ERROR(ldb->dev, "missing encoder\n");
return -ENODEV;
}
return drm_bridge_attach(bridge->encoder,
ldb_ch->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);

View File

@ -119,11 +119,6 @@ static int imx8qxp_pc_bridge_attach(struct drm_bridge *bridge,
return -EINVAL;
}
if (!bridge->encoder) {
DRM_DEV_ERROR(pc->dev, "missing encoder\n");
return -ENODEV;
}
return drm_bridge_attach(bridge->encoder,
ch->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);

View File

@ -138,11 +138,6 @@ static int imx8qxp_pixel_link_bridge_attach(struct drm_bridge *bridge,
return -EINVAL;
}
if (!bridge->encoder) {
DRM_DEV_ERROR(pl->dev, "missing encoder\n");
return -ENODEV;
}
return drm_bridge_attach(bridge->encoder,
pl->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);

View File

@ -58,11 +58,6 @@ static int imx8qxp_pxl2dpi_bridge_attach(struct drm_bridge *bridge,
return -EINVAL;
}
if (!bridge->encoder) {
DRM_DEV_ERROR(p2d->dev, "missing encoder\n");
return -ENODEV;
}
return drm_bridge_attach(bridge->encoder,
p2d->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);

View File

@ -1307,9 +1307,15 @@ static void it6505_video_reset(struct it6505 *it6505)
it6505_link_reset_step_train(it6505);
it6505_set_bits(it6505, REG_DATA_MUTE_CTRL, EN_VID_MUTE, EN_VID_MUTE);
it6505_set_bits(it6505, REG_INFOFRAME_CTRL, EN_VID_CTRL_PKT, 0x00);
it6505_set_bits(it6505, REG_RESET_CTRL, VIDEO_RESET, VIDEO_RESET);
it6505_set_bits(it6505, REG_VID_BUS_CTRL1, TX_FIFO_RESET, TX_FIFO_RESET);
it6505_set_bits(it6505, REG_VID_BUS_CTRL1, TX_FIFO_RESET, 0x00);
it6505_set_bits(it6505, REG_501_FIFO_CTRL, RST_501_FIFO, RST_501_FIFO);
it6505_set_bits(it6505, REG_501_FIFO_CTRL, RST_501_FIFO, 0x00);
it6505_set_bits(it6505, REG_RESET_CTRL, VIDEO_RESET, VIDEO_RESET);
usleep_range(1000, 2000);
it6505_set_bits(it6505, REG_RESET_CTRL, VIDEO_RESET, 0x00);
}
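
The fixed sequence pulses the TX and 501 FIFO resets (assert, then release) before asserting VIDEO_RESET, holds the video reset across a 1-2 ms sleep, and only then releases it. it6505_set_bits() takes regmap_update_bits()-style mask/value arguments; a plain-memory model of that helper (the driver's version goes over I2C):

	#include <linux/types.h>

	/* bits covered by mask take their value from val; the rest are preserved */
	static void set_bits(u8 *reg, u8 mask, u8 val)
	{
		*reg = (*reg & ~mask) | (val & mask);
	}

	/* pulse: set_bits(&r, TX_FIFO_RESET, TX_FIFO_RESET); set_bits(&r, TX_FIFO_RESET, 0); */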
@ -2245,12 +2251,11 @@ static void it6505_link_training_work(struct work_struct *work)
if (ret) {
it6505->auto_train_retry = AUTO_TRAIN_RETRY;
it6505_link_train_ok(it6505);
return;
} else {
it6505->auto_train_retry--;
it6505_dump(it6505);
}
it6505_dump(it6505);
}
static void it6505_plugged_status_to_codec(struct it6505 *it6505)
@ -2471,33 +2476,55 @@ static void it6505_irq_link_train_fail(struct it6505 *it6505)
schedule_work(&it6505->link_works);
}
static void it6505_irq_video_fifo_error(struct it6505 *it6505)
{
struct device *dev = it6505->dev;
DRM_DEV_DEBUG_DRIVER(dev, "video fifo overflow interrupt");
it6505->auto_train_retry = AUTO_TRAIN_RETRY;
flush_work(&it6505->link_works);
it6505_stop_hdcp(it6505);
it6505_video_reset(it6505);
}
static void it6505_irq_io_latch_fifo_overflow(struct it6505 *it6505)
{
struct device *dev = it6505->dev;
DRM_DEV_DEBUG_DRIVER(dev, "IO latch fifo overflow interrupt");
it6505->auto_train_retry = AUTO_TRAIN_RETRY;
flush_work(&it6505->link_works);
it6505_stop_hdcp(it6505);
it6505_video_reset(it6505);
}
static bool it6505_test_bit(unsigned int bit, const unsigned int *addr)
{
return 1 & (addr[bit / BITS_PER_BYTE] >> (bit % BITS_PER_BYTE));
}
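
it6505_test_bit() divides by BITS_PER_BYTE rather than BITS_PER_LONG because int_status[] stores one 8-bit interrupt register per int slot, so a logical bit number encodes both the register index and the bit position within it. For example:

	unsigned int int_status[3] = { 0x00, 0x04, 0x00 };

	/* bit 10: register 10 / 8 == 1, bit 10 % 8 == 2, and 0x04 has bit 2 set */
	bool hit = it6505_test_bit(10, int_status);	/* true */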
static void it6505_irq_video_handler(struct it6505 *it6505, const int *int_status)
{
struct device *dev = it6505->dev;
int reg_0d, reg_int03;
/*
* Reset the video path when SCDT changes while the video is not
* yet stable, or on a video/IO FIFO error.
*/
if ((!it6505_get_video_status(it6505) &&
(it6505_test_bit(INT_SCDT_CHANGE, (unsigned int *)int_status))) ||
(it6505_test_bit(BIT_INT_IO_FIFO_OVERFLOW,
(unsigned int *)int_status)) ||
(it6505_test_bit(BIT_INT_VID_FIFO_ERROR,
(unsigned int *)int_status))) {
it6505->auto_train_retry = AUTO_TRAIN_RETRY;
flush_work(&it6505->link_works);
it6505_stop_hdcp(it6505);
it6505_video_reset(it6505);
usleep_range(10000, 11000);
/*
* Clear the FIFO error IRQs to avoid a FIFO-error -> reset loop;
* the hardware raises the SCDT change IRQ again once video is stable.
*/
reg_int03 = it6505_read(it6505, INT_STATUS_03);
reg_0d = it6505_read(it6505, REG_SYSTEM_STS);
reg_int03 &= (BIT(INT_VID_FIFO_ERROR) | BIT(INT_IO_LATCH_FIFO_OVERFLOW));
it6505_write(it6505, INT_STATUS_03, reg_int03);
DRM_DEV_DEBUG_DRIVER(dev, "reg08 = 0x%02x", reg_int03);
DRM_DEV_DEBUG_DRIVER(dev, "reg0D = 0x%02x", reg_0d);
return;
}
if (it6505_test_bit(INT_SCDT_CHANGE, (unsigned int *)int_status))
it6505_irq_scdt(it6505);
}
static irqreturn_t it6505_int_threaded_handler(int unused, void *data)
{
struct it6505 *it6505 = data;
@ -2508,15 +2535,12 @@ static irqreturn_t it6505_int_threaded_handler(int unused, void *data)
} irq_vec[] = {
{ BIT_INT_HPD, it6505_irq_hpd },
{ BIT_INT_HPD_IRQ, it6505_irq_hpd_irq },
{ BIT_INT_SCDT, it6505_irq_scdt },
{ BIT_INT_HDCP_FAIL, it6505_irq_hdcp_fail },
{ BIT_INT_HDCP_DONE, it6505_irq_hdcp_done },
{ BIT_INT_AUX_CMD_FAIL, it6505_irq_aux_cmd_fail },
{ BIT_INT_HDCP_KSV_CHECK, it6505_irq_hdcp_ksv_check },
{ BIT_INT_AUDIO_FIFO_ERROR, it6505_irq_audio_fifo_error },
{ BIT_INT_LINK_TRAIN_FAIL, it6505_irq_link_train_fail },
{ BIT_INT_VID_FIFO_ERROR, it6505_irq_video_fifo_error },
{ BIT_INT_IO_FIFO_OVERFLOW, it6505_irq_io_latch_fifo_overflow },
};
int int_status[3], i;
@ -2546,6 +2570,7 @@ static irqreturn_t it6505_int_threaded_handler(int unused, void *data)
if (it6505_test_bit(irq_vec[i].bit, (unsigned int *)int_status))
irq_vec[i].handler(it6505);
}
it6505_irq_video_handler(it6505, (unsigned int *)int_status);
}
pm_runtime_put_sync(dev);
@ -2590,7 +2615,7 @@ static int it6505_poweron(struct it6505 *it6505)
gpiod_set_value_cansleep(pdata->gpiod_reset, 0);
usleep_range(1000, 2000);
gpiod_set_value_cansleep(pdata->gpiod_reset, 1);
usleep_range(10000, 20000);
usleep_range(25000, 35000);
}
it6505->powered = true;
@ -2882,11 +2907,6 @@ static int it6505_bridge_attach(struct drm_bridge *bridge,
return -EINVAL;
}
if (!bridge->encoder) {
dev_err(dev, "Parent encoder object not found");
return -ENODEV;
}
/* Register aux channel */
it6505->aux.drm_dev = bridge->dev;

Some files were not shown because too many files have changed in this diff.