ARM: SoC driver updates for v5.17

Merge tag 'drivers-5.17' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc

Pull ARM SoC driver updates from Arnd Bergmann:
 "There are cleanups and minor bugfixes across several SoC specific
  drivers, for Qualcomm, Samsung, NXP i.MX, AT91, Tegra, Keystone,
  Renesas, ZynqMP

  Noteworthy new features are:

   - The op-tee firmware driver gains support for asynchronous
     notifications from secure-world firmware.

   - Qualcomm platforms gain support for new SoC types in various
     drivers: power domain, cache controller, RPM sleep, soc-info

   - Samsung SoC drivers gain support for new SoCs in ChipID and PMU, as
     well as a new USIv2 driver that handles various types of serial
      communication (uart, i2c, spi)

   - Renesas adds support for R-Car S4-8 (R8A779F0) in multiple drivers,
     as well as memory controller support for RZ/G2L (R9A07G044).

   - Apple M1 gains support for the PMGR power management driver"

* tag 'drivers-5.17' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc: (94 commits)
  soc: qcom: rpmh-rsc: Fix typo in a comment
  soc: qcom: socinfo: Add SM6350 and SM7225
  dt-bindings: arm: msm: Don't mark LLCC interrupt as required
  dt-bindings: firmware: scm: Add SM6350 compatible
  dt-bindings: arm: msm: Add LLCC for SM6350
  soc: qcom: rpmhpd: Sort power-domain definitions and lists
  soc: qcom: rpmhpd: Remove mx/cx relationship on sc7280
  soc: qcom: rpmhpd: Rename rpmhpd struct names
  soc: qcom: rpmhpd: sm8450: Add the missing .peer for sm8450_cx_ao
  soc: qcom: socinfo: add SM8450 ID
  soc: qcom: rpmhpd: Add SM8450 power domains
  dt-bindings: power: rpmpd: Add SM8450 to rpmpd binding
  soc: qcom: smem: Update max processor count
  dt-bindings: arm: qcom: Document SM8450 SoC and boards
  dt-bindings: firmware: scm: Add SM8450 compatible
  dt-bindings: arm: cpus: Add kryo780 compatible
  soc: qcom: rpmpd: Add support for sm6125
  dt-bindings: qcom-rpmpd: Add sm6125 power domains
  soc: qcom: aoss: constify static struct thermal_cooling_device_ops
  PM: AVS: qcom-cpr: Use div64_ul instead of do_div
  ...
Linus Torvalds 2022-01-10 08:13:52 -08:00
commit e85195d5bf
90 changed files with 3406 additions and 922 deletions


@@ -174,6 +174,7 @@ properties:
               - qcom,kryo560
               - qcom,kryo570
               - qcom,kryo685
+              - qcom,kryo780
               - qcom,scorpion

   enable-method:


@@ -24,6 +24,12 @@ properties:
   compatible:
     const: linaro,optee-tz

+  interrupts:
+    maxItems: 1
+    description: |
+      This interrupt which is used to signal an event by the secure world
+      software is expected to be edge-triggered.
+
   method:
     enum: [smc, hvc]
     description: |
@@ -42,10 +48,12 @@ additionalProperties: false

 examples:
   - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
     firmware {
       optee {
         compatible = "linaro,optee-tz";
         method = "smc";
+        interrupts = <GIC_SPI 187 IRQ_TYPE_EDGE_RISING>;
       };
     };


@@ -24,6 +24,7 @@ properties:
       - qcom,sc7180-llcc
       - qcom,sc7280-llcc
       - qcom,sdm845-llcc
+      - qcom,sm6350-llcc
       - qcom,sm8150-llcc
       - qcom,sm8250-llcc
@@ -44,7 +45,6 @@ required:
   - compatible
   - reg
   - reg-names
-  - interrupts

 additionalProperties: false


@@ -50,6 +50,7 @@ description: |
         sm8150
         sm8250
         sm8350
+        sm8450

   The 'board' element must be one of the following strings:
@@ -257,6 +258,11 @@ properties:
               - qcom,sm8350-mtp
           - const: qcom,sm8350

+      - items:
+          - enum:
+              - qcom,sm8450-qrd
+          - const: qcom,sm8450
+
 additionalProperties: true

 ...


@@ -26,9 +26,11 @@ Required properties:
                * "qcom,scm-sc7280"
                * "qcom,scm-sdm845"
                * "qcom,scm-sdx55"
+               * "qcom,scm-sm6350"
                * "qcom,scm-sm8150"
                * "qcom,scm-sm8250"
                * "qcom,scm-sm8350"
+               * "qcom,scm-sm8450"
 and:
                * "qcom,scm"
 - clocks: Specifies clocks needed by the SCM interface, if any:


@@ -24,7 +24,8 @@ allOf:
 properties:
   compatible:
-    items:
+    oneOf:
+      - items:
           - enum:
               - renesas,r8a774a1-rpc-if       # RZ/G2M
               - renesas,r8a774b1-rpc-if       # RZ/G2N
@@ -34,7 +35,12 @@ properties:
               - renesas,r8a77980-rpc-if       # R-Car V3H
               - renesas,r8a77995-rpc-if       # R-Car D3
               - renesas,r8a779a0-rpc-if       # R-Car V3U
-          - const: renesas,rcar-gen3-rpc-if   # a generic R-Car gen3 or RZ/G2 device
+          - const: renesas,rcar-gen3-rpc-if   # a generic R-Car gen3 or RZ/G2{E,H,M,N} device
+      - items:
+          - enum:
+              - renesas,r9a07g044-rpc-if      # RZ/G2{L,LC}
+          - const: renesas,rzg2l-rpc-if       # RZ/G2L family

   reg:
     items:
@@ -48,7 +54,9 @@ properties:
         - const: dirmap
         - const: wbuf

-  clocks:
+  clocks: true
+
+  interrupts:
     maxItems: 1

   power-domains:
@@ -67,8 +75,6 @@ patternProperties:
           - cfi-flash
           - jedec,spi-nor

-    unevaluatedProperties: false
-
 required:
   - compatible
   - reg
@@ -79,6 +85,26 @@ required:
   - '#address-cells'
   - '#size-cells'

+if:
+  properties:
+    compatible:
+      contains:
+        enum:
+          - renesas,rzg2l-rpc-if
+then:
+  properties:
+    clocks:
+      items:
+        - description: SPI Multi IO Register access clock (SPI_CLK2)
+        - description: SPI Multi IO Main clock (SPI_CLK).
+else:
+  properties:
+    clocks:
+      maxItems: 1
+
+unevaluatedProperties: false
+
 examples:
   - |
     #include <dt-bindings/clock/renesas-cpg-mssr.h>


@@ -24,6 +24,7 @@ properties:
       - qcom,msm8994-rpmpd
       - qcom,msm8996-rpmpd
       - qcom,msm8998-rpmpd
+      - qcom,qcm2290-rpmpd
       - qcom,qcs404-rpmpd
       - qcom,sdm660-rpmpd
       - qcom,sc7180-rpmhpd
@@ -32,10 +33,12 @@ properties:
       - qcom,sdm845-rpmhpd
       - qcom,sdx55-rpmhpd
       - qcom,sm6115-rpmpd
+      - qcom,sm6125-rpmpd
       - qcom,sm6350-rpmhpd
       - qcom,sm8150-rpmhpd
       - qcom,sm8250-rpmhpd
       - qcom,sm8350-rpmhpd
+      - qcom,sm8450-rpmhpd

   '#power-domain-cells':
     const: 1


@@ -21,6 +21,11 @@ properties:
     enum:
       - qcom,rpmh-stats
       - qcom,rpm-stats
+      # For older RPM firmware versions with fixed offset for the sleep stats
+      - qcom,apq8084-rpm-stats
+      - qcom,msm8226-rpm-stats
+      - qcom,msm8916-rpm-stats
+      - qcom,msm8974-rpm-stats

   reg:
     maxItems: 1


@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/arm/samsung/exynos-chipid.yaml#
+$id: http://devicetree.org/schemas/soc/samsung/exynos-chipid.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#

 title: Samsung Exynos SoC series Chipid driver


@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/arm/samsung/pmu.yaml#
+$id: http://devicetree.org/schemas/soc/samsung/exynos-pmu.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#

 title: Samsung Exynos SoC series Power Management Unit (PMU)
@@ -24,6 +24,7 @@ select:
           - samsung,exynos5420-pmu
           - samsung,exynos5433-pmu
           - samsung,exynos7-pmu
+          - samsung,exynos850-pmu
           - samsung-s5pv210-pmu
   required:
     - compatible
@@ -41,6 +42,7 @@ properties:
           - samsung,exynos5420-pmu
           - samsung,exynos5433-pmu
           - samsung,exynos7-pmu
+          - samsung,exynos850-pmu
           - samsung-s5pv210-pmu
       - const: syscon


@ -0,0 +1,159 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/soc/samsung/exynos-usi.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung's Exynos USI (Universal Serial Interface) binding
maintainers:
- Sam Protsenko <semen.protsenko@linaro.org>
- Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
description: |
USI IP-core provides selectable serial protocol (UART, SPI or High-Speed I2C).
USI shares almost all internal circuits within each protocol, so only one
protocol can be chosen at a time. USI is modeled as a node with zero or more
child nodes, each representing a serial sub-node device. The mode setting
selects which particular function will be used.
Refer to next bindings documentation for information on protocol subnodes that
can exist under USI node:
[1] Documentation/devicetree/bindings/serial/samsung_uart.yaml
[2] Documentation/devicetree/bindings/i2c/i2c-exynos5.txt
[3] Documentation/devicetree/bindings/spi/spi-samsung.txt
properties:
$nodename:
pattern: "^usi@[0-9a-f]+$"
compatible:
enum:
- samsung,exynos850-usi # for USIv2 (Exynos850, ExynosAutoV9)
reg: true
clocks: true
clock-names: true
ranges: true
"#address-cells":
const: 1
"#size-cells":
const: 1
samsung,sysreg:
$ref: /schemas/types.yaml#/definitions/phandle-array
description:
Should be phandle/offset pair. The phandle to System Register syscon node
(for the same domain where this USI controller resides) and the offset
of SW_CONF register for this USI controller.
samsung,mode:
$ref: /schemas/types.yaml#/definitions/uint32
description:
Selects USI function (which serial protocol to use). Refer to
<include/dt-bindings/soc/samsung,exynos-usi.h> for valid USI mode values.
samsung,clkreq-on:
type: boolean
description:
Enable this property if underlying protocol requires the clock to be
continuously provided without automatic gating. As suggested by SoC
manual, it should be set in case of SPI/I2C slave, UART Rx and I2C
multi-master mode. Usually this property is needed if USI mode is set
to "UART".
This property is optional.
patternProperties:
# All other properties should be child nodes
"^(serial|spi|i2c)@[0-9a-f]+$":
type: object
description: Child node describing underlying USI serial protocol
required:
- compatible
- ranges
- "#address-cells"
- "#size-cells"
- samsung,sysreg
- samsung,mode
if:
properties:
compatible:
contains:
enum:
- samsung,exynos850-usi
then:
properties:
reg:
maxItems: 1
clocks:
items:
- description: Bus (APB) clock
- description: Operating clock for UART/SPI/I2C protocol
clock-names:
items:
- const: pclk
- const: ipclk
required:
- reg
- clocks
- clock-names
else:
properties:
reg: false
clocks: false
clock-names: false
samsung,clkreq-on: false
additionalProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/soc/samsung,exynos-usi.h>
usi0: usi@138200c0 {
compatible = "samsung,exynos850-usi";
reg = <0x138200c0 0x20>;
samsung,sysreg = <&sysreg_peri 0x1010>;
samsung,mode = <USI_V2_UART>;
samsung,clkreq-on; /* needed for UART mode */
#address-cells = <1>;
#size-cells = <1>;
ranges;
clocks = <&cmu_peri 32>, <&cmu_peri 31>;
clock-names = "pclk", "ipclk";
serial_0: serial@13820000 {
compatible = "samsung,exynos850-uart";
reg = <0x13820000 0xc0>;
interrupts = <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cmu_peri 32>, <&cmu_peri 31>;
clock-names = "uart", "clk_uart_baud0";
status = "disabled";
};
hsi2c_0: i2c@13820000 {
compatible = "samsung,exynosautov9-hsi2c";
reg = <0x13820000 0xc0>;
interrupts = <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
clocks = <&cmu_peri 32>, <&cmu_peri 31>;
clock-names = "hsi2c_pclk", "hsi2c";
status = "disabled";
};
};


@@ -184,6 +184,36 @@ order to support device enumeration. In other words, OP-TEE driver invokes this
 application to retrieve a list of Trusted Applications which can be registered
 as devices on the TEE bus.

+OP-TEE notifications
+--------------------
+
+There are two kinds of notifications that secure world can use to make
+normal world aware of some event.
+
+1. Synchronous notifications delivered with ``OPTEE_RPC_CMD_NOTIFICATION``
+   using the ``OPTEE_RPC_NOTIFICATION_SEND`` parameter.
+2. Asynchronous notifications delivered with a combination of a non-secure
+   edge-triggered interrupt and a fast call from the non-secure interrupt
+   handler.
+
+Synchronous notifications are limited by depending on RPC for delivery,
+this is only usable when secure world is entered with a yielding call via
+``OPTEE_SMC_CALL_WITH_ARG``. This excludes such notifications from secure
+world interrupt handlers.
+
+An asynchronous notification is delivered via a non-secure edge-triggered
+interrupt to an interrupt handler registered in the OP-TEE driver. The
+actual notification value are retrieved with the fast call
+``OPTEE_SMC_GET_ASYNC_NOTIF_VALUE``. Note that one interrupt can represent
+multiple notifications.
+
+One notification value ``OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF`` has a
+special meaning. When this value is received it means that normal world is
+supposed to make a yielding call ``OPTEE_MSG_CMD_DO_BOTTOM_HALF``. This
+call is done from the thread assisting the interrupt handler. This is a
+building block for OP-TEE OS in secure world to implement the top half and
+bottom half style of device drivers.
+
 AMD-TEE driver
 ==============
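The asynchronous path described in the documentation hunk above maps onto a small interrupt-handler loop on the kernel side. The sketch below is illustrative only and is not the actual OP-TEE driver code: get_async_notif_value() and handle_notif_value() are hypothetical helpers standing in for the OPTEE_SMC_GET_ASYNC_NOTIF_VALUE fast call and for waking whichever waiter is associated with a notification value; the OPTEE_SMC_*/OPTEE_MSG_* names are the ones from the text above and are assumed to come from the OP-TEE driver's private headers.

#include <linux/interrupt.h>
#include <linux/types.h>

/* Hypothetical helpers, not part of the real driver API. */
static u32 get_async_notif_value(bool *value_valid, bool *value_pending);
static void handle_notif_value(void *data, u32 value);

static irqreturn_t optee_notif_irq_handler(int irq, void *data)
{
        bool do_bottom_half = false;
        bool value_valid, value_pending;
        u32 value;

        do {
                /* Fast call OPTEE_SMC_GET_ASYNC_NOTIF_VALUE fetches one value. */
                value = get_async_notif_value(&value_valid, &value_pending);
                if (!value_valid)
                        break;

                if (value == OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF)
                        do_bottom_half = true;  /* defer the yielding call */
                else
                        handle_notif_value(data, value);
        } while (value_pending);        /* one interrupt may carry several values */

        /*
         * The threaded half of the handler then enters secure world with the
         * yielding OPTEE_MSG_CMD_DO_BOTTOM_HALF call, as described above.
         */
        return do_bottom_half ? IRQ_WAKE_THREAD : IRQ_HANDLED;
}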


@@ -2551,6 +2551,7 @@ Q: https://patchwork.kernel.org/project/linux-samsung-soc/list/
 F:      Documentation/arm/samsung/
 F:      Documentation/devicetree/bindings/arm/samsung/
 F:      Documentation/devicetree/bindings/power/pd-samsung.yaml
+F:      Documentation/devicetree/bindings/soc/samsung/
 F:      arch/arm/boot/dts/exynos*
 F:      arch/arm/boot/dts/s3c*
 F:      arch/arm/boot/dts/s5p*


@@ -645,6 +645,7 @@ static __init int at91_dt_ramc(bool phy_mandatory)
                 if (!soc_pm.data.ramc[idx]) {
                         pr_err("unable to map ramc[%d] cpu registers\n", idx);
                         ret = -ENOMEM;
+                        of_node_put(np);
                         goto unmap_ramc;
                 }

@@ -670,6 +671,7 @@ static __init int at91_dt_ramc(bool phy_mandatory)
                 if (!soc_pm.data.ramc_phy) {
                         pr_err("unable to map ramc phy cpu registers\n");
                         ret = -ENOMEM;
+                        of_node_put(np);
                         goto unmap_ramc;
                 }
         }


@ -21,6 +21,7 @@ struct imx_weim_devtype {
unsigned int cs_stride; unsigned int cs_stride;
unsigned int wcr_offset; unsigned int wcr_offset;
unsigned int wcr_bcm; unsigned int wcr_bcm;
unsigned int wcr_cont_bclk;
}; };
static const struct imx_weim_devtype imx1_weim_devtype = { static const struct imx_weim_devtype imx1_weim_devtype = {
@ -41,6 +42,7 @@ static const struct imx_weim_devtype imx50_weim_devtype = {
.cs_stride = 0x18, .cs_stride = 0x18,
.wcr_offset = 0x90, .wcr_offset = 0x90,
.wcr_bcm = BIT(0), .wcr_bcm = BIT(0),
.wcr_cont_bclk = BIT(3),
}; };
static const struct imx_weim_devtype imx51_weim_devtype = { static const struct imx_weim_devtype imx51_weim_devtype = {
@ -206,8 +208,20 @@ static int weim_parse_dt(struct platform_device *pdev, void __iomem *base)
if (of_property_read_bool(pdev->dev.of_node, "fsl,burst-clk-enable")) { if (of_property_read_bool(pdev->dev.of_node, "fsl,burst-clk-enable")) {
if (devtype->wcr_bcm) { if (devtype->wcr_bcm) {
reg = readl(base + devtype->wcr_offset); reg = readl(base + devtype->wcr_offset);
writel(reg | devtype->wcr_bcm, reg |= devtype->wcr_bcm;
base + devtype->wcr_offset);
if (of_property_read_bool(pdev->dev.of_node,
"fsl,continuous-burst-clk")) {
if (devtype->wcr_cont_bclk) {
reg |= devtype->wcr_cont_bclk;
} else {
dev_err(&pdev->dev,
"continuous burst clk not supported.\n");
return -EINVAL;
}
}
writel(reg, base + devtype->wcr_offset);
} else { } else {
dev_err(&pdev->dev, "burst clk mode not supported.\n"); dev_err(&pdev->dev, "burst clk mode not supported.\n");
return -EINVAL; return -EINVAL;


@ -13,8 +13,11 @@
#include <linux/io.h> #include <linux/io.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/of_device.h> #include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h> #include <linux/reset.h>
#include <soc/tegra/common.h>
#define TEGRA_GMI_CONFIG 0x00 #define TEGRA_GMI_CONFIG 0x00
#define TEGRA_GMI_CONFIG_GO BIT(31) #define TEGRA_GMI_CONFIG_GO BIT(31)
#define TEGRA_GMI_BUS_WIDTH_32BIT BIT(30) #define TEGRA_GMI_BUS_WIDTH_32BIT BIT(30)
@ -54,9 +57,10 @@ static int tegra_gmi_enable(struct tegra_gmi *gmi)
{ {
int err; int err;
err = clk_prepare_enable(gmi->clk); pm_runtime_enable(gmi->dev);
if (err < 0) { err = pm_runtime_resume_and_get(gmi->dev);
dev_err(gmi->dev, "failed to enable clock: %d\n", err); if (err) {
pm_runtime_disable(gmi->dev);
return err; return err;
} }
@ -83,7 +87,9 @@ static void tegra_gmi_disable(struct tegra_gmi *gmi)
writel(config, gmi->base + TEGRA_GMI_CONFIG); writel(config, gmi->base + TEGRA_GMI_CONFIG);
reset_control_assert(gmi->rst); reset_control_assert(gmi->rst);
clk_disable_unprepare(gmi->clk);
pm_runtime_put_sync_suspend(gmi->dev);
pm_runtime_force_suspend(gmi->dev);
} }
static int tegra_gmi_parse_dt(struct tegra_gmi *gmi) static int tegra_gmi_parse_dt(struct tegra_gmi *gmi)
@ -213,6 +219,7 @@ static int tegra_gmi_probe(struct platform_device *pdev)
if (!gmi) if (!gmi)
return -ENOMEM; return -ENOMEM;
platform_set_drvdata(pdev, gmi);
gmi->dev = dev; gmi->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0); res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@ -232,6 +239,10 @@ static int tegra_gmi_probe(struct platform_device *pdev)
return PTR_ERR(gmi->rst); return PTR_ERR(gmi->rst);
} }
err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
if (err)
return err;
err = tegra_gmi_parse_dt(gmi); err = tegra_gmi_parse_dt(gmi);
if (err) if (err)
return err; return err;
@ -247,8 +258,6 @@ static int tegra_gmi_probe(struct platform_device *pdev)
return err; return err;
} }
platform_set_drvdata(pdev, gmi);
return 0; return 0;
} }
@ -262,6 +271,34 @@ static int tegra_gmi_remove(struct platform_device *pdev)
return 0; return 0;
} }
static int __maybe_unused tegra_gmi_runtime_resume(struct device *dev)
{
struct tegra_gmi *gmi = dev_get_drvdata(dev);
int err;
err = clk_prepare_enable(gmi->clk);
if (err < 0) {
dev_err(gmi->dev, "failed to enable clock: %d\n", err);
return err;
}
return 0;
}
static int __maybe_unused tegra_gmi_runtime_suspend(struct device *dev)
{
struct tegra_gmi *gmi = dev_get_drvdata(dev);
clk_disable_unprepare(gmi->clk);
return 0;
}
static const struct dev_pm_ops tegra_gmi_pm = {
SET_RUNTIME_PM_OPS(tegra_gmi_runtime_suspend, tegra_gmi_runtime_resume,
NULL)
};
static const struct of_device_id tegra_gmi_id_table[] = { static const struct of_device_id tegra_gmi_id_table[] = {
{ .compatible = "nvidia,tegra20-gmi", }, { .compatible = "nvidia,tegra20-gmi", },
{ .compatible = "nvidia,tegra30-gmi", }, { .compatible = "nvidia,tegra30-gmi", },
@ -275,6 +312,7 @@ static struct platform_driver tegra_gmi_driver = {
.driver = { .driver = {
.name = "tegra-gmi", .name = "tegra-gmi",
.of_match_table = tegra_gmi_id_table, .of_match_table = tegra_gmi_id_table,
.pm = &tegra_gmi_pm,
}, },
}; };
module_platform_driver(tegra_gmi_driver); module_platform_driver(tegra_gmi_driver);


@@ -1759,7 +1759,7 @@ static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
                 desc->num = resp->range_num;
                 desc->start_sec = resp->range_start_sec;
                 desc->num_sec = resp->range_num_sec;
-        };
+        }

 fail:
         ti_sci_put_one_xfer(&info->minfo, xfer);


@@ -1434,7 +1434,10 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
                 return ret;

         /* Check PM API version number */
-        zynqmp_pm_get_api_version(&pm_api_version);
+        ret = zynqmp_pm_get_api_version(&pm_api_version);
+        if (ret)
+                return ret;
+
         if (pm_api_version < ZYNQMP_PM_VERSION) {
                 panic("%s Platform Management API version error. Expected: v%d.%d - Found: v%d.%d\n",
                       __func__,


@ -12,6 +12,7 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/of_device.h>
#include <linux/regmap.h> #include <linux/regmap.h>
#include <linux/reset.h> #include <linux/reset.h>
@ -19,19 +20,17 @@
#define RPCIF_CMNCR 0x0000 /* R/W */ #define RPCIF_CMNCR 0x0000 /* R/W */
#define RPCIF_CMNCR_MD BIT(31) #define RPCIF_CMNCR_MD BIT(31)
#define RPCIF_CMNCR_SFDE BIT(24) /* undocumented but must be set */
#define RPCIF_CMNCR_MOIIO3(val) (((val) & 0x3) << 22) #define RPCIF_CMNCR_MOIIO3(val) (((val) & 0x3) << 22)
#define RPCIF_CMNCR_MOIIO2(val) (((val) & 0x3) << 20) #define RPCIF_CMNCR_MOIIO2(val) (((val) & 0x3) << 20)
#define RPCIF_CMNCR_MOIIO1(val) (((val) & 0x3) << 18) #define RPCIF_CMNCR_MOIIO1(val) (((val) & 0x3) << 18)
#define RPCIF_CMNCR_MOIIO0(val) (((val) & 0x3) << 16) #define RPCIF_CMNCR_MOIIO0(val) (((val) & 0x3) << 16)
#define RPCIF_CMNCR_MOIIO_HIZ (RPCIF_CMNCR_MOIIO0(3) | \ #define RPCIF_CMNCR_MOIIO(val) (RPCIF_CMNCR_MOIIO0(val) | RPCIF_CMNCR_MOIIO1(val) | \
RPCIF_CMNCR_MOIIO1(3) | \ RPCIF_CMNCR_MOIIO2(val) | RPCIF_CMNCR_MOIIO3(val))
RPCIF_CMNCR_MOIIO2(3) | RPCIF_CMNCR_MOIIO3(3)) #define RPCIF_CMNCR_IO3FV(val) (((val) & 0x3) << 14) /* documented for RZ/G2L */
#define RPCIF_CMNCR_IO3FV(val) (((val) & 0x3) << 14) /* undocumented */ #define RPCIF_CMNCR_IO2FV(val) (((val) & 0x3) << 12) /* documented for RZ/G2L */
#define RPCIF_CMNCR_IO2FV(val) (((val) & 0x3) << 12) /* undocumented */
#define RPCIF_CMNCR_IO0FV(val) (((val) & 0x3) << 8) #define RPCIF_CMNCR_IO0FV(val) (((val) & 0x3) << 8)
#define RPCIF_CMNCR_IOFV_HIZ (RPCIF_CMNCR_IO0FV(3) | RPCIF_CMNCR_IO2FV(3) | \ #define RPCIF_CMNCR_IOFV(val) (RPCIF_CMNCR_IO0FV(val) | RPCIF_CMNCR_IO2FV(val) | \
RPCIF_CMNCR_IO3FV(3)) RPCIF_CMNCR_IO3FV(val))
#define RPCIF_CMNCR_BSZ(val) (((val) & 0x3) << 0) #define RPCIF_CMNCR_BSZ(val) (((val) & 0x3) << 0)
#define RPCIF_SSLDR 0x0004 /* R/W */ #define RPCIF_SSLDR 0x0004 /* R/W */
@ -126,6 +125,9 @@
#define RPCIF_SMDRENR_OPDRE BIT(4) #define RPCIF_SMDRENR_OPDRE BIT(4)
#define RPCIF_SMDRENR_SPIDRE BIT(0) #define RPCIF_SMDRENR_SPIDRE BIT(0)
#define RPCIF_PHYADD 0x0070 /* R/W available on R-Car E3/D3/V3M and RZ/G2{E,L} */
#define RPCIF_PHYWR 0x0074 /* R/W available on R-Car E3/D3/V3M and RZ/G2{E,L} */
#define RPCIF_PHYCNT 0x007C /* R/W */ #define RPCIF_PHYCNT 0x007C /* R/W */
#define RPCIF_PHYCNT_CAL BIT(31) #define RPCIF_PHYCNT_CAL BIT(31)
#define RPCIF_PHYCNT_OCTA(v) (((v) & 0x3) << 22) #define RPCIF_PHYCNT_OCTA(v) (((v) & 0x3) << 22)
@ -133,10 +135,12 @@
#define RPCIF_PHYCNT_OCT BIT(20) #define RPCIF_PHYCNT_OCT BIT(20)
#define RPCIF_PHYCNT_DDRCAL BIT(19) #define RPCIF_PHYCNT_DDRCAL BIT(19)
#define RPCIF_PHYCNT_HS BIT(18) #define RPCIF_PHYCNT_HS BIT(18)
#define RPCIF_PHYCNT_STRTIM(v) (((v) & 0x7) << 15) #define RPCIF_PHYCNT_CKSEL(v) (((v) & 0x3) << 16) /* valid only for RZ/G2L */
#define RPCIF_PHYCNT_STRTIM(v) (((v) & 0x7) << 15) /* valid for R-Car and RZ/G2{E,H,M,N} */
#define RPCIF_PHYCNT_WBUF2 BIT(4) #define RPCIF_PHYCNT_WBUF2 BIT(4)
#define RPCIF_PHYCNT_WBUF BIT(2) #define RPCIF_PHYCNT_WBUF BIT(2)
#define RPCIF_PHYCNT_PHYMEM(v) (((v) & 0x3) << 0) #define RPCIF_PHYCNT_PHYMEM(v) (((v) & 0x3) << 0)
#define RPCIF_PHYCNT_PHYMEM_MASK GENMASK(1, 0)
#define RPCIF_PHYOFFSET1 0x0080 /* R/W */ #define RPCIF_PHYOFFSET1 0x0080 /* R/W */
#define RPCIF_PHYOFFSET1_DDRTMG(v) (((v) & 0x3) << 28) #define RPCIF_PHYOFFSET1_DDRTMG(v) (((v) & 0x3) << 28)
@ -147,8 +151,6 @@
#define RPCIF_PHYINT 0x0088 /* R/W */ #define RPCIF_PHYINT 0x0088 /* R/W */
#define RPCIF_PHYINT_WPVAL BIT(1) #define RPCIF_PHYINT_WPVAL BIT(1)
#define RPCIF_DIRMAP_SIZE 0x4000000
static const struct regmap_range rpcif_volatile_ranges[] = { static const struct regmap_range rpcif_volatile_ranges[] = {
regmap_reg_range(RPCIF_SMRDR0, RPCIF_SMRDR1), regmap_reg_range(RPCIF_SMRDR0, RPCIF_SMRDR1),
regmap_reg_range(RPCIF_SMWDR0, RPCIF_SMWDR1), regmap_reg_range(RPCIF_SMWDR0, RPCIF_SMWDR1),
@ -243,50 +245,74 @@ int rpcif_sw_init(struct rpcif *rpc, struct device *dev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap"); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap");
rpc->dirmap = devm_ioremap_resource(&pdev->dev, res); rpc->dirmap = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(rpc->dirmap)) if (IS_ERR(rpc->dirmap))
rpc->dirmap = NULL; return PTR_ERR(rpc->dirmap);
rpc->size = resource_size(res); rpc->size = resource_size(res);
rpc->type = (uintptr_t)of_device_get_match_data(dev);
rpc->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); rpc->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
return PTR_ERR_OR_ZERO(rpc->rstc); return PTR_ERR_OR_ZERO(rpc->rstc);
} }
EXPORT_SYMBOL(rpcif_sw_init); EXPORT_SYMBOL(rpcif_sw_init);
void rpcif_hw_init(struct rpcif *rpc, bool hyperflash) static void rpcif_rzg2l_timing_adjust_sdr(struct rpcif *rpc)
{
regmap_write(rpc->regmap, RPCIF_PHYWR, 0xa5390000);
regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000000);
regmap_write(rpc->regmap, RPCIF_PHYWR, 0x00008080);
regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000022);
regmap_write(rpc->regmap, RPCIF_PHYWR, 0x00008080);
regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000024);
regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_CKSEL(3),
RPCIF_PHYCNT_CKSEL(3));
regmap_write(rpc->regmap, RPCIF_PHYWR, 0x00000030);
regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000032);
}
int rpcif_hw_init(struct rpcif *rpc, bool hyperflash)
{ {
u32 dummy; u32 dummy;
pm_runtime_get_sync(rpc->dev); pm_runtime_get_sync(rpc->dev);
/* if (rpc->type == RPCIF_RZ_G2L) {
* NOTE: The 0x260 are undocumented bits, but they must be set. int ret;
* RPCIF_PHYCNT_STRTIM is strobe timing adjustment bits,
* 0x0 : the delay is biggest,
* 0x1 : the delay is 2nd biggest,
* On H3 ES1.x, the value should be 0, while on others,
* the value should be 7.
*/
regmap_write(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_STRTIM(7) |
RPCIF_PHYCNT_PHYMEM(hyperflash ? 3 : 0) | 0x260);
/* ret = reset_control_reset(rpc->rstc);
* NOTE: The 0x1511144 are undocumented bits, but they must be set if (ret)
* for RPCIF_PHYOFFSET1. return ret;
* The 0x31 are undocumented bits, but they must be set usleep_range(200, 300);
* for RPCIF_PHYOFFSET2. rpcif_rzg2l_timing_adjust_sdr(rpc);
*/ }
regmap_write(rpc->regmap, RPCIF_PHYOFFSET1, 0x1511144 |
regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_PHYMEM_MASK,
RPCIF_PHYCNT_PHYMEM(hyperflash ? 3 : 0));
if (rpc->type == RPCIF_RCAR_GEN3)
regmap_update_bits(rpc->regmap, RPCIF_PHYCNT,
RPCIF_PHYCNT_STRTIM(7), RPCIF_PHYCNT_STRTIM(7));
regmap_update_bits(rpc->regmap, RPCIF_PHYOFFSET1, RPCIF_PHYOFFSET1_DDRTMG(3),
RPCIF_PHYOFFSET1_DDRTMG(3)); RPCIF_PHYOFFSET1_DDRTMG(3));
regmap_write(rpc->regmap, RPCIF_PHYOFFSET2, 0x31 | regmap_update_bits(rpc->regmap, RPCIF_PHYOFFSET2, RPCIF_PHYOFFSET2_OCTTMG(7),
RPCIF_PHYOFFSET2_OCTTMG(4)); RPCIF_PHYOFFSET2_OCTTMG(4));
if (hyperflash) if (hyperflash)
regmap_update_bits(rpc->regmap, RPCIF_PHYINT, regmap_update_bits(rpc->regmap, RPCIF_PHYINT,
RPCIF_PHYINT_WPVAL, 0); RPCIF_PHYINT_WPVAL, 0);
regmap_write(rpc->regmap, RPCIF_CMNCR, RPCIF_CMNCR_SFDE | if (rpc->type == RPCIF_RCAR_GEN3)
RPCIF_CMNCR_MOIIO_HIZ | RPCIF_CMNCR_IOFV_HIZ | regmap_update_bits(rpc->regmap, RPCIF_CMNCR,
RPCIF_CMNCR_MOIIO(3) | RPCIF_CMNCR_BSZ(3),
RPCIF_CMNCR_MOIIO(3) |
RPCIF_CMNCR_BSZ(hyperflash ? 1 : 0)); RPCIF_CMNCR_BSZ(hyperflash ? 1 : 0));
else
regmap_update_bits(rpc->regmap, RPCIF_CMNCR,
RPCIF_CMNCR_MOIIO(3) | RPCIF_CMNCR_IOFV(3) |
RPCIF_CMNCR_BSZ(3),
RPCIF_CMNCR_MOIIO(1) | RPCIF_CMNCR_IOFV(2) |
RPCIF_CMNCR_BSZ(hyperflash ? 1 : 0));
/* Set RCF after BSZ update */ /* Set RCF after BSZ update */
regmap_write(rpc->regmap, RPCIF_DRCR, RPCIF_DRCR_RCF); regmap_write(rpc->regmap, RPCIF_DRCR, RPCIF_DRCR_RCF);
/* Dummy read according to spec */ /* Dummy read according to spec */
@ -297,6 +323,8 @@ void rpcif_hw_init(struct rpcif *rpc, bool hyperflash)
pm_runtime_put(rpc->dev); pm_runtime_put(rpc->dev);
rpc->bus_size = hyperflash ? 2 : 1; rpc->bus_size = hyperflash ? 2 : 1;
return 0;
} }
EXPORT_SYMBOL(rpcif_hw_init); EXPORT_SYMBOL(rpcif_hw_init);
@ -588,8 +616,8 @@ static void memcpy_fromio_readw(void *to,
ssize_t rpcif_dirmap_read(struct rpcif *rpc, u64 offs, size_t len, void *buf) ssize_t rpcif_dirmap_read(struct rpcif *rpc, u64 offs, size_t len, void *buf)
{ {
loff_t from = offs & (RPCIF_DIRMAP_SIZE - 1); loff_t from = offs & (rpc->size - 1);
size_t size = RPCIF_DIRMAP_SIZE - from; size_t size = rpc->size - from;
if (len > size) if (len > size)
len = size; len = size;
@ -659,7 +687,8 @@ static int rpcif_remove(struct platform_device *pdev)
} }
static const struct of_device_id rpcif_of_match[] = { static const struct of_device_id rpcif_of_match[] = {
{ .compatible = "renesas,rcar-gen3-rpc-if", }, { .compatible = "renesas,rcar-gen3-rpc-if", .data = (void *)RPCIF_RCAR_GEN3 },
{ .compatible = "renesas,rzg2l-rpc-if", .data = (void *)RPCIF_RZ_G2L },
{}, {},
}; };
MODULE_DEVICE_TABLE(of, rpcif_of_match); MODULE_DEVICE_TABLE(of, rpcif_of_match);


@ -15,6 +15,8 @@
#include <linux/of.h> #include <linux/of.h>
#include <linux/of_device.h> #include <linux/of_device.h>
#include <linux/pinctrl/consumer.h> #include <linux/pinctrl/consumer.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h> #include <linux/regulator/consumer.h>
#include <linux/reset.h> #include <linux/reset.h>
#include <linux/mmc/card.h> #include <linux/mmc/card.h>
@ -24,6 +26,8 @@
#include <linux/gpio/consumer.h> #include <linux/gpio/consumer.h>
#include <linux/ktime.h> #include <linux/ktime.h>
#include <soc/tegra/common.h>
#include "sdhci-pltfm.h" #include "sdhci-pltfm.h"
#include "cqhci.h" #include "cqhci.h"
@ -743,7 +747,9 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{ {
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
struct device *dev = mmc_dev(host->mmc);
unsigned long host_clk; unsigned long host_clk;
int err;
if (!clock) if (!clock)
return sdhci_set_clock(host, clock); return sdhci_set_clock(host, clock);
@ -761,7 +767,12 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
* from clk_get_rate() is used. * from clk_get_rate() is used.
*/ */
host_clk = tegra_host->ddr_signaling ? clock * 2 : clock; host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
clk_set_rate(pltfm_host->clk, host_clk);
err = dev_pm_opp_set_rate(dev, host_clk);
if (err)
dev_err(dev, "failed to set clk rate to %luHz: %d\n",
host_clk, err);
tegra_host->curr_clk_rate = host_clk; tegra_host->curr_clk_rate = host_clk;
if (tegra_host->ddr_signaling) if (tegra_host->ddr_signaling)
host->max_clk = host_clk; host->max_clk = host_clk;
@ -1714,7 +1725,6 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
"failed to get clock\n"); "failed to get clock\n");
goto err_clk_get; goto err_clk_get;
} }
clk_prepare_enable(clk);
pltfm_host->clk = clk; pltfm_host->clk = clk;
tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev, tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev,
@ -1725,15 +1735,24 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
goto err_rst_get; goto err_rst_get;
} }
rc = reset_control_assert(tegra_host->rst); rc = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
if (rc) if (rc)
goto err_rst_get; goto err_rst_get;
pm_runtime_enable(&pdev->dev);
rc = pm_runtime_resume_and_get(&pdev->dev);
if (rc)
goto err_pm_get;
rc = reset_control_assert(tegra_host->rst);
if (rc)
goto err_rst_assert;
usleep_range(2000, 4000); usleep_range(2000, 4000);
rc = reset_control_deassert(tegra_host->rst); rc = reset_control_deassert(tegra_host->rst);
if (rc) if (rc)
goto err_rst_get; goto err_rst_assert;
usleep_range(2000, 4000); usleep_range(2000, 4000);
@ -1745,8 +1764,11 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
err_add_host: err_add_host:
reset_control_assert(tegra_host->rst); reset_control_assert(tegra_host->rst);
err_rst_assert:
pm_runtime_put_sync_suspend(&pdev->dev);
err_pm_get:
pm_runtime_disable(&pdev->dev);
err_rst_get: err_rst_get:
clk_disable_unprepare(pltfm_host->clk);
err_clk_get: err_clk_get:
clk_disable_unprepare(tegra_host->tmclk); clk_disable_unprepare(tegra_host->tmclk);
err_power_req: err_power_req:
@ -1765,19 +1787,38 @@ static int sdhci_tegra_remove(struct platform_device *pdev)
reset_control_assert(tegra_host->rst); reset_control_assert(tegra_host->rst);
usleep_range(2000, 4000); usleep_range(2000, 4000);
clk_disable_unprepare(pltfm_host->clk);
clk_disable_unprepare(tegra_host->tmclk);
pm_runtime_put_sync_suspend(&pdev->dev);
pm_runtime_force_suspend(&pdev->dev);
clk_disable_unprepare(tegra_host->tmclk);
sdhci_pltfm_free(pdev); sdhci_pltfm_free(pdev);
return 0; return 0;
} }
#ifdef CONFIG_PM_SLEEP static int __maybe_unused sdhci_tegra_runtime_suspend(struct device *dev)
static int __maybe_unused sdhci_tegra_suspend(struct device *dev)
{ {
struct sdhci_host *host = dev_get_drvdata(dev); struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
clk_disable_unprepare(pltfm_host->clk);
return 0;
}
static int __maybe_unused sdhci_tegra_runtime_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
return clk_prepare_enable(pltfm_host->clk);
}
#ifdef CONFIG_PM_SLEEP
static int sdhci_tegra_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
int ret; int ret;
if (host->mmc->caps2 & MMC_CAP2_CQE) { if (host->mmc->caps2 & MMC_CAP2_CQE) {
@ -1792,17 +1833,22 @@ static int __maybe_unused sdhci_tegra_suspend(struct device *dev)
return ret; return ret;
} }
clk_disable_unprepare(pltfm_host->clk); ret = pm_runtime_force_suspend(dev);
if (ret) {
sdhci_resume_host(host);
cqhci_resume(host->mmc);
return ret;
}
return 0; return 0;
} }
static int __maybe_unused sdhci_tegra_resume(struct device *dev) static int sdhci_tegra_resume(struct device *dev)
{ {
struct sdhci_host *host = dev_get_drvdata(dev); struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
int ret; int ret;
ret = clk_prepare_enable(pltfm_host->clk); ret = pm_runtime_force_resume(dev);
if (ret) if (ret)
return ret; return ret;
@ -1821,13 +1867,16 @@ static int __maybe_unused sdhci_tegra_resume(struct device *dev)
suspend_host: suspend_host:
sdhci_suspend_host(host); sdhci_suspend_host(host);
disable_clk: disable_clk:
clk_disable_unprepare(pltfm_host->clk); pm_runtime_force_suspend(dev);
return ret; return ret;
} }
#endif #endif
static SIMPLE_DEV_PM_OPS(sdhci_tegra_dev_pm_ops, sdhci_tegra_suspend, static const struct dev_pm_ops sdhci_tegra_dev_pm_ops = {
sdhci_tegra_resume); SET_RUNTIME_PM_OPS(sdhci_tegra_runtime_suspend, sdhci_tegra_runtime_resume,
NULL)
SET_SYSTEM_SLEEP_PM_OPS(sdhci_tegra_suspend, sdhci_tegra_resume)
};
static struct platform_driver sdhci_tegra_driver = { static struct platform_driver sdhci_tegra_driver = {
.driver = { .driver = {


@@ -130,7 +130,9 @@ static int rpcif_hb_probe(struct platform_device *pdev)
         rpcif_enable_rpm(&hyperbus->rpc);

-        rpcif_hw_init(&hyperbus->rpc, true);
+        error = rpcif_hw_init(&hyperbus->rpc, true);
+        if (error)
+                return error;

         hyperbus->hbdev.map.size = hyperbus->rpc.size;
         hyperbus->hbdev.map.virt = hyperbus->rpc.dirmap;


@ -17,8 +17,11 @@
#include <linux/mtd/rawnand.h> #include <linux/mtd/rawnand.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h> #include <linux/reset.h>
#include <soc/tegra/common.h>
#define COMMAND 0x00 #define COMMAND 0x00
#define COMMAND_GO BIT(31) #define COMMAND_GO BIT(31)
#define COMMAND_CLE BIT(30) #define COMMAND_CLE BIT(30)
@ -1151,6 +1154,7 @@ static int tegra_nand_probe(struct platform_device *pdev)
return -ENOMEM; return -ENOMEM;
ctrl->dev = &pdev->dev; ctrl->dev = &pdev->dev;
platform_set_drvdata(pdev, ctrl);
nand_controller_init(&ctrl->controller); nand_controller_init(&ctrl->controller);
ctrl->controller.ops = &tegra_nand_controller_ops; ctrl->controller.ops = &tegra_nand_controller_ops;
@ -1166,14 +1170,23 @@ static int tegra_nand_probe(struct platform_device *pdev)
if (IS_ERR(ctrl->clk)) if (IS_ERR(ctrl->clk))
return PTR_ERR(ctrl->clk); return PTR_ERR(ctrl->clk);
err = clk_prepare_enable(ctrl->clk); err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
if (err)
return err;
/*
* This driver doesn't support active power management yet,
* so we will simply keep device resumed.
*/
pm_runtime_enable(&pdev->dev);
err = pm_runtime_resume_and_get(&pdev->dev);
if (err) if (err)
return err; return err;
err = reset_control_reset(rst); err = reset_control_reset(rst);
if (err) { if (err) {
dev_err(ctrl->dev, "Failed to reset HW: %d\n", err); dev_err(ctrl->dev, "Failed to reset HW: %d\n", err);
goto err_disable_clk; goto err_put_pm;
} }
writel_relaxed(HWSTATUS_CMD_DEFAULT, ctrl->regs + HWSTATUS_CMD); writel_relaxed(HWSTATUS_CMD_DEFAULT, ctrl->regs + HWSTATUS_CMD);
@ -1188,21 +1201,20 @@ static int tegra_nand_probe(struct platform_device *pdev)
dev_name(&pdev->dev), ctrl); dev_name(&pdev->dev), ctrl);
if (err) { if (err) {
dev_err(ctrl->dev, "Failed to get IRQ: %d\n", err); dev_err(ctrl->dev, "Failed to get IRQ: %d\n", err);
goto err_disable_clk; goto err_put_pm;
} }
writel_relaxed(DMA_MST_CTRL_IS_DONE, ctrl->regs + DMA_MST_CTRL); writel_relaxed(DMA_MST_CTRL_IS_DONE, ctrl->regs + DMA_MST_CTRL);
err = tegra_nand_chips_init(ctrl->dev, ctrl); err = tegra_nand_chips_init(ctrl->dev, ctrl);
if (err) if (err)
goto err_disable_clk; goto err_put_pm;
platform_set_drvdata(pdev, ctrl);
return 0; return 0;
err_disable_clk: err_put_pm:
clk_disable_unprepare(ctrl->clk); pm_runtime_put_sync_suspend(ctrl->dev);
pm_runtime_force_suspend(ctrl->dev);
return err; return err;
} }
@ -1219,11 +1231,40 @@ static int tegra_nand_remove(struct platform_device *pdev)
nand_cleanup(chip); nand_cleanup(chip);
pm_runtime_put_sync_suspend(ctrl->dev);
pm_runtime_force_suspend(ctrl->dev);
return 0;
}
static int __maybe_unused tegra_nand_runtime_resume(struct device *dev)
{
struct tegra_nand_controller *ctrl = dev_get_drvdata(dev);
int err;
err = clk_prepare_enable(ctrl->clk);
if (err) {
dev_err(dev, "Failed to enable clock: %d\n", err);
return err;
}
return 0;
}
static int __maybe_unused tegra_nand_runtime_suspend(struct device *dev)
{
struct tegra_nand_controller *ctrl = dev_get_drvdata(dev);
clk_disable_unprepare(ctrl->clk); clk_disable_unprepare(ctrl->clk);
return 0; return 0;
} }
static const struct dev_pm_ops tegra_nand_pm = {
SET_RUNTIME_PM_OPS(tegra_nand_runtime_suspend, tegra_nand_runtime_resume,
NULL)
};
static const struct of_device_id tegra_nand_of_match[] = { static const struct of_device_id tegra_nand_of_match[] = {
{ .compatible = "nvidia,tegra20-nand" }, { .compatible = "nvidia,tegra20-nand" },
{ /* sentinel */ } { /* sentinel */ }
@ -1234,6 +1275,7 @@ static struct platform_driver tegra_nand_driver = {
.driver = { .driver = {
.name = "tegra-nand", .name = "tegra-nand",
.of_match_table = tegra_nand_of_match, .of_match_table = tegra_nand_of_match,
.pm = &tegra_nand_pm,
}, },
.probe = tegra_nand_probe, .probe = tegra_nand_probe,
.remove = tegra_nand_remove, .remove = tegra_nand_remove,


@ -42,12 +42,16 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/of_device.h> #include <linux/of_device.h>
#include <linux/pm_opp.h>
#include <linux/pwm.h> #include <linux/pwm.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h> #include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/reset.h> #include <linux/reset.h>
#include <soc/tegra/common.h>
#define PWM_ENABLE (1 << 31) #define PWM_ENABLE (1 << 31)
#define PWM_DUTY_WIDTH 8 #define PWM_DUTY_WIDTH 8
#define PWM_DUTY_SHIFT 16 #define PWM_DUTY_SHIFT 16
@ -145,7 +149,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
required_clk_rate = required_clk_rate =
(NSEC_PER_SEC / period_ns) << PWM_DUTY_WIDTH; (NSEC_PER_SEC / period_ns) << PWM_DUTY_WIDTH;
err = clk_set_rate(pc->clk, required_clk_rate); err = dev_pm_opp_set_rate(pc->dev, required_clk_rate);
if (err < 0) if (err < 0)
return -EINVAL; return -EINVAL;
@ -181,8 +185,8 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
* before writing the register. Otherwise, keep it enabled. * before writing the register. Otherwise, keep it enabled.
*/ */
if (!pwm_is_enabled(pwm)) { if (!pwm_is_enabled(pwm)) {
err = clk_prepare_enable(pc->clk); err = pm_runtime_resume_and_get(pc->dev);
if (err < 0) if (err)
return err; return err;
} else } else
val |= PWM_ENABLE; val |= PWM_ENABLE;
@ -193,7 +197,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
* If the PWM is not enabled, turn the clock off again to save power. * If the PWM is not enabled, turn the clock off again to save power.
*/ */
if (!pwm_is_enabled(pwm)) if (!pwm_is_enabled(pwm))
clk_disable_unprepare(pc->clk); pm_runtime_put(pc->dev);
return 0; return 0;
} }
@ -204,8 +208,8 @@ static int tegra_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
int rc = 0; int rc = 0;
u32 val; u32 val;
rc = clk_prepare_enable(pc->clk); rc = pm_runtime_resume_and_get(pc->dev);
if (rc < 0) if (rc)
return rc; return rc;
val = pwm_readl(pc, pwm->hwpwm); val = pwm_readl(pc, pwm->hwpwm);
@ -224,7 +228,7 @@ static void tegra_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
val &= ~PWM_ENABLE; val &= ~PWM_ENABLE;
pwm_writel(pc, pwm->hwpwm, val); pwm_writel(pc, pwm->hwpwm, val);
clk_disable_unprepare(pc->clk); pm_runtime_put_sync(pc->dev);
} }
static const struct pwm_ops tegra_pwm_ops = { static const struct pwm_ops tegra_pwm_ops = {
@ -256,11 +260,20 @@ static int tegra_pwm_probe(struct platform_device *pdev)
if (IS_ERR(pwm->clk)) if (IS_ERR(pwm->clk))
return PTR_ERR(pwm->clk); return PTR_ERR(pwm->clk);
ret = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
if (ret)
return ret;
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret)
return ret;
/* Set maximum frequency of the IP */ /* Set maximum frequency of the IP */
ret = clk_set_rate(pwm->clk, pwm->soc->max_frequency); ret = dev_pm_opp_set_rate(pwm->dev, pwm->soc->max_frequency);
if (ret < 0) { if (ret < 0) {
dev_err(&pdev->dev, "Failed to set max frequency: %d\n", ret); dev_err(&pdev->dev, "Failed to set max frequency: %d\n", ret);
return ret; goto put_pm;
} }
/* /*
@ -278,7 +291,7 @@ static int tegra_pwm_probe(struct platform_device *pdev)
if (IS_ERR(pwm->rst)) { if (IS_ERR(pwm->rst)) {
ret = PTR_ERR(pwm->rst); ret = PTR_ERR(pwm->rst);
dev_err(&pdev->dev, "Reset control is not found: %d\n", ret); dev_err(&pdev->dev, "Reset control is not found: %d\n", ret);
return ret; goto put_pm;
} }
reset_control_deassert(pwm->rst); reset_control_deassert(pwm->rst);
@ -291,10 +304,16 @@ static int tegra_pwm_probe(struct platform_device *pdev)
if (ret < 0) { if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret); dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
reset_control_assert(pwm->rst); reset_control_assert(pwm->rst);
return ret; goto put_pm;
} }
pm_runtime_put(&pdev->dev);
return 0; return 0;
put_pm:
pm_runtime_put_sync_suspend(&pdev->dev);
pm_runtime_force_suspend(&pdev->dev);
return ret;
} }
static int tegra_pwm_remove(struct platform_device *pdev) static int tegra_pwm_remove(struct platform_device *pdev)
@ -305,20 +324,44 @@ static int tegra_pwm_remove(struct platform_device *pdev)
reset_control_assert(pc->rst); reset_control_assert(pc->rst);
pm_runtime_force_suspend(&pdev->dev);
return 0; return 0;
} }
#ifdef CONFIG_PM_SLEEP static int __maybe_unused tegra_pwm_runtime_suspend(struct device *dev)
static int tegra_pwm_suspend(struct device *dev)
{ {
return pinctrl_pm_select_sleep_state(dev); struct tegra_pwm_chip *pc = dev_get_drvdata(dev);
int err;
clk_disable_unprepare(pc->clk);
err = pinctrl_pm_select_sleep_state(dev);
if (err) {
clk_prepare_enable(pc->clk);
return err;
}
return 0;
} }
static int tegra_pwm_resume(struct device *dev) static int __maybe_unused tegra_pwm_runtime_resume(struct device *dev)
{ {
return pinctrl_pm_select_default_state(dev); struct tegra_pwm_chip *pc = dev_get_drvdata(dev);
int err;
err = pinctrl_pm_select_default_state(dev);
if (err)
return err;
err = clk_prepare_enable(pc->clk);
if (err) {
pinctrl_pm_select_sleep_state(dev);
return err;
}
return 0;
} }
#endif
static const struct tegra_pwm_soc tegra20_pwm_soc = { static const struct tegra_pwm_soc tegra20_pwm_soc = {
.num_channels = 4, .num_channels = 4,
@ -344,7 +387,10 @@ static const struct of_device_id tegra_pwm_of_match[] = {
MODULE_DEVICE_TABLE(of, tegra_pwm_of_match); MODULE_DEVICE_TABLE(of, tegra_pwm_of_match);
static const struct dev_pm_ops tegra_pwm_pm_ops = { static const struct dev_pm_ops tegra_pwm_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(tegra_pwm_suspend, tegra_pwm_resume) SET_RUNTIME_PM_OPS(tegra_pwm_runtime_suspend, tegra_pwm_runtime_resume,
NULL)
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
}; };
static struct platform_driver tegra_pwm_driver = { static struct platform_driver tegra_pwm_driver = {


@@ -3,6 +3,7 @@ menu "SOC (System On Chip) specific Drivers"

 source "drivers/soc/actions/Kconfig"
 source "drivers/soc/amlogic/Kconfig"
+source "drivers/soc/apple/Kconfig"
 source "drivers/soc/aspeed/Kconfig"
 source "drivers/soc/atmel/Kconfig"
 source "drivers/soc/bcm/Kconfig"


@@ -4,6 +4,7 @@
 #
 obj-$(CONFIG_ARCH_ACTIONS)      += actions/
+obj-$(CONFIG_ARCH_APPLE)        += apple/
 obj-y                           += aspeed/
 obj-$(CONFIG_ARCH_AT91)         += atmel/
 obj-y                           += bcm/

drivers/soc/apple/Kconfig (new file)

@ -0,0 +1,22 @@
# SPDX-License-Identifier: GPL-2.0-only
if ARCH_APPLE || COMPILE_TEST
menu "Apple SoC drivers"
config APPLE_PMGR_PWRSTATE
bool "Apple SoC PMGR power state control"
depends on PM
select REGMAP
select MFD_SYSCON
select PM_GENERIC_DOMAINS
select RESET_CONTROLLER
default ARCH_APPLE
help
The PMGR block in Apple SoCs provides high-level power state
controls for SoC devices. This driver manages them through the
generic power domain framework, and also provides reset support.
endmenu
endif


@ -0,0 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_APPLE_PMGR_PWRSTATE) += apple-pmgr-pwrstate.o


@ -0,0 +1,324 @@
// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
* Apple SoC PMGR device power state driver
*
* Copyright The Asahi Linux Contributors
*/
#include <linux/bitops.h>
#include <linux/bitfield.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/reset-controller.h>
#include <linux/module.h>
#define APPLE_PMGR_RESET BIT(31)
#define APPLE_PMGR_AUTO_ENABLE BIT(28)
#define APPLE_PMGR_PS_AUTO GENMASK(27, 24)
#define APPLE_PMGR_PS_MIN GENMASK(19, 16)
#define APPLE_PMGR_PARENT_OFF BIT(11)
#define APPLE_PMGR_DEV_DISABLE BIT(10)
#define APPLE_PMGR_WAS_CLKGATED BIT(9)
#define APPLE_PMGR_WAS_PWRGATED BIT(8)
#define APPLE_PMGR_PS_ACTUAL GENMASK(7, 4)
#define APPLE_PMGR_PS_TARGET GENMASK(3, 0)
#define APPLE_PMGR_FLAGS (APPLE_PMGR_WAS_CLKGATED | APPLE_PMGR_WAS_PWRGATED)
#define APPLE_PMGR_PS_ACTIVE 0xf
#define APPLE_PMGR_PS_CLKGATE 0x4
#define APPLE_PMGR_PS_PWRGATE 0x0
#define APPLE_PMGR_PS_SET_TIMEOUT 100
#define APPLE_PMGR_RESET_TIME 1
struct apple_pmgr_ps {
struct device *dev;
struct generic_pm_domain genpd;
struct reset_controller_dev rcdev;
struct regmap *regmap;
u32 offset;
u32 min_state;
};
#define genpd_to_apple_pmgr_ps(_genpd) container_of(_genpd, struct apple_pmgr_ps, genpd)
#define rcdev_to_apple_pmgr_ps(_rcdev) container_of(_rcdev, struct apple_pmgr_ps, rcdev)
static int apple_pmgr_ps_set(struct generic_pm_domain *genpd, u32 pstate, bool auto_enable)
{
int ret;
struct apple_pmgr_ps *ps = genpd_to_apple_pmgr_ps(genpd);
u32 reg;
ret = regmap_read(ps->regmap, ps->offset, &reg);
if (ret < 0)
return ret;
/* Resets are synchronous, and only work if the device is powered and clocked. */
if (reg & APPLE_PMGR_RESET && pstate != APPLE_PMGR_PS_ACTIVE)
dev_err(ps->dev, "PS %s: powering off with RESET active\n",
genpd->name);
reg &= ~(APPLE_PMGR_AUTO_ENABLE | APPLE_PMGR_FLAGS | APPLE_PMGR_PS_TARGET);
reg |= FIELD_PREP(APPLE_PMGR_PS_TARGET, pstate);
dev_dbg(ps->dev, "PS %s: pwrstate = 0x%x: 0x%x\n", genpd->name, pstate, reg);
regmap_write(ps->regmap, ps->offset, reg);
ret = regmap_read_poll_timeout_atomic(
ps->regmap, ps->offset, reg,
(FIELD_GET(APPLE_PMGR_PS_ACTUAL, reg) == pstate), 1,
APPLE_PMGR_PS_SET_TIMEOUT);
if (ret < 0)
dev_err(ps->dev, "PS %s: Failed to reach power state 0x%x (now: 0x%x)\n",
genpd->name, pstate, reg);
if (auto_enable) {
/* Not all devices implement this; this is a no-op where not implemented. */
reg &= ~APPLE_PMGR_FLAGS;
reg |= APPLE_PMGR_AUTO_ENABLE;
regmap_write(ps->regmap, ps->offset, reg);
}
return ret;
}
static bool apple_pmgr_ps_is_active(struct apple_pmgr_ps *ps)
{
u32 reg = 0;
regmap_read(ps->regmap, ps->offset, &reg);
/*
* We consider domains as active if they are actually on, or if they have auto-PM
* enabled and the intended target is on.
*/
return (FIELD_GET(APPLE_PMGR_PS_ACTUAL, reg) == APPLE_PMGR_PS_ACTIVE ||
(FIELD_GET(APPLE_PMGR_PS_TARGET, reg) == APPLE_PMGR_PS_ACTIVE &&
reg & APPLE_PMGR_AUTO_ENABLE));
}
static int apple_pmgr_ps_power_on(struct generic_pm_domain *genpd)
{
return apple_pmgr_ps_set(genpd, APPLE_PMGR_PS_ACTIVE, true);
}
static int apple_pmgr_ps_power_off(struct generic_pm_domain *genpd)
{
return apple_pmgr_ps_set(genpd, APPLE_PMGR_PS_PWRGATE, false);
}
static int apple_pmgr_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
struct apple_pmgr_ps *ps = rcdev_to_apple_pmgr_ps(rcdev);
mutex_lock(&ps->genpd.mlock);
if (ps->genpd.status == GENPD_STATE_OFF)
dev_err(ps->dev, "PS 0x%x: asserting RESET while powered down\n", ps->offset);
dev_dbg(ps->dev, "PS 0x%x: assert reset\n", ps->offset);
/* Quiesce device before asserting reset */
regmap_update_bits(ps->regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_DEV_DISABLE,
APPLE_PMGR_DEV_DISABLE);
regmap_update_bits(ps->regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_RESET,
APPLE_PMGR_RESET);
mutex_unlock(&ps->genpd.mlock);
return 0;
}
static int apple_pmgr_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
{
struct apple_pmgr_ps *ps = rcdev_to_apple_pmgr_ps(rcdev);
mutex_lock(&ps->genpd.mlock);
dev_dbg(ps->dev, "PS 0x%x: deassert reset\n", ps->offset);
regmap_update_bits(ps->regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_RESET, 0);
regmap_update_bits(ps->regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_DEV_DISABLE, 0);
if (ps->genpd.status == GENPD_STATE_OFF)
dev_err(ps->dev, "PS 0x%x: RESET was deasserted while powered down\n", ps->offset);
mutex_unlock(&ps->genpd.mlock);
return 0;
}
static int apple_pmgr_reset_reset(struct reset_controller_dev *rcdev, unsigned long id)
{
int ret;
ret = apple_pmgr_reset_assert(rcdev, id);
if (ret)
return ret;
usleep_range(APPLE_PMGR_RESET_TIME, 2 * APPLE_PMGR_RESET_TIME);
return apple_pmgr_reset_deassert(rcdev, id);
}
static int apple_pmgr_reset_status(struct reset_controller_dev *rcdev, unsigned long id)
{
struct apple_pmgr_ps *ps = rcdev_to_apple_pmgr_ps(rcdev);
u32 reg = 0;
regmap_read(ps->regmap, ps->offset, &reg);
return !!(reg & APPLE_PMGR_RESET);
}
const struct reset_control_ops apple_pmgr_reset_ops = {
.assert = apple_pmgr_reset_assert,
.deassert = apple_pmgr_reset_deassert,
.reset = apple_pmgr_reset_reset,
.status = apple_pmgr_reset_status,
};
static int apple_pmgr_reset_xlate(struct reset_controller_dev *rcdev,
const struct of_phandle_args *reset_spec)
{
return 0;
}
static int apple_pmgr_ps_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct apple_pmgr_ps *ps;
struct regmap *regmap;
struct of_phandle_iterator it;
int ret;
const char *name;
bool active;
regmap = syscon_node_to_regmap(node->parent);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
ps = devm_kzalloc(dev, sizeof(*ps), GFP_KERNEL);
if (!ps)
return -ENOMEM;
ps->dev = dev;
ps->regmap = regmap;
ret = of_property_read_string(node, "label", &name);
if (ret < 0) {
dev_err(dev, "missing label property\n");
return ret;
}
ret = of_property_read_u32(node, "reg", &ps->offset);
if (ret < 0) {
dev_err(dev, "missing reg property\n");
return ret;
}
ps->genpd.name = name;
ps->genpd.power_on = apple_pmgr_ps_power_on;
ps->genpd.power_off = apple_pmgr_ps_power_off;
ret = of_property_read_u32(node, "apple,min-state", &ps->min_state);
if (ret == 0 && ps->min_state <= APPLE_PMGR_PS_ACTIVE)
regmap_update_bits(regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_PS_MIN,
FIELD_PREP(APPLE_PMGR_PS_MIN, ps->min_state));
active = apple_pmgr_ps_is_active(ps);
if (of_property_read_bool(node, "apple,always-on")) {
ps->genpd.flags |= GENPD_FLAG_ALWAYS_ON;
if (!active) {
dev_warn(dev, "always-on domain %s is not on at boot\n", name);
/* Turn it on so pm_genpd_init does not fail */
active = apple_pmgr_ps_power_on(&ps->genpd) == 0;
}
}
/* Turn on auto-PM if the domain is already on */
if (active)
regmap_update_bits(regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_AUTO_ENABLE,
APPLE_PMGR_AUTO_ENABLE);
ret = pm_genpd_init(&ps->genpd, NULL, !active);
if (ret < 0) {
dev_err(dev, "pm_genpd_init failed\n");
return ret;
}
ret = of_genpd_add_provider_simple(node, &ps->genpd);
if (ret < 0) {
dev_err(dev, "of_genpd_add_provider_simple failed\n");
return ret;
}
of_for_each_phandle(&it, ret, node, "power-domains", "#power-domain-cells", -1) {
struct of_phandle_args parent, child;
parent.np = it.node;
parent.args_count = of_phandle_iterator_args(&it, parent.args, MAX_PHANDLE_ARGS);
child.np = node;
child.args_count = 0;
ret = of_genpd_add_subdomain(&parent, &child);
if (ret == -EPROBE_DEFER) {
of_node_put(parent.np);
goto err_remove;
} else if (ret < 0) {
dev_err(dev, "failed to add to parent domain: %d (%s -> %s)\n",
ret, it.node->name, node->name);
of_node_put(parent.np);
goto err_remove;
}
}
/*
* Do not participate in regular PM; parent power domains are handled via the
* genpd hierarchy.
*/
pm_genpd_remove_device(dev);
ps->rcdev.owner = THIS_MODULE;
ps->rcdev.nr_resets = 1;
ps->rcdev.ops = &apple_pmgr_reset_ops;
ps->rcdev.of_node = dev->of_node;
ps->rcdev.of_reset_n_cells = 0;
ps->rcdev.of_xlate = apple_pmgr_reset_xlate;
ret = devm_reset_controller_register(dev, &ps->rcdev);
if (ret < 0)
goto err_remove;
return 0;
err_remove:
of_genpd_del_provider(node);
pm_genpd_remove(&ps->genpd);
return ret;
}
static const struct of_device_id apple_pmgr_ps_of_match[] = {
{ .compatible = "apple,pmgr-pwrstate" },
{}
};
MODULE_DEVICE_TABLE(of, apple_pmgr_ps_of_match);
static struct platform_driver apple_pmgr_ps_driver = {
.probe = apple_pmgr_ps_probe,
.driver = {
.name = "apple-pmgr-pwrstate",
.of_match_table = apple_pmgr_ps_of_match,
},
};
MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
MODULE_DESCRIPTION("PMGR power state driver for Apple SoCs");
MODULE_LICENSE("GPL v2");
module_platform_driver(apple_pmgr_ps_driver);
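
As a usage illustration (not part of the driver above): each pmgr-pwrstate node is exposed to consumers as a generic power domain plus a single, self-indexed reset line (of_reset_n_cells = 0; the xlate callback always returns 0). The sketch below shows how a hypothetical peripheral driver could consume both. The consumer device and its properties are assumptions; only the runtime-PM and reset-framework calls are standard kernel APIs.

/* Hedged consumer sketch; assumes a DT node carrying "power-domains" and
 * "resets" phandles that point at an apple,pmgr-pwrstate node. */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

static int example_consumer_probe(struct platform_device *pdev)
{
	struct reset_control *rst;
	int ret;

	/* The PS power domain is attached by the driver core from the
	 * "power-domains" property; only the reset line must be requested. */
	rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev); /* powers the domain on */
	if (ret < 0) {
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	/* Cycle the block through reset; this ends up in apple_pmgr_reset_reset() */
	return reset_control_reset(rst);
}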


@@ -377,7 +377,7 @@ static int imx_pgc_power_down(struct generic_pm_domain *genpd)
 }
 }
-pm_runtime_put(domain->dev);
+pm_runtime_put_sync_suspend(domain->dev);
 return 0;
@@ -734,6 +734,7 @@ static const struct imx_pgc_domain imx8mm_pgc_domains[] = {
 .map = IMX8MM_VPUH1_A53_DOMAIN,
 },
 .pgc = BIT(IMX8MM_PGC_VPUH1),
+.keep_clocks = true,
 },
 [IMX8MM_POWER_DOMAIN_DISPMIX] = {
@@ -840,6 +841,32 @@ static const struct imx_pgc_domain imx8mn_pgc_domains[] = {
 .hskack = IMX8MN_GPUMIX_HSK_PWRDNACKN,
 },
 .pgc = BIT(IMX8MN_PGC_GPUMIX),
+.keep_clocks = true,
+},
+[IMX8MN_POWER_DOMAIN_DISPMIX] = {
+.genpd = {
+.name = "dispmix",
+},
+.bits = {
+.pxx = IMX8MN_DISPMIX_SW_Pxx_REQ,
+.map = IMX8MN_DISPMIX_A53_DOMAIN,
+.hskreq = IMX8MN_DISPMIX_HSK_PWRDNREQN,
+.hskack = IMX8MN_DISPMIX_HSK_PWRDNACKN,
+},
+.pgc = BIT(IMX8MN_PGC_DISPMIX),
+.keep_clocks = true,
+},
+[IMX8MN_POWER_DOMAIN_MIPI] = {
+.genpd = {
+.name = "mipi",
+},
+.bits = {
+.pxx = IMX8MN_MIPI_SW_Pxx_REQ,
+.map = IMX8MN_MIPI_A53_DOMAIN,
+},
+.pgc = BIT(IMX8MN_PGC_MIPI),
 },
 };


@@ -14,6 +14,7 @@
 #include <linux/clk.h>
 #include <dt-bindings/power/imx8mm-power.h>
+#include <dt-bindings/power/imx8mn-power.h>
 #define BLK_SFT_RSTN 0x0
 #define BLK_CLK_EN 0x4
@@ -517,6 +518,77 @@ static const struct imx8m_blk_ctrl_data imx8mm_disp_blk_ctl_dev_data = {
 .num_domains = ARRAY_SIZE(imx8mm_disp_blk_ctl_domain_data),
 };
+static int imx8mn_disp_power_notifier(struct notifier_block *nb,
+unsigned long action, void *data)
+{
+struct imx8m_blk_ctrl *bc = container_of(nb, struct imx8m_blk_ctrl,
+power_nb);
+if (action != GENPD_NOTIFY_ON && action != GENPD_NOTIFY_PRE_OFF)
+return NOTIFY_OK;
+/* Enable bus clock and deassert bus reset */
+regmap_set_bits(bc->regmap, BLK_CLK_EN, BIT(8));
+regmap_set_bits(bc->regmap, BLK_SFT_RSTN, BIT(8));
+/*
+* On power up we have no software backchannel to the GPC to
+* wait for the ADB handshake to happen, so we just delay for a
+* bit. On power down the GPC driver waits for the handshake.
+*/
+if (action == GENPD_NOTIFY_ON)
+udelay(5);
+return NOTIFY_OK;
+}
+static const struct imx8m_blk_ctrl_domain_data imx8mn_disp_blk_ctl_domain_data[] = {
+[IMX8MN_DISPBLK_PD_MIPI_DSI] = {
+.name = "dispblk-mipi-dsi",
+.clk_names = (const char *[]){ "dsi-pclk", "dsi-ref", },
+.num_clks = 2,
+.gpc_name = "mipi-dsi",
+.rst_mask = BIT(0) | BIT(1),
+.clk_mask = BIT(0) | BIT(1),
+.mipi_phy_rst_mask = BIT(17),
+},
+[IMX8MN_DISPBLK_PD_MIPI_CSI] = {
+.name = "dispblk-mipi-csi",
+.clk_names = (const char *[]){ "csi-aclk", "csi-pclk" },
+.num_clks = 2,
+.gpc_name = "mipi-csi",
+.rst_mask = BIT(2) | BIT(3),
+.clk_mask = BIT(2) | BIT(3),
+.mipi_phy_rst_mask = BIT(16),
+},
+[IMX8MN_DISPBLK_PD_LCDIF] = {
+.name = "dispblk-lcdif",
+.clk_names = (const char *[]){ "lcdif-axi", "lcdif-apb", "lcdif-pix", },
+.num_clks = 3,
+.gpc_name = "lcdif",
+.rst_mask = BIT(4) | BIT(5),
+.clk_mask = BIT(4) | BIT(5),
+},
+[IMX8MN_DISPBLK_PD_ISI] = {
+.name = "dispblk-isi",
+.clk_names = (const char *[]){ "disp_axi", "disp_apb", "disp_axi_root",
+"disp_apb_root"},
+.num_clks = 4,
+.gpc_name = "isi",
+.rst_mask = BIT(6) | BIT(7),
+.clk_mask = BIT(6) | BIT(7),
+},
+};
+static const struct imx8m_blk_ctrl_data imx8mn_disp_blk_ctl_dev_data = {
+.max_reg = 0x84,
+.power_notifier_fn = imx8mn_disp_power_notifier,
+.domains = imx8mn_disp_blk_ctl_domain_data,
+.num_domains = ARRAY_SIZE(imx8mn_disp_blk_ctl_domain_data),
+};
 static const struct of_device_id imx8m_blk_ctrl_of_match[] = {
 {
 .compatible = "fsl,imx8mm-vpu-blk-ctrl",
@@ -524,7 +596,10 @@ static const struct of_device_id imx8m_blk_ctrl_of_match[] = {
 }, {
 .compatible = "fsl,imx8mm-disp-blk-ctrl",
 .data = &imx8mm_disp_blk_ctl_dev_data
-} ,{
+}, {
+.compatible = "fsl,imx8mn-disp-blk-ctrl",
+.data = &imx8mn_disp_blk_ctl_dev_data
+}, {
 /* Sentinel */
 }
 };


@@ -1010,7 +1010,7 @@ static int cpr_interpolate(const struct corner *corner, int step_volt,
 return corner->uV;
 temp = f_diff * (uV_high - uV_low);
-do_div(temp, f_high - f_low);
+temp = div64_ul(temp, f_high - f_low);
 /*
 * max_volt_scale has units of uV/MHz while freq values


@@ -195,6 +195,28 @@ static const struct llcc_slice_config sm8250_data[] = {
 { LLCC_WRCACHE, 31, 256, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
 };
+static const struct llcc_slice_config sm8350_data[] = {
+{ LLCC_CPUSS, 1, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 1 },
+{ LLCC_VIDSC0, 2, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+{ LLCC_AUDIO, 6, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
+{ LLCC_MDMHPGRW, 7, 1024, 3, 0, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+{ LLCC_MODHW, 9, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+{ LLCC_CMPT, 10, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+{ LLCC_GPUHTW, 11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+{ LLCC_GPU, 12, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 1, 0 },
+{ LLCC_MMUHWT, 13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 },
+{ LLCC_DISP, 16, 3072, 2, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+{ LLCC_MDMPNG, 21, 1024, 0, 1, 0xf, 0x0, 0, 0, 0, 0, 1, 0 },
+{ LLCC_AUDHW, 22, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+{ LLCC_CVP, 28, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+{ LLCC_MODPE, 29, 256, 1, 1, 0xf, 0x0, 0, 0, 0, 0, 1, 0 },
+{ LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0x1, 1, 0, 0, 0, 1, 0 },
+{ LLCC_WRCACHE, 31, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 },
+{ LLCC_CVPFW, 17, 512, 1, 0, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+{ LLCC_CPUSS1, 3, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+{ LLCC_CPUHWT, 5, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 },
+};
 static const struct qcom_llcc_config sc7180_cfg = {
 .sct_data = sc7180_data,
 .size = ARRAY_SIZE(sc7180_data),
@@ -228,6 +250,11 @@ static const struct qcom_llcc_config sm8250_cfg = {
 .size = ARRAY_SIZE(sm8250_data),
 };
+static const struct qcom_llcc_config sm8350_cfg = {
+.sct_data = sm8350_data,
+.size = ARRAY_SIZE(sm8350_data),
+};
 static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER;
 /**
@@ -644,6 +671,7 @@ static const struct of_device_id qcom_llcc_of_match[] = {
 { .compatible = "qcom,sm6350-llcc", .data = &sm6350_cfg },
 { .compatible = "qcom,sm8150-llcc", .data = &sm8150_cfg },
 { .compatible = "qcom,sm8250-llcc", .data = &sm8250_cfg },
+{ .compatible = "qcom,sm8350-llcc", .data = &sm8350_cfg },
 { }
 };


@@ -352,7 +352,7 @@ static int qmp_cdev_set_cur_state(struct thermal_cooling_device *cdev,
 return ret;
 }
-static struct thermal_cooling_device_ops qmp_cooling_device_ops = {
+static const struct thermal_cooling_device_ops qmp_cooling_device_ops = {
 .get_max_state = qmp_cdev_get_max_state,
 .get_cur_state = qmp_cdev_get_cur_state,
 .set_cur_state = qmp_cdev_set_cur_state,


@@ -237,6 +237,15 @@ static const struct stats_config rpm_data = {
 .subsystem_stats_in_smem = false,
 };
+/* Older RPM firmwares have the stats at a fixed offset instead */
+static const struct stats_config rpm_data_dba0 = {
+.stats_offset = 0xdba0,
+.num_records = 2,
+.appended_stats_avail = true,
+.dynamic_offset = false,
+.subsystem_stats_in_smem = false,
+};
 static const struct stats_config rpmh_data = {
 .stats_offset = 0x48,
 .num_records = 3,
@@ -246,6 +255,10 @@ static const struct stats_config rpmh_data = {
 };
 static const struct of_device_id qcom_stats_table[] = {
+{ .compatible = "qcom,apq8084-rpm-stats", .data = &rpm_data_dba0 },
+{ .compatible = "qcom,msm8226-rpm-stats", .data = &rpm_data_dba0 },
+{ .compatible = "qcom,msm8916-rpm-stats", .data = &rpm_data_dba0 },
+{ .compatible = "qcom,msm8974-rpm-stats", .data = &rpm_data_dba0 },
 { .compatible = "qcom,rpm-stats", .data = &rpm_data },
 { .compatible = "qcom,rpmh-stats", .data = &rpmh_data },
 { }


@@ -96,7 +96,7 @@ static void qmi_recv_del_server(struct qmi_handle *qmi,
 * @node: id of the dying node
 *
 * Signals the client that all previously registered services on this node are
-* now gone and then calls the bye callback to allow the client client further
+* now gone and then calls the bye callback to allow the client further
 * cleaning up resources associated with this remote.
 */
 static void qmi_recv_bye(struct qmi_handle *qmi,


@@ -691,7 +691,7 @@ static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
 * @drv: The controller.
 * @msg: The data to be written to the controller.
 *
-* This should only be called for for sleep/wake state, never active-only
+* This should only be called for sleep/wake state, never active-only
 * state.
 *
 * The caller must ensure that no other RPMH actions are happening and the


@ -63,73 +63,134 @@ struct rpmhpd_desc {
static DEFINE_MUTEX(rpmhpd_lock); static DEFINE_MUTEX(rpmhpd_lock);
/* SDM845 RPMH powerdomains */ /* RPMH powerdomains */
static struct rpmhpd sdm845_ebi = { static struct rpmhpd cx_ao;
static struct rpmhpd mx;
static struct rpmhpd mx_ao;
static struct rpmhpd cx = {
.pd = { .name = "cx", },
.peer = &cx_ao,
.res_name = "cx.lvl",
};
static struct rpmhpd cx_ao = {
.pd = { .name = "cx_ao", },
.active_only = true,
.peer = &cx,
.res_name = "cx.lvl",
};
static struct rpmhpd cx_ao_w_mx_parent;
static struct rpmhpd cx_w_mx_parent = {
.pd = { .name = "cx", },
.peer = &cx_ao_w_mx_parent,
.parent = &mx.pd,
.res_name = "cx.lvl",
};
static struct rpmhpd cx_ao_w_mx_parent = {
.pd = { .name = "cx_ao", },
.active_only = true,
.peer = &cx_w_mx_parent,
.parent = &mx_ao.pd,
.res_name = "cx.lvl",
};
static struct rpmhpd ebi = {
.pd = { .name = "ebi", }, .pd = { .name = "ebi", },
.res_name = "ebi.lvl", .res_name = "ebi.lvl",
}; };
static struct rpmhpd sdm845_lmx = { static struct rpmhpd gfx = {
.pd = { .name = "lmx", },
.res_name = "lmx.lvl",
};
static struct rpmhpd sdm845_lcx = {
.pd = { .name = "lcx", },
.res_name = "lcx.lvl",
};
static struct rpmhpd sdm845_gfx = {
.pd = { .name = "gfx", }, .pd = { .name = "gfx", },
.res_name = "gfx.lvl", .res_name = "gfx.lvl",
}; };
static struct rpmhpd sdm845_mss = { static struct rpmhpd lcx = {
.pd = { .name = "lcx", },
.res_name = "lcx.lvl",
};
static struct rpmhpd lmx = {
.pd = { .name = "lmx", },
.res_name = "lmx.lvl",
};
static struct rpmhpd mmcx_ao;
static struct rpmhpd mmcx = {
.pd = { .name = "mmcx", },
.peer = &mmcx_ao,
.res_name = "mmcx.lvl",
};
static struct rpmhpd mmcx_ao = {
.pd = { .name = "mmcx_ao", },
.active_only = true,
.peer = &mmcx,
.res_name = "mmcx.lvl",
};
static struct rpmhpd mmcx_ao_w_cx_parent;
static struct rpmhpd mmcx_w_cx_parent = {
.pd = { .name = "mmcx", },
.peer = &mmcx_ao_w_cx_parent,
.parent = &cx.pd,
.res_name = "mmcx.lvl",
};
static struct rpmhpd mmcx_ao_w_cx_parent = {
.pd = { .name = "mmcx_ao", },
.active_only = true,
.peer = &mmcx_w_cx_parent,
.parent = &cx_ao.pd,
.res_name = "mmcx.lvl",
};
static struct rpmhpd mss = {
.pd = { .name = "mss", }, .pd = { .name = "mss", },
.res_name = "mss.lvl", .res_name = "mss.lvl",
}; };
static struct rpmhpd sdm845_mx_ao; static struct rpmhpd mx_ao;
static struct rpmhpd sdm845_mx = { static struct rpmhpd mx = {
.pd = { .name = "mx", }, .pd = { .name = "mx", },
.peer = &sdm845_mx_ao, .peer = &mx_ao,
.res_name = "mx.lvl", .res_name = "mx.lvl",
}; };
static struct rpmhpd sdm845_mx_ao = { static struct rpmhpd mx_ao = {
.pd = { .name = "mx_ao", }, .pd = { .name = "mx_ao", },
.active_only = true, .active_only = true,
.peer = &sdm845_mx, .peer = &mx,
.res_name = "mx.lvl", .res_name = "mx.lvl",
}; };
static struct rpmhpd sdm845_cx_ao; static struct rpmhpd mxc_ao;
static struct rpmhpd sdm845_cx = { static struct rpmhpd mxc = {
.pd = { .name = "cx", }, .pd = { .name = "mxc", },
.peer = &sdm845_cx_ao, .peer = &mxc_ao,
.parent = &sdm845_mx.pd, .res_name = "mxc.lvl",
.res_name = "cx.lvl",
}; };
static struct rpmhpd sdm845_cx_ao = { static struct rpmhpd mxc_ao = {
.pd = { .name = "cx_ao", }, .pd = { .name = "mxc_ao", },
.active_only = true, .active_only = true,
.peer = &sdm845_cx, .peer = &mxc,
.parent = &sdm845_mx_ao.pd, .res_name = "mxc.lvl",
.res_name = "cx.lvl",
}; };
/* SDM845 RPMH powerdomains */
static struct rpmhpd *sdm845_rpmhpds[] = { static struct rpmhpd *sdm845_rpmhpds[] = {
[SDM845_EBI] = &sdm845_ebi, [SDM845_CX] = &cx_w_mx_parent,
[SDM845_MX] = &sdm845_mx, [SDM845_CX_AO] = &cx_ao_w_mx_parent,
[SDM845_MX_AO] = &sdm845_mx_ao, [SDM845_EBI] = &ebi,
[SDM845_CX] = &sdm845_cx, [SDM845_GFX] = &gfx,
[SDM845_CX_AO] = &sdm845_cx_ao, [SDM845_LCX] = &lcx,
[SDM845_LMX] = &sdm845_lmx, [SDM845_LMX] = &lmx,
[SDM845_LCX] = &sdm845_lcx, [SDM845_MSS] = &mss,
[SDM845_GFX] = &sdm845_gfx, [SDM845_MX] = &mx,
[SDM845_MSS] = &sdm845_mss, [SDM845_MX_AO] = &mx_ao,
}; };
static const struct rpmhpd_desc sdm845_desc = { static const struct rpmhpd_desc sdm845_desc = {
@ -139,9 +200,9 @@ static const struct rpmhpd_desc sdm845_desc = {
/* SDX55 RPMH powerdomains */ /* SDX55 RPMH powerdomains */
static struct rpmhpd *sdx55_rpmhpds[] = { static struct rpmhpd *sdx55_rpmhpds[] = {
[SDX55_MSS] = &sdm845_mss, [SDX55_CX] = &cx_w_mx_parent,
[SDX55_MX] = &sdm845_mx, [SDX55_MSS] = &mss,
[SDX55_CX] = &sdm845_cx, [SDX55_MX] = &mx,
}; };
static const struct rpmhpd_desc sdx55_desc = { static const struct rpmhpd_desc sdx55_desc = {
@ -151,12 +212,12 @@ static const struct rpmhpd_desc sdx55_desc = {
/* SM6350 RPMH powerdomains */ /* SM6350 RPMH powerdomains */
static struct rpmhpd *sm6350_rpmhpds[] = { static struct rpmhpd *sm6350_rpmhpds[] = {
[SM6350_CX] = &sdm845_cx, [SM6350_CX] = &cx_w_mx_parent,
[SM6350_GFX] = &sdm845_gfx, [SM6350_GFX] = &gfx,
[SM6350_LCX] = &sdm845_lcx, [SM6350_LCX] = &lcx,
[SM6350_LMX] = &sdm845_lmx, [SM6350_LMX] = &lmx,
[SM6350_MSS] = &sdm845_mss, [SM6350_MSS] = &mss,
[SM6350_MX] = &sdm845_mx, [SM6350_MX] = &mx,
}; };
static const struct rpmhpd_desc sm6350_desc = { static const struct rpmhpd_desc sm6350_desc = {
@ -165,33 +226,18 @@ static const struct rpmhpd_desc sm6350_desc = {
}; };
/* SM8150 RPMH powerdomains */ /* SM8150 RPMH powerdomains */
static struct rpmhpd sm8150_mmcx_ao;
static struct rpmhpd sm8150_mmcx = {
.pd = { .name = "mmcx", },
.peer = &sm8150_mmcx_ao,
.res_name = "mmcx.lvl",
};
static struct rpmhpd sm8150_mmcx_ao = {
.pd = { .name = "mmcx_ao", },
.active_only = true,
.peer = &sm8150_mmcx,
.res_name = "mmcx.lvl",
};
static struct rpmhpd *sm8150_rpmhpds[] = { static struct rpmhpd *sm8150_rpmhpds[] = {
[SM8150_MSS] = &sdm845_mss, [SM8150_CX] = &cx_w_mx_parent,
[SM8150_EBI] = &sdm845_ebi, [SM8150_CX_AO] = &cx_ao_w_mx_parent,
[SM8150_LMX] = &sdm845_lmx, [SM8150_EBI] = &ebi,
[SM8150_LCX] = &sdm845_lcx, [SM8150_GFX] = &gfx,
[SM8150_GFX] = &sdm845_gfx, [SM8150_LCX] = &lcx,
[SM8150_MX] = &sdm845_mx, [SM8150_LMX] = &lmx,
[SM8150_MX_AO] = &sdm845_mx_ao, [SM8150_MMCX] = &mmcx,
[SM8150_CX] = &sdm845_cx, [SM8150_MMCX_AO] = &mmcx_ao,
[SM8150_CX_AO] = &sdm845_cx_ao, [SM8150_MSS] = &mss,
[SM8150_MMCX] = &sm8150_mmcx, [SM8150_MX] = &mx,
[SM8150_MMCX_AO] = &sm8150_mmcx_ao, [SM8150_MX_AO] = &mx_ao,
}; };
static const struct rpmhpd_desc sm8150_desc = { static const struct rpmhpd_desc sm8150_desc = {
@ -199,17 +245,18 @@ static const struct rpmhpd_desc sm8150_desc = {
.num_pds = ARRAY_SIZE(sm8150_rpmhpds), .num_pds = ARRAY_SIZE(sm8150_rpmhpds),
}; };
/* SM8250 RPMH powerdomains */
static struct rpmhpd *sm8250_rpmhpds[] = { static struct rpmhpd *sm8250_rpmhpds[] = {
[SM8250_CX] = &sdm845_cx, [SM8250_CX] = &cx_w_mx_parent,
[SM8250_CX_AO] = &sdm845_cx_ao, [SM8250_CX_AO] = &cx_ao_w_mx_parent,
[SM8250_EBI] = &sdm845_ebi, [SM8250_EBI] = &ebi,
[SM8250_GFX] = &sdm845_gfx, [SM8250_GFX] = &gfx,
[SM8250_LCX] = &sdm845_lcx, [SM8250_LCX] = &lcx,
[SM8250_LMX] = &sdm845_lmx, [SM8250_LMX] = &lmx,
[SM8250_MMCX] = &sm8150_mmcx, [SM8250_MMCX] = &mmcx,
[SM8250_MMCX_AO] = &sm8150_mmcx_ao, [SM8250_MMCX_AO] = &mmcx_ao,
[SM8250_MX] = &sdm845_mx, [SM8250_MX] = &mx,
[SM8250_MX_AO] = &sdm845_mx_ao, [SM8250_MX_AO] = &mx_ao,
}; };
static const struct rpmhpd_desc sm8250_desc = { static const struct rpmhpd_desc sm8250_desc = {
@ -218,34 +265,20 @@ static const struct rpmhpd_desc sm8250_desc = {
}; };
/* SM8350 Power domains */ /* SM8350 Power domains */
static struct rpmhpd sm8350_mxc_ao;
static struct rpmhpd sm8350_mxc = {
.pd = { .name = "mxc", },
.peer = &sm8350_mxc_ao,
.res_name = "mxc.lvl",
};
static struct rpmhpd sm8350_mxc_ao = {
.pd = { .name = "mxc_ao", },
.active_only = true,
.peer = &sm8350_mxc,
.res_name = "mxc.lvl",
};
static struct rpmhpd *sm8350_rpmhpds[] = { static struct rpmhpd *sm8350_rpmhpds[] = {
[SM8350_CX] = &sdm845_cx, [SM8350_CX] = &cx_w_mx_parent,
[SM8350_CX_AO] = &sdm845_cx_ao, [SM8350_CX_AO] = &cx_ao_w_mx_parent,
[SM8350_EBI] = &sdm845_ebi, [SM8350_EBI] = &ebi,
[SM8350_GFX] = &sdm845_gfx, [SM8350_GFX] = &gfx,
[SM8350_LCX] = &sdm845_lcx, [SM8350_LCX] = &lcx,
[SM8350_LMX] = &sdm845_lmx, [SM8350_LMX] = &lmx,
[SM8350_MMCX] = &sm8150_mmcx, [SM8350_MMCX] = &mmcx,
[SM8350_MMCX_AO] = &sm8150_mmcx_ao, [SM8350_MMCX_AO] = &mmcx_ao,
[SM8350_MX] = &sdm845_mx, [SM8350_MSS] = &mss,
[SM8350_MX_AO] = &sdm845_mx_ao, [SM8350_MX] = &mx,
[SM8350_MXC] = &sm8350_mxc, [SM8350_MX_AO] = &mx_ao,
[SM8350_MXC_AO] = &sm8350_mxc_ao, [SM8350_MXC] = &mxc,
[SM8350_MSS] = &sdm845_mss, [SM8350_MXC_AO] = &mxc_ao,
}; };
static const struct rpmhpd_desc sm8350_desc = { static const struct rpmhpd_desc sm8350_desc = {
@ -253,16 +286,38 @@ static const struct rpmhpd_desc sm8350_desc = {
.num_pds = ARRAY_SIZE(sm8350_rpmhpds), .num_pds = ARRAY_SIZE(sm8350_rpmhpds),
}; };
/* SM8450 RPMH powerdomains */
static struct rpmhpd *sm8450_rpmhpds[] = {
[SM8450_CX] = &cx,
[SM8450_CX_AO] = &cx_ao,
[SM8450_EBI] = &ebi,
[SM8450_GFX] = &gfx,
[SM8450_LCX] = &lcx,
[SM8450_LMX] = &lmx,
[SM8450_MMCX] = &mmcx_w_cx_parent,
[SM8450_MMCX_AO] = &mmcx_ao_w_cx_parent,
[SM8450_MSS] = &mss,
[SM8450_MX] = &mx,
[SM8450_MX_AO] = &mx_ao,
[SM8450_MXC] = &mxc,
[SM8450_MXC_AO] = &mxc_ao,
};
static const struct rpmhpd_desc sm8450_desc = {
.rpmhpds = sm8450_rpmhpds,
.num_pds = ARRAY_SIZE(sm8450_rpmhpds),
};
/* SC7180 RPMH powerdomains */ /* SC7180 RPMH powerdomains */
static struct rpmhpd *sc7180_rpmhpds[] = { static struct rpmhpd *sc7180_rpmhpds[] = {
[SC7180_CX] = &sdm845_cx, [SC7180_CX] = &cx_w_mx_parent,
[SC7180_CX_AO] = &sdm845_cx_ao, [SC7180_CX_AO] = &cx_ao_w_mx_parent,
[SC7180_GFX] = &sdm845_gfx, [SC7180_GFX] = &gfx,
[SC7180_MX] = &sdm845_mx, [SC7180_LCX] = &lcx,
[SC7180_MX_AO] = &sdm845_mx_ao, [SC7180_LMX] = &lmx,
[SC7180_LMX] = &sdm845_lmx, [SC7180_MSS] = &mss,
[SC7180_LCX] = &sdm845_lcx, [SC7180_MX] = &mx,
[SC7180_MSS] = &sdm845_mss, [SC7180_MX_AO] = &mx_ao,
}; };
static const struct rpmhpd_desc sc7180_desc = { static const struct rpmhpd_desc sc7180_desc = {
@ -272,15 +327,15 @@ static const struct rpmhpd_desc sc7180_desc = {
/* SC7280 RPMH powerdomains */ /* SC7280 RPMH powerdomains */
static struct rpmhpd *sc7280_rpmhpds[] = { static struct rpmhpd *sc7280_rpmhpds[] = {
[SC7280_CX] = &sdm845_cx, [SC7280_CX] = &cx,
[SC7280_CX_AO] = &sdm845_cx_ao, [SC7280_CX_AO] = &cx_ao,
[SC7280_EBI] = &sdm845_ebi, [SC7280_EBI] = &ebi,
[SC7280_GFX] = &sdm845_gfx, [SC7280_GFX] = &gfx,
[SC7280_MX] = &sdm845_mx, [SC7280_LCX] = &lcx,
[SC7280_MX_AO] = &sdm845_mx_ao, [SC7280_LMX] = &lmx,
[SC7280_LMX] = &sdm845_lmx, [SC7280_MSS] = &mss,
[SC7280_LCX] = &sdm845_lcx, [SC7280_MX] = &mx,
[SC7280_MSS] = &sdm845_mss, [SC7280_MX_AO] = &mx_ao,
}; };
static const struct rpmhpd_desc sc7280_desc = { static const struct rpmhpd_desc sc7280_desc = {
@ -290,17 +345,17 @@ static const struct rpmhpd_desc sc7280_desc = {
/* SC8180x RPMH powerdomains */ /* SC8180x RPMH powerdomains */
static struct rpmhpd *sc8180x_rpmhpds[] = { static struct rpmhpd *sc8180x_rpmhpds[] = {
[SC8180X_CX] = &sdm845_cx, [SC8180X_CX] = &cx_w_mx_parent,
[SC8180X_CX_AO] = &sdm845_cx_ao, [SC8180X_CX_AO] = &cx_ao_w_mx_parent,
[SC8180X_EBI] = &sdm845_ebi, [SC8180X_EBI] = &ebi,
[SC8180X_GFX] = &sdm845_gfx, [SC8180X_GFX] = &gfx,
[SC8180X_LCX] = &sdm845_lcx, [SC8180X_LCX] = &lcx,
[SC8180X_LMX] = &sdm845_lmx, [SC8180X_LMX] = &lmx,
[SC8180X_MMCX] = &sm8150_mmcx, [SC8180X_MMCX] = &mmcx,
[SC8180X_MMCX_AO] = &sm8150_mmcx_ao, [SC8180X_MMCX_AO] = &mmcx_ao,
[SC8180X_MSS] = &sdm845_mss, [SC8180X_MSS] = &mss,
[SC8180X_MX] = &sdm845_mx, [SC8180X_MX] = &mx,
[SC8180X_MX_AO] = &sdm845_mx_ao, [SC8180X_MX_AO] = &mx_ao,
}; };
static const struct rpmhpd_desc sc8180x_desc = { static const struct rpmhpd_desc sc8180x_desc = {
@ -318,6 +373,7 @@ static const struct of_device_id rpmhpd_match_table[] = {
{ .compatible = "qcom,sm8150-rpmhpd", .data = &sm8150_desc }, { .compatible = "qcom,sm8150-rpmhpd", .data = &sm8150_desc },
{ .compatible = "qcom,sm8250-rpmhpd", .data = &sm8250_desc }, { .compatible = "qcom,sm8250-rpmhpd", .data = &sm8250_desc },
{ .compatible = "qcom,sm8350-rpmhpd", .data = &sm8350_desc }, { .compatible = "qcom,sm8350-rpmhpd", .data = &sm8350_desc },
{ .compatible = "qcom,sm8450-rpmhpd", .data = &sm8450_desc },
{ } { }
}; };
MODULE_DEVICE_TABLE(of, rpmhpd_match_table); MODULE_DEVICE_TABLE(of, rpmhpd_match_table);


@@ -102,7 +102,6 @@ struct rpmpd {
 const bool active_only;
 unsigned int corner;
 bool enabled;
-const char *res_name;
 const int res_type;
 const int res_id;
 struct qcom_smd_rpm *rpm;
@@ -396,6 +395,45 @@ static const struct rpmpd_desc sm6115_desc = {
 .max_state = RPM_SMD_LEVEL_TURBO_NO_CPR,
 };
+/* sm6125 RPM Power domains */
+DEFINE_RPMPD_PAIR(sm6125, vddcx, vddcx_ao, RWCX, LEVEL, 0);
+DEFINE_RPMPD_VFL(sm6125, vddcx_vfl, RWCX, 0);
+DEFINE_RPMPD_PAIR(sm6125, vddmx, vddmx_ao, RWMX, LEVEL, 0);
+DEFINE_RPMPD_VFL(sm6125, vddmx_vfl, RWMX, 0);
+static struct rpmpd *sm6125_rpmpds[] = {
+[SM6125_VDDCX] = &sm6125_vddcx,
+[SM6125_VDDCX_AO] = &sm6125_vddcx_ao,
+[SM6125_VDDCX_VFL] = &sm6125_vddcx_vfl,
+[SM6125_VDDMX] = &sm6125_vddmx,
+[SM6125_VDDMX_AO] = &sm6125_vddmx_ao,
+[SM6125_VDDMX_VFL] = &sm6125_vddmx_vfl,
+};
+static const struct rpmpd_desc sm6125_desc = {
+.rpmpds = sm6125_rpmpds,
+.num_pds = ARRAY_SIZE(sm6125_rpmpds),
+.max_state = RPM_SMD_LEVEL_BINNING,
+};
+static struct rpmpd *qcm2290_rpmpds[] = {
+[QCM2290_VDDCX] = &sm6115_vddcx,
+[QCM2290_VDDCX_AO] = &sm6115_vddcx_ao,
+[QCM2290_VDDCX_VFL] = &sm6115_vddcx_vfl,
+[QCM2290_VDDMX] = &sm6115_vddmx,
+[QCM2290_VDDMX_AO] = &sm6115_vddmx_ao,
+[QCM2290_VDDMX_VFL] = &sm6115_vddmx_vfl,
+[QCM2290_VDD_LPI_CX] = &sm6115_vdd_lpi_cx,
+[QCM2290_VDD_LPI_MX] = &sm6115_vdd_lpi_mx,
+};
+static const struct rpmpd_desc qcm2290_desc = {
+.rpmpds = qcm2290_rpmpds,
+.num_pds = ARRAY_SIZE(qcm2290_rpmpds),
+.max_state = RPM_SMD_LEVEL_TURBO_NO_CPR,
+};
 static const struct of_device_id rpmpd_match_table[] = {
 { .compatible = "qcom,mdm9607-rpmpd", .data = &mdm9607_desc },
 { .compatible = "qcom,msm8916-rpmpd", .data = &msm8916_desc },
@@ -405,9 +443,11 @@ static const struct of_device_id rpmpd_match_table[] = {
 { .compatible = "qcom,msm8994-rpmpd", .data = &msm8994_desc },
 { .compatible = "qcom,msm8996-rpmpd", .data = &msm8996_desc },
 { .compatible = "qcom,msm8998-rpmpd", .data = &msm8998_desc },
+{ .compatible = "qcom,qcm2290-rpmpd", .data = &qcm2290_desc },
 { .compatible = "qcom,qcs404-rpmpd", .data = &qcs404_desc },
 { .compatible = "qcom,sdm660-rpmpd", .data = &sdm660_desc },
 { .compatible = "qcom,sm6115-rpmpd", .data = &sm6115_desc },
+{ .compatible = "qcom,sm6125-rpmpd", .data = &sm6125_desc },
 { }
 };
 MODULE_DEVICE_TABLE(of, rpmpd_match_table);


@@ -85,7 +85,7 @@
 #define SMEM_GLOBAL_HOST 0xfffe
 /* Max number of processors/hosts in a system */
-#define SMEM_HOST_COUNT 14
+#define SMEM_HOST_COUNT 15
 /**
 * struct smem_proc_comm - proc_comm communication struct (legacy)


@@ -313,8 +313,11 @@ static const struct soc_id soc_id[] = {
 { 421, "IPQ6000" },
 { 422, "IPQ6010" },
 { 425, "SC7180" },
+{ 434, "SM6350" },
 { 453, "IPQ6005" },
 { 455, "QRB5165" },
+{ 457, "SM8450" },
+{ 459, "SM7225" },
 };
 static const char *socinfo_machine(struct device *dev, unsigned int id)


@@ -235,6 +235,13 @@ config ARCH_R8A77961
 This enables support for the Renesas R-Car M3-W+ SoC.
 This includes different gradings like R-Car M3e and M3e-2G.
+config ARCH_R8A779F0
+bool "ARM64 Platform support for R-Car S4-8"
+select ARCH_RCAR_GEN3
+select SYSC_R8A779F0
+help
+This enables support for the Renesas R-Car S4-8 SoC.
 config ARCH_R8A77980
 bool "ARM64 Platform support for R-Car V3H"
 select ARCH_RCAR_GEN3
@@ -297,6 +304,9 @@ config RST_RCAR
 config SYSC_RCAR
 bool "System Controller support for R-Car" if COMPILE_TEST
+config SYSC_RCAR_GEN4
+bool "System Controller support for R-Car Gen4" if COMPILE_TEST
 config SYSC_R8A77995
 bool "System Controller support for R-Car D3" if COMPILE_TEST
 select SYSC_RCAR
@@ -337,6 +347,10 @@ config SYSC_R8A77961
 bool "System Controller support for R-Car M3-W+" if COMPILE_TEST
 select SYSC_RCAR
+config SYSC_R8A779F0
+bool "System Controller support for R-Car S4-8" if COMPILE_TEST
+select SYSC_RCAR_GEN4
 config SYSC_R8A7792
 bool "System Controller support for R-Car V2H" if COMPILE_TEST
 select SYSC_RCAR
@@ -351,6 +365,7 @@ config SYSC_R8A77970
 config SYSC_R8A779A0
 bool "System Controller support for R-Car V3U" if COMPILE_TEST
+select SYSC_RCAR_GEN4
 config SYSC_RMOBILE
 bool "System Controller support for R-Mobile" if COMPILE_TEST


@@ -25,6 +25,7 @@ obj-$(CONFIG_SYSC_R8A77980) += r8a77980-sysc.o
 obj-$(CONFIG_SYSC_R8A77990) += r8a77990-sysc.o
 obj-$(CONFIG_SYSC_R8A77995) += r8a77995-sysc.o
 obj-$(CONFIG_SYSC_R8A779A0) += r8a779a0-sysc.o
+obj-$(CONFIG_SYSC_R8A779F0) += r8a779f0-sysc.o
 ifdef CONFIG_SMP
 obj-$(CONFIG_ARCH_R9A06G032) += r9a06g032-smp.o
 endif
@@ -32,4 +33,5 @@ endif
 # Family
 obj-$(CONFIG_RST_RCAR) += rcar-rst.o
 obj-$(CONFIG_SYSC_RCAR) += rcar-sysc.o
+obj-$(CONFIG_SYSC_RCAR_GEN4) += rcar-gen4-sysc.o
 obj-$(CONFIG_SYSC_RMOBILE) += rmobile-sysc.o


@ -21,35 +21,9 @@
#include <dt-bindings/power/r8a779a0-sysc.h> #include <dt-bindings/power/r8a779a0-sysc.h>
/* #include "rcar-gen4-sysc.h"
* Power Domain flags
*/
#define PD_CPU BIT(0) /* Area contains main CPU core */
#define PD_SCU BIT(1) /* Area contains SCU and L2 cache */
#define PD_NO_CR BIT(2) /* Area lacks PWR{ON,OFF}CR registers */
#define PD_CPU_NOCR PD_CPU | PD_NO_CR /* CPU area lacks CR */ static struct rcar_gen4_sysc_area r8a779a0_areas[] __initdata = {
#define PD_ALWAYS_ON PD_NO_CR /* Always-on area */
/*
* Description of a Power Area
*/
struct r8a779a0_sysc_area {
const char *name;
u8 pdr; /* PDRn */
int parent; /* -1 if none */
unsigned int flags; /* See PD_* */
};
/*
* SoC-specific Power Area Description
*/
struct r8a779a0_sysc_info {
const struct r8a779a0_sysc_area *areas;
unsigned int num_areas;
};
static struct r8a779a0_sysc_area r8a779a0_areas[] __initdata = {
{ "always-on", R8A779A0_PD_ALWAYS_ON, -1, PD_ALWAYS_ON }, { "always-on", R8A779A0_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
{ "a3e0", R8A779A0_PD_A3E0, R8A779A0_PD_ALWAYS_ON, PD_SCU }, { "a3e0", R8A779A0_PD_A3E0, R8A779A0_PD_ALWAYS_ON, PD_SCU },
{ "a3e1", R8A779A0_PD_A3E1, R8A779A0_PD_ALWAYS_ON, PD_SCU }, { "a3e1", R8A779A0_PD_A3E1, R8A779A0_PD_ALWAYS_ON, PD_SCU },
@ -96,355 +70,7 @@ static struct r8a779a0_sysc_area r8a779a0_areas[] __initdata = {
{ "a1dsp1", R8A779A0_PD_A1DSP1, R8A779A0_PD_A2CN1 }, { "a1dsp1", R8A779A0_PD_A1DSP1, R8A779A0_PD_A2CN1 },
}; };
static const struct r8a779a0_sysc_info r8a779a0_sysc_info __initconst = { const struct rcar_gen4_sysc_info r8a779a0_sysc_info __initconst = {
.areas = r8a779a0_areas, .areas = r8a779a0_areas,
.num_areas = ARRAY_SIZE(r8a779a0_areas), .num_areas = ARRAY_SIZE(r8a779a0_areas),
}; };
/* SYSC Common */
#define SYSCSR 0x000 /* SYSC Status Register */
#define SYSCPONSR(x) (0x800 + ((x) * 0x4)) /* Power-ON Status Register 0 */
#define SYSCPOFFSR(x) (0x808 + ((x) * 0x4)) /* Power-OFF Status Register */
#define SYSCISCR(x) (0x810 + ((x) * 0x4)) /* Interrupt Status/Clear Register */
#define SYSCIER(x) (0x820 + ((x) * 0x4)) /* Interrupt Enable Register */
#define SYSCIMR(x) (0x830 + ((x) * 0x4)) /* Interrupt Mask Register */
/* Power Domain Registers */
#define PDRSR(n) (0x1000 + ((n) * 0x40))
#define PDRONCR(n) (0x1004 + ((n) * 0x40))
#define PDROFFCR(n) (0x1008 + ((n) * 0x40))
#define PDRESR(n) (0x100C + ((n) * 0x40))
/* PWRON/PWROFF */
#define PWRON_PWROFF BIT(0) /* Power-ON/OFF request */
/* PDRESR */
#define PDRESR_ERR BIT(0)
/* PDRSR */
#define PDRSR_OFF BIT(0) /* Power-OFF state */
#define PDRSR_ON BIT(4) /* Power-ON state */
#define PDRSR_OFF_STATE BIT(8) /* Processing Power-OFF sequence */
#define PDRSR_ON_STATE BIT(12) /* Processing Power-ON sequence */
#define SYSCSR_BUSY GENMASK(1, 0) /* All bit sets is not busy */
#define SYSCSR_TIMEOUT 10000
#define SYSCSR_DELAY_US 10
#define PDRESR_RETRIES 1000
#define PDRESR_DELAY_US 10
#define SYSCISR_TIMEOUT 10000
#define SYSCISR_DELAY_US 10
#define NUM_DOMAINS_EACH_REG BITS_PER_TYPE(u32)
static void __iomem *r8a779a0_sysc_base;
static DEFINE_SPINLOCK(r8a779a0_sysc_lock); /* SMP CPUs + I/O devices */
static int r8a779a0_sysc_pwr_on_off(u8 pdr, bool on)
{
unsigned int reg_offs;
u32 val;
int ret;
if (on)
reg_offs = PDRONCR(pdr);
else
reg_offs = PDROFFCR(pdr);
/* Wait until SYSC is ready to accept a power request */
ret = readl_poll_timeout_atomic(r8a779a0_sysc_base + SYSCSR, val,
(val & SYSCSR_BUSY) == SYSCSR_BUSY,
SYSCSR_DELAY_US, SYSCSR_TIMEOUT);
if (ret < 0)
return -EAGAIN;
/* Submit power shutoff or power resume request */
iowrite32(PWRON_PWROFF, r8a779a0_sysc_base + reg_offs);
return 0;
}
static int clear_irq_flags(unsigned int reg_idx, unsigned int isr_mask)
{
u32 val;
int ret;
iowrite32(isr_mask, r8a779a0_sysc_base + SYSCISCR(reg_idx));
ret = readl_poll_timeout_atomic(r8a779a0_sysc_base + SYSCISCR(reg_idx),
val, !(val & isr_mask),
SYSCISR_DELAY_US, SYSCISR_TIMEOUT);
if (ret < 0) {
pr_err("\n %s : Can not clear IRQ flags in SYSCISCR", __func__);
return -EIO;
}
return 0;
}
static int r8a779a0_sysc_power(u8 pdr, bool on)
{
unsigned int isr_mask;
unsigned int reg_idx, bit_idx;
unsigned int status;
unsigned long flags;
int ret = 0;
u32 val;
int k;
spin_lock_irqsave(&r8a779a0_sysc_lock, flags);
reg_idx = pdr / NUM_DOMAINS_EACH_REG;
bit_idx = pdr % NUM_DOMAINS_EACH_REG;
isr_mask = BIT(bit_idx);
/*
* The interrupt source needs to be enabled, but masked, to prevent the
* CPU from receiving it.
*/
iowrite32(ioread32(r8a779a0_sysc_base + SYSCIER(reg_idx)) | isr_mask,
r8a779a0_sysc_base + SYSCIER(reg_idx));
iowrite32(ioread32(r8a779a0_sysc_base + SYSCIMR(reg_idx)) | isr_mask,
r8a779a0_sysc_base + SYSCIMR(reg_idx));
ret = clear_irq_flags(reg_idx, isr_mask);
if (ret)
goto out;
/* Submit power shutoff or resume request until it was accepted */
for (k = 0; k < PDRESR_RETRIES; k++) {
ret = r8a779a0_sysc_pwr_on_off(pdr, on);
if (ret)
goto out;
status = ioread32(r8a779a0_sysc_base + PDRESR(pdr));
if (!(status & PDRESR_ERR))
break;
udelay(PDRESR_DELAY_US);
}
if (k == PDRESR_RETRIES) {
ret = -EIO;
goto out;
}
/* Wait until the power shutoff or resume request has completed * */
ret = readl_poll_timeout_atomic(r8a779a0_sysc_base + SYSCISCR(reg_idx),
val, (val & isr_mask),
SYSCISR_DELAY_US, SYSCISR_TIMEOUT);
if (ret < 0) {
ret = -EIO;
goto out;
}
/* Clear interrupt flags */
ret = clear_irq_flags(reg_idx, isr_mask);
if (ret)
goto out;
out:
spin_unlock_irqrestore(&r8a779a0_sysc_lock, flags);
pr_debug("sysc power %s domain %d: %08x -> %d\n", on ? "on" : "off",
pdr, ioread32(r8a779a0_sysc_base + SYSCISCR(reg_idx)), ret);
return ret;
}
static bool r8a779a0_sysc_power_is_off(u8 pdr)
{
unsigned int st;
st = ioread32(r8a779a0_sysc_base + PDRSR(pdr));
if (st & PDRSR_OFF)
return true;
return false;
}
struct r8a779a0_sysc_pd {
struct generic_pm_domain genpd;
u8 pdr;
unsigned int flags;
char name[];
};
static inline struct r8a779a0_sysc_pd *to_r8a779a0_pd(struct generic_pm_domain *d)
{
return container_of(d, struct r8a779a0_sysc_pd, genpd);
}
static int r8a779a0_sysc_pd_power_off(struct generic_pm_domain *genpd)
{
struct r8a779a0_sysc_pd *pd = to_r8a779a0_pd(genpd);
pr_debug("%s: %s\n", __func__, genpd->name);
return r8a779a0_sysc_power(pd->pdr, false);
}
static int r8a779a0_sysc_pd_power_on(struct generic_pm_domain *genpd)
{
struct r8a779a0_sysc_pd *pd = to_r8a779a0_pd(genpd);
pr_debug("%s: %s\n", __func__, genpd->name);
return r8a779a0_sysc_power(pd->pdr, true);
}
static int __init r8a779a0_sysc_pd_setup(struct r8a779a0_sysc_pd *pd)
{
struct generic_pm_domain *genpd = &pd->genpd;
const char *name = pd->genpd.name;
int error;
if (pd->flags & PD_CPU) {
/*
* This domain contains a CPU core and therefore it should
* only be turned off if the CPU is not in use.
*/
pr_debug("PM domain %s contains %s\n", name, "CPU");
genpd->flags |= GENPD_FLAG_ALWAYS_ON;
} else if (pd->flags & PD_SCU) {
/*
* This domain contains an SCU and cache-controller, and
* therefore it should only be turned off if the CPU cores are
* not in use.
*/
pr_debug("PM domain %s contains %s\n", name, "SCU");
genpd->flags |= GENPD_FLAG_ALWAYS_ON;
} else if (pd->flags & PD_NO_CR) {
/*
* This domain cannot be turned off.
*/
genpd->flags |= GENPD_FLAG_ALWAYS_ON;
}
if (!(pd->flags & (PD_CPU | PD_SCU))) {
/* Enable Clock Domain for I/O devices */
genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
genpd->attach_dev = cpg_mssr_attach_dev;
genpd->detach_dev = cpg_mssr_detach_dev;
}
genpd->power_off = r8a779a0_sysc_pd_power_off;
genpd->power_on = r8a779a0_sysc_pd_power_on;
if (pd->flags & (PD_CPU | PD_NO_CR)) {
/* Skip CPUs (handled by SMP code) and areas without control */
pr_debug("%s: Not touching %s\n", __func__, genpd->name);
goto finalize;
}
if (!r8a779a0_sysc_power_is_off(pd->pdr)) {
pr_debug("%s: %s is already powered\n", __func__, genpd->name);
goto finalize;
}
r8a779a0_sysc_power(pd->pdr, true);
finalize:
error = pm_genpd_init(genpd, &simple_qos_governor, false);
if (error)
pr_err("Failed to init PM domain %s: %d\n", name, error);
return error;
}
static const struct of_device_id r8a779a0_sysc_matches[] __initconst = {
{ .compatible = "renesas,r8a779a0-sysc", .data = &r8a779a0_sysc_info },
{ /* sentinel */ }
};
struct r8a779a0_pm_domains {
struct genpd_onecell_data onecell_data;
struct generic_pm_domain *domains[R8A779A0_PD_ALWAYS_ON + 1];
};
static struct genpd_onecell_data *r8a779a0_sysc_onecell_data;
static int __init r8a779a0_sysc_pd_init(void)
{
const struct r8a779a0_sysc_info *info;
const struct of_device_id *match;
struct r8a779a0_pm_domains *domains;
struct device_node *np;
void __iomem *base;
unsigned int i;
int error;
np = of_find_matching_node_and_match(NULL, r8a779a0_sysc_matches, &match);
if (!np)
return -ENODEV;
info = match->data;
base = of_iomap(np, 0);
if (!base) {
pr_warn("%pOF: Cannot map regs\n", np);
error = -ENOMEM;
goto out_put;
}
r8a779a0_sysc_base = base;
domains = kzalloc(sizeof(*domains), GFP_KERNEL);
if (!domains) {
error = -ENOMEM;
goto out_put;
}
domains->onecell_data.domains = domains->domains;
domains->onecell_data.num_domains = ARRAY_SIZE(domains->domains);
r8a779a0_sysc_onecell_data = &domains->onecell_data;
for (i = 0; i < info->num_areas; i++) {
const struct r8a779a0_sysc_area *area = &info->areas[i];
struct r8a779a0_sysc_pd *pd;
size_t n;
if (!area->name) {
/* Skip NULLified area */
continue;
}
n = strlen(area->name) + 1;
pd = kzalloc(sizeof(*pd) + n, GFP_KERNEL);
if (!pd) {
error = -ENOMEM;
goto out_put;
}
memcpy(pd->name, area->name, n);
pd->genpd.name = pd->name;
pd->pdr = area->pdr;
pd->flags = area->flags;
error = r8a779a0_sysc_pd_setup(pd);
if (error)
goto out_put;
domains->domains[area->pdr] = &pd->genpd;
if (area->parent < 0)
continue;
error = pm_genpd_add_subdomain(domains->domains[area->parent],
&pd->genpd);
if (error) {
pr_warn("Failed to add PM subdomain %s to parent %u\n",
area->name, area->parent);
goto out_put;
}
}
error = of_genpd_add_provider_onecell(np, &domains->onecell_data);
out_put:
of_node_put(np);
return error;
}
early_initcall(r8a779a0_sysc_pd_init);


@ -0,0 +1,47 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car S4-8 System Controller
*
* Copyright (C) 2021 Renesas Electronics Corp.
*/
#include <linux/bits.h>
#include <linux/clk/renesas.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <dt-bindings/power/r8a779f0-sysc.h>
#include "rcar-gen4-sysc.h"
static struct rcar_gen4_sysc_area r8a779f0_areas[] __initdata = {
{ "always-on", R8A779F0_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
{ "a3e0", R8A779F0_PD_A3E0, R8A779F0_PD_ALWAYS_ON, PD_SCU },
{ "a3e1", R8A779F0_PD_A3E1, R8A779F0_PD_ALWAYS_ON, PD_SCU },
{ "a2e0d0", R8A779F0_PD_A2E0D0, R8A779F0_PD_A3E0, PD_SCU },
{ "a2e0d1", R8A779F0_PD_A2E0D1, R8A779F0_PD_A3E0, PD_SCU },
{ "a2e1d0", R8A779F0_PD_A2E1D0, R8A779F0_PD_A3E1, PD_SCU },
{ "a2e1d1", R8A779F0_PD_A2E1D1, R8A779F0_PD_A3E1, PD_SCU },
{ "a1e0d0c0", R8A779F0_PD_A1E0D0C0, R8A779F0_PD_A2E0D0, PD_CPU_NOCR },
{ "a1e0d0c1", R8A779F0_PD_A1E0D0C1, R8A779F0_PD_A2E0D0, PD_CPU_NOCR },
{ "a1e0d1c0", R8A779F0_PD_A1E0D1C0, R8A779F0_PD_A2E0D1, PD_CPU_NOCR },
{ "a1e0d1c1", R8A779F0_PD_A1E0D1C1, R8A779F0_PD_A2E0D1, PD_CPU_NOCR },
{ "a1e1d0c0", R8A779F0_PD_A1E1D0C0, R8A779F0_PD_A2E1D0, PD_CPU_NOCR },
{ "a1e1d0c1", R8A779F0_PD_A1E1D0C1, R8A779F0_PD_A2E1D0, PD_CPU_NOCR },
{ "a1e1d1c0", R8A779F0_PD_A1E1D1C0, R8A779F0_PD_A2E1D1, PD_CPU_NOCR },
{ "a1e1d1c1", R8A779F0_PD_A1E1D1C1, R8A779F0_PD_A2E1D1, PD_CPU_NOCR },
};
const struct rcar_gen4_sysc_info r8a779f0_sysc_info __initconst = {
.areas = r8a779f0_areas,
.num_areas = ARRAY_SIZE(r8a779f0_areas),
};


@ -0,0 +1,376 @@
// SPDX-License-Identifier: GPL-2.0
/*
* R-Car Gen4 SYSC Power management support
*
* Copyright (C) 2021 Renesas Electronics Corp.
*/
#include <linux/bits.h>
#include <linux/clk/renesas.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include "rcar-gen4-sysc.h"
/* SYSC Common */
#define SYSCSR 0x000 /* SYSC Status Register */
#define SYSCPONSR(x) (0x800 + ((x) * 0x4)) /* Power-ON Status Register 0 */
#define SYSCPOFFSR(x) (0x808 + ((x) * 0x4)) /* Power-OFF Status Register */
#define SYSCISCR(x) (0x810 + ((x) * 0x4)) /* Interrupt Status/Clear Register */
#define SYSCIER(x) (0x820 + ((x) * 0x4)) /* Interrupt Enable Register */
#define SYSCIMR(x) (0x830 + ((x) * 0x4)) /* Interrupt Mask Register */
/* Power Domain Registers */
#define PDRSR(n) (0x1000 + ((n) * 0x40))
#define PDRONCR(n) (0x1004 + ((n) * 0x40))
#define PDROFFCR(n) (0x1008 + ((n) * 0x40))
#define PDRESR(n) (0x100C + ((n) * 0x40))
/* PWRON/PWROFF */
#define PWRON_PWROFF BIT(0) /* Power-ON/OFF request */
/* PDRESR */
#define PDRESR_ERR BIT(0)
/* PDRSR */
#define PDRSR_OFF BIT(0) /* Power-OFF state */
#define PDRSR_ON BIT(4) /* Power-ON state */
#define PDRSR_OFF_STATE BIT(8) /* Processing Power-OFF sequence */
#define PDRSR_ON_STATE BIT(12) /* Processing Power-ON sequence */
#define SYSCSR_BUSY GENMASK(1, 0) /* All bit sets is not busy */
#define SYSCSR_TIMEOUT 10000
#define SYSCSR_DELAY_US 10
#define PDRESR_RETRIES 1000
#define PDRESR_DELAY_US 10
#define SYSCISR_TIMEOUT 10000
#define SYSCISR_DELAY_US 10
#define RCAR_GEN4_PD_ALWAYS_ON 64
#define NUM_DOMAINS_EACH_REG BITS_PER_TYPE(u32)
static void __iomem *rcar_gen4_sysc_base;
static DEFINE_SPINLOCK(rcar_gen4_sysc_lock); /* SMP CPUs + I/O devices */
static int rcar_gen4_sysc_pwr_on_off(u8 pdr, bool on)
{
unsigned int reg_offs;
u32 val;
int ret;
if (on)
reg_offs = PDRONCR(pdr);
else
reg_offs = PDROFFCR(pdr);
/* Wait until SYSC is ready to accept a power request */
ret = readl_poll_timeout_atomic(rcar_gen4_sysc_base + SYSCSR, val,
(val & SYSCSR_BUSY) == SYSCSR_BUSY,
SYSCSR_DELAY_US, SYSCSR_TIMEOUT);
if (ret < 0)
return -EAGAIN;
/* Submit power shutoff or power resume request */
iowrite32(PWRON_PWROFF, rcar_gen4_sysc_base + reg_offs);
return 0;
}
static int clear_irq_flags(unsigned int reg_idx, unsigned int isr_mask)
{
u32 val;
int ret;
iowrite32(isr_mask, rcar_gen4_sysc_base + SYSCISCR(reg_idx));
ret = readl_poll_timeout_atomic(rcar_gen4_sysc_base + SYSCISCR(reg_idx),
val, !(val & isr_mask),
SYSCISR_DELAY_US, SYSCISR_TIMEOUT);
if (ret < 0) {
pr_err("\n %s : Can not clear IRQ flags in SYSCISCR", __func__);
return -EIO;
}
return 0;
}
static int rcar_gen4_sysc_power(u8 pdr, bool on)
{
unsigned int isr_mask;
unsigned int reg_idx, bit_idx;
unsigned int status;
unsigned long flags;
int ret = 0;
u32 val;
int k;
spin_lock_irqsave(&rcar_gen4_sysc_lock, flags);
reg_idx = pdr / NUM_DOMAINS_EACH_REG;
bit_idx = pdr % NUM_DOMAINS_EACH_REG;
isr_mask = BIT(bit_idx);
/*
* The interrupt source needs to be enabled, but masked, to prevent the
* CPU from receiving it.
*/
iowrite32(ioread32(rcar_gen4_sysc_base + SYSCIER(reg_idx)) | isr_mask,
rcar_gen4_sysc_base + SYSCIER(reg_idx));
iowrite32(ioread32(rcar_gen4_sysc_base + SYSCIMR(reg_idx)) | isr_mask,
rcar_gen4_sysc_base + SYSCIMR(reg_idx));
ret = clear_irq_flags(reg_idx, isr_mask);
if (ret)
goto out;
/* Submit power shutoff or resume request until it was accepted */
for (k = 0; k < PDRESR_RETRIES; k++) {
ret = rcar_gen4_sysc_pwr_on_off(pdr, on);
if (ret)
goto out;
status = ioread32(rcar_gen4_sysc_base + PDRESR(pdr));
if (!(status & PDRESR_ERR))
break;
udelay(PDRESR_DELAY_US);
}
if (k == PDRESR_RETRIES) {
ret = -EIO;
goto out;
}
/* Wait until the power shutoff or resume request has completed * */
ret = readl_poll_timeout_atomic(rcar_gen4_sysc_base + SYSCISCR(reg_idx),
val, (val & isr_mask),
SYSCISR_DELAY_US, SYSCISR_TIMEOUT);
if (ret < 0) {
ret = -EIO;
goto out;
}
/* Clear interrupt flags */
ret = clear_irq_flags(reg_idx, isr_mask);
if (ret)
goto out;
out:
spin_unlock_irqrestore(&rcar_gen4_sysc_lock, flags);
pr_debug("sysc power %s domain %d: %08x -> %d\n", on ? "on" : "off",
pdr, ioread32(rcar_gen4_sysc_base + SYSCISCR(reg_idx)), ret);
return ret;
}
static bool rcar_gen4_sysc_power_is_off(u8 pdr)
{
unsigned int st;
st = ioread32(rcar_gen4_sysc_base + PDRSR(pdr));
if (st & PDRSR_OFF)
return true;
return false;
}
struct rcar_gen4_sysc_pd {
struct generic_pm_domain genpd;
u8 pdr;
unsigned int flags;
char name[];
};
static inline struct rcar_gen4_sysc_pd *to_rcar_gen4_pd(struct generic_pm_domain *d)
{
return container_of(d, struct rcar_gen4_sysc_pd, genpd);
}
static int rcar_gen4_sysc_pd_power_off(struct generic_pm_domain *genpd)
{
struct rcar_gen4_sysc_pd *pd = to_rcar_gen4_pd(genpd);
pr_debug("%s: %s\n", __func__, genpd->name);
return rcar_gen4_sysc_power(pd->pdr, false);
}
static int rcar_gen4_sysc_pd_power_on(struct generic_pm_domain *genpd)
{
struct rcar_gen4_sysc_pd *pd = to_rcar_gen4_pd(genpd);
pr_debug("%s: %s\n", __func__, genpd->name);
return rcar_gen4_sysc_power(pd->pdr, true);
}
static int __init rcar_gen4_sysc_pd_setup(struct rcar_gen4_sysc_pd *pd)
{
struct generic_pm_domain *genpd = &pd->genpd;
const char *name = pd->genpd.name;
int error;
if (pd->flags & PD_CPU) {
/*
* This domain contains a CPU core and therefore it should
* only be turned off if the CPU is not in use.
*/
pr_debug("PM domain %s contains %s\n", name, "CPU");
genpd->flags |= GENPD_FLAG_ALWAYS_ON;
} else if (pd->flags & PD_SCU) {
/*
* This domain contains an SCU and cache-controller, and
* therefore it should only be turned off if the CPU cores are
* not in use.
*/
pr_debug("PM domain %s contains %s\n", name, "SCU");
genpd->flags |= GENPD_FLAG_ALWAYS_ON;
} else if (pd->flags & PD_NO_CR) {
/*
* This domain cannot be turned off.
*/
genpd->flags |= GENPD_FLAG_ALWAYS_ON;
}
if (!(pd->flags & (PD_CPU | PD_SCU))) {
/* Enable Clock Domain for I/O devices */
genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
genpd->attach_dev = cpg_mssr_attach_dev;
genpd->detach_dev = cpg_mssr_detach_dev;
}
genpd->power_off = rcar_gen4_sysc_pd_power_off;
genpd->power_on = rcar_gen4_sysc_pd_power_on;
if (pd->flags & (PD_CPU | PD_NO_CR)) {
/* Skip CPUs (handled by SMP code) and areas without control */
pr_debug("%s: Not touching %s\n", __func__, genpd->name);
goto finalize;
}
if (!rcar_gen4_sysc_power_is_off(pd->pdr)) {
pr_debug("%s: %s is already powered\n", __func__, genpd->name);
goto finalize;
}
rcar_gen4_sysc_power(pd->pdr, true);
finalize:
error = pm_genpd_init(genpd, &simple_qos_governor, false);
if (error)
pr_err("Failed to init PM domain %s: %d\n", name, error);
return error;
}
static const struct of_device_id rcar_gen4_sysc_matches[] __initconst = {
#ifdef CONFIG_SYSC_R8A779A0
{ .compatible = "renesas,r8a779a0-sysc", .data = &r8a779a0_sysc_info },
#endif
#ifdef CONFIG_SYSC_R8A779F0
{ .compatible = "renesas,r8a779f0-sysc", .data = &r8a779f0_sysc_info },
#endif
{ /* sentinel */ }
};
struct rcar_gen4_pm_domains {
struct genpd_onecell_data onecell_data;
struct generic_pm_domain *domains[RCAR_GEN4_PD_ALWAYS_ON + 1];
};
static struct genpd_onecell_data *rcar_gen4_sysc_onecell_data;
static int __init rcar_gen4_sysc_pd_init(void)
{
const struct rcar_gen4_sysc_info *info;
const struct of_device_id *match;
struct rcar_gen4_pm_domains *domains;
struct device_node *np;
void __iomem *base;
unsigned int i;
int error;
np = of_find_matching_node_and_match(NULL, rcar_gen4_sysc_matches, &match);
if (!np)
return -ENODEV;
info = match->data;
base = of_iomap(np, 0);
if (!base) {
pr_warn("%pOF: Cannot map regs\n", np);
error = -ENOMEM;
goto out_put;
}
rcar_gen4_sysc_base = base;
domains = kzalloc(sizeof(*domains), GFP_KERNEL);
if (!domains) {
error = -ENOMEM;
goto out_put;
}
domains->onecell_data.domains = domains->domains;
domains->onecell_data.num_domains = ARRAY_SIZE(domains->domains);
rcar_gen4_sysc_onecell_data = &domains->onecell_data;
for (i = 0; i < info->num_areas; i++) {
const struct rcar_gen4_sysc_area *area = &info->areas[i];
struct rcar_gen4_sysc_pd *pd;
size_t n;
if (!area->name) {
/* Skip NULLified area */
continue;
}
n = strlen(area->name) + 1;
pd = kzalloc(sizeof(*pd) + n, GFP_KERNEL);
if (!pd) {
error = -ENOMEM;
goto out_put;
}
memcpy(pd->name, area->name, n);
pd->genpd.name = pd->name;
pd->pdr = area->pdr;
pd->flags = area->flags;
error = rcar_gen4_sysc_pd_setup(pd);
if (error)
goto out_put;
domains->domains[area->pdr] = &pd->genpd;
if (area->parent < 0)
continue;
error = pm_genpd_add_subdomain(domains->domains[area->parent],
&pd->genpd);
if (error) {
pr_warn("Failed to add PM subdomain %s to parent %u\n",
area->name, area->parent);
goto out_put;
}
}
error = of_genpd_add_provider_onecell(np, &domains->onecell_data);
out_put:
of_node_put(np);
return error;
}
early_initcall(rcar_gen4_sysc_pd_init);


@ -0,0 +1,43 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* R-Car Gen4 System Controller
*
* Copyright (C) 2021 Renesas Electronics Corp.
*/
#ifndef __SOC_RENESAS_RCAR_GEN4_SYSC_H__
#define __SOC_RENESAS_RCAR_GEN4_SYSC_H__
#include <linux/types.h>
/*
* Power Domain flags
*/
#define PD_CPU BIT(0) /* Area contains main CPU core */
#define PD_SCU BIT(1) /* Area contains SCU and L2 cache */
#define PD_NO_CR BIT(2) /* Area lacks PWR{ON,OFF}CR registers */
#define PD_CPU_NOCR (PD_CPU | PD_NO_CR) /* CPU area lacks CR */
#define PD_ALWAYS_ON PD_NO_CR /* Always-on area */
/*
* Description of a Power Area
*/
struct rcar_gen4_sysc_area {
const char *name;
u8 pdr; /* PDRn */
int parent; /* -1 if none */
unsigned int flags; /* See PD_* */
};
/*
* SoC-specific Power Area Description
*/
struct rcar_gen4_sysc_info {
const struct rcar_gen4_sysc_area *areas;
unsigned int num_areas;
};
extern const struct rcar_gen4_sysc_info r8a779a0_sysc_info;
extern const struct rcar_gen4_sysc_info r8a779f0_sysc_info;
#endif /* __SOC_RENESAS_RCAR_GEN4_SYSC_H__ */


@@ -13,15 +13,43 @@
 #define WDTRSTCR_RESET 0xA55A0002
 #define WDTRSTCR 0x0054
+#define CR7BAR 0x0070
+#define CR7BAREN BIT(4)
+#define CR7BAR_MASK 0xFFFC0000
+static void __iomem *rcar_rst_base;
+static u32 saved_mode __initdata;
+static int (*rcar_rst_set_rproc_boot_addr_func)(u64 boot_addr);
 static int rcar_rst_enable_wdt_reset(void __iomem *base)
 {
 iowrite32(WDTRSTCR_RESET, base + WDTRSTCR);
 return 0;
 }
+/*
+* Most of the R-Car Gen3 SoCs have an ARM Realtime Core.
+* Firmware boot address has to be set in CR7BAR before
+* starting the realtime core.
+* Boot address must be aligned on a 256k boundary.
+*/
+static int rcar_rst_set_gen3_rproc_boot_addr(u64 boot_addr)
+{
+if (boot_addr & ~(u64)CR7BAR_MASK) {
+pr_err("Invalid boot address got %llx\n", boot_addr);
+return -EINVAL;
+}
+iowrite32(boot_addr, rcar_rst_base + CR7BAR);
+iowrite32(boot_addr | CR7BAREN, rcar_rst_base + CR7BAR);
+return 0;
+}
 struct rst_config {
 unsigned int modemr; /* Mode Monitoring Register Offset */
 int (*configure)(void __iomem *base); /* Platform specific config */
+int (*set_rproc_boot_addr)(u64 boot_addr);
 };
 static const struct rst_config rcar_rst_gen1 __initconst = {
@@ -35,9 +63,10 @@ static const struct rst_config rcar_rst_gen2 __initconst = {
 static const struct rst_config rcar_rst_gen3 __initconst = {
 .modemr = 0x60,
+.set_rproc_boot_addr = rcar_rst_set_gen3_rproc_boot_addr,
 };
-static const struct rst_config rcar_rst_r8a779a0 __initconst = {
+static const struct rst_config rcar_rst_gen4 __initconst = {
 .modemr = 0x00, /* MODEMR0 and it has CPG related bits */
 };
@@ -71,14 +100,12 @@ static const struct of_device_id rcar_rst_matches[] __initconst = {
 { .compatible = "renesas,r8a77980-rst", .data = &rcar_rst_gen3 },
 { .compatible = "renesas,r8a77990-rst", .data = &rcar_rst_gen3 },
 { .compatible = "renesas,r8a77995-rst", .data = &rcar_rst_gen3 },
-/* R-Car V3U */
-{ .compatible = "renesas,r8a779a0-rst", .data = &rcar_rst_r8a779a0 },
+/* R-Car Gen4 */
+{ .compatible = "renesas,r8a779a0-rst", .data = &rcar_rst_gen4 },
+{ .compatible = "renesas,r8a779f0-rst", .data = &rcar_rst_gen4 },
 { /* sentinel */ }
 };
-static void __iomem *rcar_rst_base __initdata;
-static u32 saved_mode __initdata;
 static int __init rcar_rst_init(void)
 {
 const struct of_device_id *match;
@@ -100,6 +127,8 @@ static int __init rcar_rst_init(void)
 rcar_rst_base = base;
 cfg = match->data;
+rcar_rst_set_rproc_boot_addr_func = cfg->set_rproc_boot_addr;
 saved_mode = ioread32(base + cfg->modemr);
 if (cfg->configure) {
 error = cfg->configure(base);
@@ -130,3 +159,12 @@ int __init rcar_rst_read_mode_pins(u32 *mode)
 *mode = saved_mode;
 return 0;
 }
+int rcar_rst_set_rproc_boot_addr(u64 boot_addr)
+{
+if (!rcar_rst_set_rproc_boot_addr_func)
+return -EIO;
+return rcar_rst_set_rproc_boot_addr_func(boot_addr);
+}
+EXPORT_SYMBOL_GPL(rcar_rst_set_rproc_boot_addr);

@ -33,6 +33,10 @@ static const struct renesas_family fam_rcar_gen3 __initconst __maybe_unused = {
.reg = 0xfff00044, /* PRR (Product Register) */
};
static const struct renesas_family fam_rcar_gen4 __initconst __maybe_unused = {
.name = "R-Car Gen4",
};
static const struct renesas_family fam_rmobile __initconst __maybe_unused = {
.name = "R-Mobile",
.reg = 0xe600101c, /* CCCR (Common Chip Code Register) */
@ -214,6 +218,11 @@ static const struct renesas_soc soc_rcar_v3u __initconst __maybe_unused = {
.id = 0x59,
};
static const struct renesas_soc soc_rcar_s4 __initconst __maybe_unused = {
.family = &fam_rcar_gen4,
.id = 0x5a,
};
static const struct renesas_soc soc_shmobile_ag5 __initconst __maybe_unused = {
.family = &fam_shmobile,
.id = 0x37,
@ -319,6 +328,9 @@ static const struct of_device_id renesas_socs[] __initconst = {
#ifdef CONFIG_ARCH_R8A779A0
{ .compatible = "renesas,r8a779a0", .data = &soc_rcar_v3u },
#endif
#ifdef CONFIG_ARCH_R8A779F0
{ .compatible = "renesas,r8a779f0", .data = &soc_rcar_s4 },
#endif
#if defined(CONFIG_ARCH_R9A07G044)
{ .compatible = "renesas,r9a07g044", .data = &soc_rz_g2l },
#endif
@ -328,94 +340,92 @@ static const struct of_device_id renesas_socs[] __initconst = {
{ /* sentinel */ }
};
struct renesas_id {
unsigned int offset;
u32 mask;
};
static const struct renesas_id id_bsid __initconst = {
.offset = 0,
.mask = 0xff0000,
/*
* TODO: Upper 4 bits of BSID are for chip version, but the format is
* not known at this time so we don't know how to specify eshi and eslo
*/
};
static const struct renesas_id id_rzg2l __initconst = {
.offset = 0xa04,
.mask = 0xfffffff,
};
static const struct renesas_id id_prr __initconst = {
.offset = 0,
.mask = 0xff00,
};
static const struct of_device_id renesas_ids[] __initconst = {
{ .compatible = "renesas,bsid", .data = &id_bsid },
{ .compatible = "renesas,r9a07g044-sysc", .data = &id_rzg2l },
{ .compatible = "renesas,prr", .data = &id_prr },
{ /* sentinel */ }
};
static int __init renesas_soc_init(void)
{
struct soc_device_attribute *soc_dev_attr;
unsigned int product, eshi = 0, eslo;
const struct renesas_family *family;
const struct of_device_id *match;
const struct renesas_soc *soc;
const struct renesas_id *id;
void __iomem *chipid = NULL;
struct soc_device *soc_dev;
struct device_node *np;
const char *soc_id;
match = of_match_node(renesas_socs, of_root);
if (!match)
return -ENODEV;
soc_id = strchr(match->compatible, ',') + 1;
soc = match->data;
family = soc->family;
np = of_find_matching_node_and_match(NULL, renesas_ids, &match);
if (np) {
chipid = of_iomap(np, 0);
of_node_put(np);
if (chipid) {
product = readl(chipid);
iounmap(chipid);
if (soc->id && ((product >> 16) & 0xff) != soc->id) {
pr_warn("SoC mismatch (product = 0x%x)\n",
product);
return -ENODEV;
}
}
/*
* TODO: Upper 4 bits of BSID are for chip version, but the
* format is not known at this time so we don't know how to
* specify eshi and eslo
*/
goto done;
}
np = of_find_compatible_node(NULL, NULL, "renesas,r9a07g044-sysc");
if (np) {
chipid = of_iomap(np, 0);
of_node_put(np);
if (chipid) {
product = readl(chipid + 0x0a04);
iounmap(chipid);
if (soc->id && (product & 0xfffffff) != soc->id) {
pr_warn("SoC mismatch (product = 0x%x)\n",
product);
return -ENODEV;
}
}
goto done;
}
/* Try PRR first, then hardcoded fallback */
np = of_find_compatible_node(NULL, NULL, "renesas,prr");
if (np) {
id = match->data;
chipid = of_iomap(np, 0);
of_node_put(np);
} else if (soc->id && family->reg) {
/* Try hardcoded CCCR/PRR fallback */
id = &id_prr;
chipid = ioremap(family->reg, 4);
}
if (chipid) {
product = readl(chipid + id->offset);
iounmap(chipid);
if (id == &id_prr) {
/* R-Car M3-W ES1.1 incorrectly identifies as ES2.0 */
if ((product & 0x7fff) == 0x5210)
product ^= 0x11;
/* R-Car M3-W ES1.3 incorrectly identifies as ES2.1 */
if ((product & 0x7fff) == 0x5211)
product ^= 0x12;
if (soc->id && ((product >> 8) & 0xff) != soc->id) {
pr_warn("SoC mismatch (product = 0x%x)\n", product);
return -ENODEV;
}
eshi = ((product >> 4) & 0x0f) + 1;
eslo = product & 0xf;
}
if (soc->id &&
((product & id->mask) >> __ffs(id->mask)) != soc->id) {
pr_warn("SoC mismatch (product = 0x%x)\n", product);
return -ENODEV;
}
}
soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
if (!soc_dev_attr)
return -ENOMEM;
@ -425,8 +435,7 @@ done:
of_node_put(np);
soc_dev_attr->family = kstrdup_const(family->name, GFP_KERNEL);
soc_dev_attr->soc_id = kstrdup_const(soc_id, GFP_KERNEL);
if (eshi)
soc_dev_attr->revision = kasprintf(GFP_KERNEL, "ES%u.%u", eshi,
eslo);

@ -23,6 +23,20 @@ config EXYNOS_CHIPID
Support for Samsung Exynos SoC ChipID and Adaptive Supply Voltage.
This driver can also be built as module (exynos_chipid).
config EXYNOS_USI
tristate "Exynos USI (Universal Serial Interface) driver"
default ARCH_EXYNOS && ARM64
depends on ARCH_EXYNOS || COMPILE_TEST
select MFD_SYSCON
help
Enable support for USI block. USI (Universal Serial Interface) is an
IP-core found in modern Samsung Exynos SoCs, like Exynos850 and
ExynosAutoV9. USI block can be configured to provide one of the
following serial protocols: UART, SPI or High Speed I2C.
This driver allows one to configure USI for desired protocol, which
is usually done in USI node in Device Tree.
config EXYNOS_PMU
bool "Exynos PMU controller driver" if COMPILE_TEST
depends on ARCH_EXYNOS || ((ARM || ARM64) && COMPILE_TEST)

@ -4,6 +4,8 @@ obj-$(CONFIG_EXYNOS_ASV_ARM) += exynos5422-asv.o
obj-$(CONFIG_EXYNOS_CHIPID) += exynos_chipid.o
exynos_chipid-y += exynos-chipid.o exynos-asv.o
obj-$(CONFIG_EXYNOS_USI) += exynos-usi.o
obj-$(CONFIG_EXYNOS_PMU) += exynos-pmu.o
obj-$(CONFIG_EXYNOS_PMU_ARM_DRIVERS) += exynos3250-pmu.o exynos4-pmu.o \

@ -42,6 +42,7 @@ static const struct exynos_soc_id {
unsigned int id;
} soc_ids[] = {
/* List ordered by SoC name */
/* Compatible with: samsung,exynos4210-chipid */
{ "EXYNOS3250", 0xE3472000 },
{ "EXYNOS4210", 0x43200000 }, /* EVT0 revision */
{ "EXYNOS4210", 0x43210000 },
@ -55,6 +56,8 @@ static const struct exynos_soc_id {
{ "EXYNOS5440", 0xE5440000 }, { "EXYNOS5440", 0xE5440000 },
{ "EXYNOS5800", 0xE5422000 }, { "EXYNOS5800", 0xE5422000 },
{ "EXYNOS7420", 0xE7420000 }, { "EXYNOS7420", 0xE7420000 },
/* Compatible with: samsung,exynos850-chipid */
{ "EXYNOS7885", 0xE7885000 },
{ "EXYNOS850", 0xE3830000 }, { "EXYNOS850", 0xE3830000 },
{ "EXYNOSAUTOV9", 0xAAA80000 }, { "EXYNOSAUTOV9", 0xAAA80000 },
}; };

@ -94,6 +94,8 @@ static const struct of_device_id exynos_pmu_of_device_ids[] = {
.compatible = "samsung,exynos5433-pmu", .compatible = "samsung,exynos5433-pmu",
}, { }, {
.compatible = "samsung,exynos7-pmu", .compatible = "samsung,exynos7-pmu",
}, {
.compatible = "samsung,exynos850-pmu",
}, },
{ /*sentinel*/ }, { /*sentinel*/ },
}; };

@ -0,0 +1,285 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2021 Linaro Ltd.
* Author: Sam Protsenko <semen.protsenko@linaro.org>
*
* Samsung Exynos USI driver (Universal Serial Interface).
*/
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <dt-bindings/soc/samsung,exynos-usi.h>
/* USIv2: System Register: SW_CONF register bits */
#define USI_V2_SW_CONF_NONE 0x0
#define USI_V2_SW_CONF_UART BIT(0)
#define USI_V2_SW_CONF_SPI BIT(1)
#define USI_V2_SW_CONF_I2C BIT(2)
#define USI_V2_SW_CONF_MASK (USI_V2_SW_CONF_UART | USI_V2_SW_CONF_SPI | \
USI_V2_SW_CONF_I2C)
/* USIv2: USI register offsets */
#define USI_CON 0x04
#define USI_OPTION 0x08
/* USIv2: USI register bits */
#define USI_CON_RESET BIT(0)
#define USI_OPTION_CLKREQ_ON BIT(1)
#define USI_OPTION_CLKSTOP_ON BIT(2)
enum exynos_usi_ver {
USI_VER2 = 2,
};
struct exynos_usi_variant {
enum exynos_usi_ver ver; /* USI IP-core version */
unsigned int sw_conf_mask; /* SW_CONF mask for all protocols */
size_t min_mode; /* first index in exynos_usi_modes[] */
size_t max_mode; /* last index in exynos_usi_modes[] */
size_t num_clks; /* number of clocks to assert */
const char * const *clk_names; /* clock names to assert */
};
struct exynos_usi {
struct device *dev;
void __iomem *regs; /* USI register map */
struct clk_bulk_data *clks; /* USI clocks */
size_t mode; /* current USI SW_CONF mode index */
bool clkreq_on; /* always provide clock to IP */
/* System Register */
struct regmap *sysreg; /* System Register map */
unsigned int sw_conf; /* SW_CONF register offset in sysreg */
const struct exynos_usi_variant *data;
};
struct exynos_usi_mode {
const char *name; /* mode name */
unsigned int val; /* mode register value */
};
static const struct exynos_usi_mode exynos_usi_modes[] = {
[USI_V2_NONE] = { .name = "none", .val = USI_V2_SW_CONF_NONE },
[USI_V2_UART] = { .name = "uart", .val = USI_V2_SW_CONF_UART },
[USI_V2_SPI] = { .name = "spi", .val = USI_V2_SW_CONF_SPI },
[USI_V2_I2C] = { .name = "i2c", .val = USI_V2_SW_CONF_I2C },
};
static const char * const exynos850_usi_clk_names[] = { "pclk", "ipclk" };
static const struct exynos_usi_variant exynos850_usi_data = {
.ver = USI_VER2,
.sw_conf_mask = USI_V2_SW_CONF_MASK,
.min_mode = USI_V2_NONE,
.max_mode = USI_V2_I2C,
.num_clks = ARRAY_SIZE(exynos850_usi_clk_names),
.clk_names = exynos850_usi_clk_names,
};
static const struct of_device_id exynos_usi_dt_match[] = {
{
.compatible = "samsung,exynos850-usi",
.data = &exynos850_usi_data,
},
{ } /* sentinel */
};
MODULE_DEVICE_TABLE(of, exynos_usi_dt_match);
/**
* exynos_usi_set_sw_conf - Set USI block configuration mode
* @usi: USI driver object
* @mode: Mode index
*
* Select underlying serial protocol (UART/SPI/I2C) in USI IP-core.
*
* Return: 0 on success, or negative error code on failure.
*/
static int exynos_usi_set_sw_conf(struct exynos_usi *usi, size_t mode)
{
unsigned int val;
int ret;
if (mode < usi->data->min_mode || mode > usi->data->max_mode)
return -EINVAL;
val = exynos_usi_modes[mode].val;
ret = regmap_update_bits(usi->sysreg, usi->sw_conf,
usi->data->sw_conf_mask, val);
if (ret)
return ret;
usi->mode = mode;
dev_dbg(usi->dev, "protocol: %s\n", exynos_usi_modes[usi->mode].name);
return 0;
}
/**
* exynos_usi_enable - Initialize USI block
* @usi: USI driver object
*
* USI IP-core start state is "reset" (on startup and after CPU resume). This
* routine enables the USI block by clearing the reset flag. It also configures
* HWACG behavior (needed e.g. for UART Rx). It should be performed before
* underlying protocol becomes functional.
*
* Return: 0 on success, or negative error code on failure.
*/
static int exynos_usi_enable(const struct exynos_usi *usi)
{
u32 val;
int ret;
ret = clk_bulk_prepare_enable(usi->data->num_clks, usi->clks);
if (ret)
return ret;
/* Enable USI block */
val = readl(usi->regs + USI_CON);
val &= ~USI_CON_RESET;
writel(val, usi->regs + USI_CON);
udelay(1);
/* Continuously provide the clock to USI IP w/o gating */
if (usi->clkreq_on) {
val = readl(usi->regs + USI_OPTION);
val &= ~USI_OPTION_CLKSTOP_ON;
val |= USI_OPTION_CLKREQ_ON;
writel(val, usi->regs + USI_OPTION);
}
clk_bulk_disable_unprepare(usi->data->num_clks, usi->clks);
return ret;
}
static int exynos_usi_configure(struct exynos_usi *usi)
{
int ret;
ret = exynos_usi_set_sw_conf(usi, usi->mode);
if (ret)
return ret;
if (usi->data->ver == USI_VER2)
return exynos_usi_enable(usi);
return 0;
}
static int exynos_usi_parse_dt(struct device_node *np, struct exynos_usi *usi)
{
int ret;
u32 mode;
ret = of_property_read_u32(np, "samsung,mode", &mode);
if (ret)
return ret;
if (mode < usi->data->min_mode || mode > usi->data->max_mode)
return -EINVAL;
usi->mode = mode;
usi->sysreg = syscon_regmap_lookup_by_phandle(np, "samsung,sysreg");
if (IS_ERR(usi->sysreg))
return PTR_ERR(usi->sysreg);
ret = of_property_read_u32_index(np, "samsung,sysreg", 1,
&usi->sw_conf);
if (ret)
return ret;
usi->clkreq_on = of_property_read_bool(np, "samsung,clkreq-on");
return 0;
}
static int exynos_usi_get_clocks(struct exynos_usi *usi)
{
const size_t num = usi->data->num_clks;
struct device *dev = usi->dev;
size_t i;
if (num == 0)
return 0;
usi->clks = devm_kcalloc(dev, num, sizeof(*usi->clks), GFP_KERNEL);
if (!usi->clks)
return -ENOMEM;
for (i = 0; i < num; ++i)
usi->clks[i].id = usi->data->clk_names[i];
return devm_clk_bulk_get(dev, num, usi->clks);
}
static int exynos_usi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct exynos_usi *usi;
int ret;
usi = devm_kzalloc(dev, sizeof(*usi), GFP_KERNEL);
if (!usi)
return -ENOMEM;
usi->dev = dev;
platform_set_drvdata(pdev, usi);
usi->data = of_device_get_match_data(dev);
if (!usi->data)
return -EINVAL;
ret = exynos_usi_parse_dt(np, usi);
if (ret)
return ret;
ret = exynos_usi_get_clocks(usi);
if (ret)
return ret;
if (usi->data->ver == USI_VER2) {
usi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(usi->regs))
return PTR_ERR(usi->regs);
}
ret = exynos_usi_configure(usi);
if (ret)
return ret;
/* Make it possible to embed protocol nodes into USI np */
return of_platform_populate(np, NULL, NULL, dev);
}
static int __maybe_unused exynos_usi_resume_noirq(struct device *dev)
{
struct exynos_usi *usi = dev_get_drvdata(dev);
return exynos_usi_configure(usi);
}
static const struct dev_pm_ops exynos_usi_pm = {
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, exynos_usi_resume_noirq)
};
static struct platform_driver exynos_usi_driver = {
.driver = {
.name = "exynos-usi",
.pm = &exynos_usi_pm,
.of_match_table = exynos_usi_dt_match,
},
.probe = exynos_usi_probe,
};
module_platform_driver(exynos_usi_driver);
MODULE_DESCRIPTION("Samsung USI driver");
MODULE_AUTHOR("Sam Protsenko <semen.protsenko@linaro.org>");
MODULE_LICENSE("GPL");

@ -10,6 +10,7 @@
#include <linux/export.h>
#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <soc/tegra/common.h>
#include <soc/tegra/fuse.h>
@ -43,6 +44,7 @@ static int tegra_core_dev_init_opp_state(struct device *dev)
{
unsigned long rate;
struct clk *clk;
bool rpm_enabled;
int err;
clk = devm_clk_get(dev, NULL);
@ -57,8 +59,31 @@ static int tegra_core_dev_init_opp_state(struct device *dev)
return -EINVAL;
}
/*
* Runtime PM of the device must be enabled in order to set up
* GENPD's performance properly because GENPD core checks whether
* device is suspended and this check doesn't work while RPM is
* disabled. This makes sure the OPP vote below gets cached in
* GENPD for the device. Instead, the vote is done the next time
* the device gets runtime resumed.
*/
rpm_enabled = pm_runtime_enabled(dev);
if (!rpm_enabled)
pm_runtime_enable(dev);
/* should never happen in practice */
if (!pm_runtime_enabled(dev)) {
dev_WARN(dev, "failed to enable runtime PM\n");
pm_runtime_disable(dev);
return -EINVAL;
}
/* first dummy rate-setting initializes voltage vote */
err = dev_pm_opp_set_rate(dev, rate);
if (!rpm_enabled)
pm_runtime_disable(dev);
if (err) {
dev_err(dev, "failed to initialize OPP clock: %d\n", err);
return err;
@ -111,9 +136,7 @@ int devm_tegra_core_dev_init_opp_table(struct device *dev,
*/
err = devm_pm_opp_of_add_table(dev);
if (err) {
if (err != -ENODEV)
dev_err(dev, "failed to add OPP table: %d\n", err);
return err;

@ -14,6 +14,7 @@
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
@ -181,6 +182,12 @@ static const struct nvmem_cell_info tegra_fuse_cells[] = {
},
};
static void tegra_fuse_restore(void *base)
{
fuse->clk = NULL;
fuse->base = base;
}
static int tegra_fuse_probe(struct platform_device *pdev)
{
void __iomem *base = fuse->base;
@ -188,13 +195,16 @@ static int tegra_fuse_probe(struct platform_device *pdev)
struct resource *res;
int err;
err = devm_add_action(&pdev->dev, tegra_fuse_restore, base);
if (err)
return err;
/* take over the memory region from the early initialization */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
fuse->phys = res->start;
fuse->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(fuse->base)) {
err = PTR_ERR(fuse->base);
fuse->base = base;
return err;
}
@ -204,19 +214,20 @@ static int tegra_fuse_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to get FUSE clock: %ld", dev_err(&pdev->dev, "failed to get FUSE clock: %ld",
PTR_ERR(fuse->clk)); PTR_ERR(fuse->clk));
fuse->base = base;
return PTR_ERR(fuse->clk); return PTR_ERR(fuse->clk);
} }
platform_set_drvdata(pdev, fuse); platform_set_drvdata(pdev, fuse);
fuse->dev = &pdev->dev; fuse->dev = &pdev->dev;
pm_runtime_enable(&pdev->dev); err = devm_pm_runtime_enable(&pdev->dev);
if (err)
return err;
if (fuse->soc->probe) { if (fuse->soc->probe) {
err = fuse->soc->probe(fuse); err = fuse->soc->probe(fuse);
if (err < 0) if (err < 0)
goto restore; return err;
} }
memset(&nvmem, 0, sizeof(nvmem)); memset(&nvmem, 0, sizeof(nvmem));
@ -240,19 +251,37 @@ static int tegra_fuse_probe(struct platform_device *pdev)
err = PTR_ERR(fuse->nvmem);
dev_err(&pdev->dev, "failed to register NVMEM device: %d\n",
err);
return err;
}
fuse->rst = devm_reset_control_get_optional(&pdev->dev, "fuse");
if (IS_ERR(fuse->rst)) {
err = PTR_ERR(fuse->rst);
dev_err(&pdev->dev, "failed to get FUSE reset: %pe\n",
fuse->rst);
return err;
}
/*
* FUSE clock is enabled at a boot time, hence this resume/suspend
* disables the clock besides the h/w resetting.
*/
err = pm_runtime_resume_and_get(&pdev->dev);
if (err)
return err;
err = reset_control_reset(fuse->rst);
pm_runtime_put(&pdev->dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to reset FUSE: %d\n", err);
return err;
} }
/* release the early I/O memory mapping */
iounmap(base);
return 0;
restore:
fuse->clk = NULL;
fuse->base = base;
pm_runtime_disable(&pdev->dev);
return err;
}
static int __maybe_unused tegra_fuse_runtime_resume(struct device *dev)

@ -94,9 +94,28 @@ static bool dma_filter(struct dma_chan *chan, void *filter_param)
return of_device_is_compatible(np, "nvidia,tegra20-apbdma");
}
static void tegra20_fuse_release_channel(void *data)
{
struct tegra_fuse *fuse = data;
dma_release_channel(fuse->apbdma.chan);
fuse->apbdma.chan = NULL;
}
static void tegra20_fuse_free_coherent(void *data)
{
struct tegra_fuse *fuse = data;
dma_free_coherent(fuse->dev, sizeof(u32), fuse->apbdma.virt,
fuse->apbdma.phys);
fuse->apbdma.virt = NULL;
fuse->apbdma.phys = 0x0;
}
static int tegra20_fuse_probe(struct tegra_fuse *fuse)
{
dma_cap_mask_t mask;
int err;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
@ -105,13 +124,21 @@ static int tegra20_fuse_probe(struct tegra_fuse *fuse)
if (!fuse->apbdma.chan)
return -EPROBE_DEFER;
err = devm_add_action_or_reset(fuse->dev, tegra20_fuse_release_channel,
fuse);
if (err)
return err;
fuse->apbdma.virt = dma_alloc_coherent(fuse->dev, sizeof(u32),
&fuse->apbdma.phys,
GFP_KERNEL);
if (!fuse->apbdma.virt)
return -ENOMEM;
err = devm_add_action_or_reset(fuse->dev, tegra20_fuse_free_coherent,
fuse);
if (err)
return err;
fuse->apbdma.config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
fuse->apbdma.config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

@ -43,6 +43,7 @@ struct tegra_fuse {
void __iomem *base;
phys_addr_t phys;
struct clk *clk;
struct reset_control *rst;
u32 (*read_early)(struct tegra_fuse *fuse, unsigned int offset);
u32 (*read)(struct tegra_fuse *fuse, unsigned int offset);

@ -1064,10 +1064,8 @@ int tegra_pmc_cpu_remove_clamping(unsigned int cpuid)
return tegra_powergate_remove_clamping(id);
}
static void tegra_pmc_program_reboot_reason(const char *cmd)
{
u32 value;
value = tegra_pmc_scratch_readl(pmc, pmc->soc->regs->scratch0);
@ -1085,6 +1083,25 @@ static int tegra_pmc_restart_notify(struct notifier_block *this,
}
tegra_pmc_scratch_writel(pmc, value, pmc->soc->regs->scratch0);
}
static int tegra_pmc_reboot_notify(struct notifier_block *this,
unsigned long action, void *data)
{
if (action == SYS_RESTART)
tegra_pmc_program_reboot_reason(data);
return NOTIFY_DONE;
}
static struct notifier_block tegra_pmc_reboot_notifier = {
.notifier_call = tegra_pmc_reboot_notify,
};
static int tegra_pmc_restart_notify(struct notifier_block *this,
unsigned long action, void *data)
{
u32 value;
/* reset everything but PMC_SCRATCH0 and PMC_RST_STATUS */
value = tegra_pmc_readl(pmc, PMC_CNTRL);
@ -1353,7 +1370,7 @@ static int tegra_pmc_core_pd_add(struct tegra_pmc *pmc, struct device_node *np)
if (!genpd)
return -ENOMEM;
genpd->name = "core";
genpd->set_performance_state = tegra_pmc_core_pd_set_performance_state;
genpd->opp_to_performance_state = tegra_pmc_core_pd_opp_to_performance_state;
@ -2890,6 +2907,14 @@ static int tegra_pmc_probe(struct platform_device *pdev)
goto cleanup_sysfs;
}
err = devm_register_reboot_notifier(&pdev->dev,
&tegra_pmc_reboot_notifier);
if (err) {
dev_err(&pdev->dev, "unable to register reboot notifier, %d\n",
err);
goto cleanup_debugfs;
}
err = register_restart_handler(&tegra_pmc_restart_handler);
if (err) {
dev_err(&pdev->dev, "unable to register restart handler, %d\n",
@ -2963,7 +2988,7 @@ static SIMPLE_DEV_PM_OPS(tegra_pmc_pm_ops, tegra_pmc_suspend, tegra_pmc_resume);
static const char * const tegra20_powergates[] = {
[TEGRA_POWERGATE_CPU] = "cpu",
[TEGRA_POWERGATE_3D] = "td",
[TEGRA_POWERGATE_VENC] = "venc",
[TEGRA_POWERGATE_VDEC] = "vdec",
[TEGRA_POWERGATE_PCIE] = "pcie",
@ -3071,7 +3096,7 @@ static const struct tegra_pmc_soc tegra20_pmc_soc = {
static const char * const tegra30_powergates[] = {
[TEGRA_POWERGATE_CPU] = "cpu0",
[TEGRA_POWERGATE_3D] = "td",
[TEGRA_POWERGATE_VENC] = "venc",
[TEGRA_POWERGATE_VDEC] = "vdec",
[TEGRA_POWERGATE_PCIE] = "pcie",
@ -3083,7 +3108,7 @@ static const char * const tegra30_powergates[] = {
[TEGRA_POWERGATE_CPU2] = "cpu2",
[TEGRA_POWERGATE_CPU3] = "cpu3",
[TEGRA_POWERGATE_CELP] = "celp",
[TEGRA_POWERGATE_3D1] = "td2",
};
static const u8 tegra30_cpu_powergates[] = {
@ -3132,7 +3157,7 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = {
static const char * const tegra114_powergates[] = {
[TEGRA_POWERGATE_CPU] = "crail",
[TEGRA_POWERGATE_3D] = "td",
[TEGRA_POWERGATE_VENC] = "venc",
[TEGRA_POWERGATE_VDEC] = "vdec",
[TEGRA_POWERGATE_MPE] = "mpe",

@ -16,7 +16,9 @@
#include <linux/regulator/coupler.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/suspend.h>
#include <soc/tegra/fuse.h>
#include <soc/tegra/pmc.h>
struct tegra_regulator_coupler {
@ -25,9 +27,12 @@ struct tegra_regulator_coupler {
struct regulator_dev *cpu_rdev;
struct regulator_dev *rtc_rdev;
struct notifier_block reboot_notifier;
struct notifier_block suspend_notifier;
int core_min_uV, cpu_min_uV;
bool sys_reboot_mode_req;
bool sys_reboot_mode;
bool sys_suspend_mode_req;
bool sys_suspend_mode;
};
static inline struct tegra_regulator_coupler *
@ -105,6 +110,28 @@ static int tegra20_core_rtc_max_spread(struct regulator_dev *core_rdev,
return 150000;
}
static int tegra20_cpu_nominal_uV(void)
{
switch (tegra_sku_info.soc_speedo_id) {
case 0:
return 1100000;
case 1:
return 1025000;
default:
return 1125000;
}
}
static int tegra20_core_nominal_uV(void)
{
switch (tegra_sku_info.soc_speedo_id) {
default:
return 1225000;
case 2:
return 1300000;
}
}
static int tegra20_core_rtc_update(struct tegra_regulator_coupler *tegra,
struct regulator_dev *core_rdev,
struct regulator_dev *rtc_rdev,
@ -144,6 +171,11 @@ static int tegra20_core_rtc_update(struct tegra_regulator_coupler *tegra,
if (err)
return err;
/* prepare voltage level for suspend */
if (tegra->sys_suspend_mode)
core_min_uV = clamp(tegra20_core_nominal_uV(),
core_min_uV, core_max_uV);
core_uV = regulator_get_voltage_rdev(core_rdev);
if (core_uV < 0)
return core_uV;
@ -279,6 +311,11 @@ static int tegra20_cpu_voltage_update(struct tegra_regulator_coupler *tegra,
if (tegra->sys_reboot_mode)
cpu_min_uV = max(cpu_min_uV, tegra->cpu_min_uV);
/* prepare voltage level for suspend */
if (tegra->sys_suspend_mode)
cpu_min_uV = clamp(tegra20_cpu_nominal_uV(),
cpu_min_uV, cpu_max_uV);
if (cpu_min_uV > cpu_uV) {
err = tegra20_core_rtc_update(tegra, core_rdev, rtc_rdev,
cpu_uV, cpu_min_uV);
@ -320,6 +357,7 @@ static int tegra20_regulator_balance_voltage(struct regulator_coupler *coupler,
}
tegra->sys_reboot_mode = READ_ONCE(tegra->sys_reboot_mode_req);
tegra->sys_suspend_mode = READ_ONCE(tegra->sys_suspend_mode_req);
if (rdev == cpu_rdev)
return tegra20_cpu_voltage_update(tegra, cpu_rdev,
@ -334,6 +372,63 @@ static int tegra20_regulator_balance_voltage(struct regulator_coupler *coupler,
return -EPERM;
}
static int tegra20_regulator_prepare_suspend(struct tegra_regulator_coupler *tegra,
bool sys_suspend_mode)
{
int err;
if (!tegra->core_rdev || !tegra->rtc_rdev || !tegra->cpu_rdev)
return 0;
/*
* All power domains are enabled early during resume from suspend
* by GENPD core. Domains like VENC may require a higher voltage
* when enabled during resume from suspend. This also prepares
* hardware for resuming from LP0.
*/
WRITE_ONCE(tegra->sys_suspend_mode_req, sys_suspend_mode);
err = regulator_sync_voltage_rdev(tegra->cpu_rdev);
if (err)
return err;
err = regulator_sync_voltage_rdev(tegra->core_rdev);
if (err)
return err;
return 0;
}
static int tegra20_regulator_suspend(struct notifier_block *notifier,
unsigned long mode, void *arg)
{
struct tegra_regulator_coupler *tegra;
int ret = 0;
tegra = container_of(notifier, struct tegra_regulator_coupler,
suspend_notifier);
switch (mode) {
case PM_HIBERNATION_PREPARE:
case PM_RESTORE_PREPARE:
case PM_SUSPEND_PREPARE:
ret = tegra20_regulator_prepare_suspend(tegra, true);
break;
case PM_POST_HIBERNATION:
case PM_POST_RESTORE:
case PM_POST_SUSPEND:
ret = tegra20_regulator_prepare_suspend(tegra, false);
break;
}
if (ret)
pr_err("failed to prepare regulators: %d\n", ret);
return notifier_from_errno(ret);
}
static int tegra20_regulator_prepare_reboot(struct tegra_regulator_coupler *tegra,
bool sys_reboot_mode)
{
@ -444,6 +539,7 @@ static struct tegra_regulator_coupler tegra20_coupler = {
.balance_voltage = tegra20_regulator_balance_voltage,
},
.reboot_notifier.notifier_call = tegra20_regulator_reboot,
.suspend_notifier.notifier_call = tegra20_regulator_suspend,
};
static int __init tegra_regulator_coupler_init(void)
@ -456,6 +552,9 @@ static int __init tegra_regulator_coupler_init(void)
err = register_reboot_notifier(&tegra20_coupler.reboot_notifier);
WARN_ON(err);
err = register_pm_notifier(&tegra20_coupler.suspend_notifier);
WARN_ON(err);
return regulator_coupler_register(&tegra20_coupler.coupler);
}
arch_initcall(tegra_regulator_coupler_init);

@ -16,6 +16,7 @@
#include <linux/regulator/coupler.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/suspend.h>
#include <soc/tegra/fuse.h>
#include <soc/tegra/pmc.h>
@ -25,9 +26,12 @@ struct tegra_regulator_coupler {
struct regulator_dev *core_rdev;
struct regulator_dev *cpu_rdev;
struct notifier_block reboot_notifier;
struct notifier_block suspend_notifier;
int core_min_uV, cpu_min_uV;
bool sys_reboot_mode_req;
bool sys_reboot_mode;
bool sys_suspend_mode_req;
bool sys_suspend_mode;
};
static inline struct tegra_regulator_coupler *
@ -113,6 +117,52 @@ static int tegra30_core_cpu_limit(int cpu_uV)
return -EINVAL;
}
static int tegra30_cpu_nominal_uV(void)
{
switch (tegra_sku_info.cpu_speedo_id) {
case 10 ... 11:
return 850000;
case 9:
return 912000;
case 1 ... 3:
case 7 ... 8:
return 1050000;
default:
return 1125000;
case 4 ... 6:
case 12 ... 13:
return 1237000;
}
}
static int tegra30_core_nominal_uV(void)
{
switch (tegra_sku_info.soc_speedo_id) {
case 0:
return 1200000;
case 1:
if (tegra_sku_info.cpu_speedo_id != 7 &&
tegra_sku_info.cpu_speedo_id != 8)
return 1200000;
fallthrough;
case 2:
if (tegra_sku_info.cpu_speedo_id != 13)
return 1300000;
return 1350000;
default:
return 1250000;
}
}
static int tegra30_voltage_update(struct tegra_regulator_coupler *tegra,
struct regulator_dev *cpu_rdev,
struct regulator_dev *core_rdev)
@ -168,6 +218,11 @@ static int tegra30_voltage_update(struct tegra_regulator_coupler *tegra,
if (err)
return err;
/* prepare voltage level for suspend */
if (tegra->sys_suspend_mode)
core_min_uV = clamp(tegra30_core_nominal_uV(),
core_min_uV, core_max_uV);
core_uV = regulator_get_voltage_rdev(core_rdev);
if (core_uV < 0)
return core_uV;
@ -223,6 +278,11 @@ static int tegra30_voltage_update(struct tegra_regulator_coupler *tegra,
if (tegra->sys_reboot_mode)
cpu_min_uV = max(cpu_min_uV, tegra->cpu_min_uV);
/* prepare voltage level for suspend */
if (tegra->sys_suspend_mode)
cpu_min_uV = clamp(tegra30_cpu_nominal_uV(),
cpu_min_uV, cpu_max_uV);
if (core_min_limited_uV > core_uV) {
pr_err("core voltage constraint violated: %d %d %d\n",
core_uV, core_min_limited_uV, cpu_uV);
@ -292,10 +352,68 @@ static int tegra30_regulator_balance_voltage(struct regulator_coupler *coupler,
}
tegra->sys_reboot_mode = READ_ONCE(tegra->sys_reboot_mode_req);
tegra->sys_suspend_mode = READ_ONCE(tegra->sys_suspend_mode_req);
return tegra30_voltage_update(tegra, cpu_rdev, core_rdev);
}
static int tegra30_regulator_prepare_suspend(struct tegra_regulator_coupler *tegra,
bool sys_suspend_mode)
{
int err;
if (!tegra->core_rdev || !tegra->cpu_rdev)
return 0;
/*
* All power domains are enabled early during resume from suspend
* by GENPD core. Domains like VENC may require a higher voltage
* when enabled during resume from suspend. This also prepares
* hardware for resuming from LP0.
*/
WRITE_ONCE(tegra->sys_suspend_mode_req, sys_suspend_mode);
err = regulator_sync_voltage_rdev(tegra->cpu_rdev);
if (err)
return err;
err = regulator_sync_voltage_rdev(tegra->core_rdev);
if (err)
return err;
return 0;
}
static int tegra30_regulator_suspend(struct notifier_block *notifier,
unsigned long mode, void *arg)
{
struct tegra_regulator_coupler *tegra;
int ret = 0;
tegra = container_of(notifier, struct tegra_regulator_coupler,
suspend_notifier);
switch (mode) {
case PM_HIBERNATION_PREPARE:
case PM_RESTORE_PREPARE:
case PM_SUSPEND_PREPARE:
ret = tegra30_regulator_prepare_suspend(tegra, true);
break;
case PM_POST_HIBERNATION:
case PM_POST_RESTORE:
case PM_POST_SUSPEND:
ret = tegra30_regulator_prepare_suspend(tegra, false);
break;
}
if (ret)
pr_err("failed to prepare regulators: %d\n", ret);
return notifier_from_errno(ret);
}
static int tegra30_regulator_prepare_reboot(struct tegra_regulator_coupler *tegra,
bool sys_reboot_mode)
{
@ -395,6 +513,7 @@ static struct tegra_regulator_coupler tegra30_coupler = {
.balance_voltage = tegra30_regulator_balance_voltage,
},
.reboot_notifier.notifier_call = tegra30_regulator_reboot,
.suspend_notifier.notifier_call = tegra30_regulator_suspend,
};
static int __init tegra_regulator_coupler_init(void)
@ -407,6 +526,9 @@ static int __init tegra_regulator_coupler_init(void)
err = register_reboot_notifier(&tegra30_coupler.reboot_notifier);
WARN_ON(err);
err = register_pm_notifier(&tegra30_coupler.suspend_notifier);
WARN_ON(err);
return regulator_coupler_register(&tegra30_coupler.coupler);
}
arch_initcall(tegra_regulator_coupler_init);

@ -40,7 +40,8 @@ static const struct k3_soc_id {
{ 0xBB5A, "AM65X" }, { 0xBB5A, "AM65X" },
{ 0xBB64, "J721E" }, { 0xBB64, "J721E" },
{ 0xBB6D, "J7200" }, { 0xBB6D, "J7200" },
{ 0xBB38, "AM64X" } { 0xBB38, "AM64X" },
{ 0xBB75, "J721S2"},
}; };
static int static int

@ -646,31 +646,31 @@ static int dma_init(struct device_node *cloud, struct device_node *dma_node)
}
dma->reg_global = pktdma_get_regs(dma, node, 0, &size);
if (IS_ERR(dma->reg_global))
return PTR_ERR(dma->reg_global);
if (size < sizeof(struct reg_global)) {
dev_err(kdev->dev, "bad size %pa for global regs\n", &size);
return -ENODEV;
}
dma->reg_tx_chan = pktdma_get_regs(dma, node, 1, &size);
if (IS_ERR(dma->reg_tx_chan))
return PTR_ERR(dma->reg_tx_chan);
max_tx_chan = size / sizeof(struct reg_chan);
dma->reg_rx_chan = pktdma_get_regs(dma, node, 2, &size);
if (IS_ERR(dma->reg_rx_chan))
return PTR_ERR(dma->reg_rx_chan);
max_rx_chan = size / sizeof(struct reg_chan);
dma->reg_tx_sched = pktdma_get_regs(dma, node, 3, &size);
if (IS_ERR(dma->reg_tx_sched))
return PTR_ERR(dma->reg_tx_sched);
max_tx_sched = size / sizeof(struct reg_tx_sched);
dma->reg_rx_flow = pktdma_get_regs(dma, node, 4, &size);
if (IS_ERR(dma->reg_rx_flow))
return PTR_ERR(dma->reg_rx_flow);
max_rx_flow = size / sizeof(struct reg_rx_flow);
dma->rx_priority = DMA_PRIO_DEFAULT;

@ -129,7 +129,7 @@ static int pruss_clk_init(struct pruss *pruss, struct device_node *cfg_node)
clks_np = of_get_child_by_name(cfg_node, "clocks");
if (!clks_np) {
dev_err(dev, "%pOF is missing its 'clocks' node\n", cfg_node);
return -ENODEV;
}

@ -20,8 +20,6 @@
#include <linux/firmware/xlnx-zynqmp.h>
#define ZYNQMP_NUM_DOMAINS (100)
/* Flag stating if PM nodes mapped to the PM domain has been requested */
#define ZYNQMP_PM_DOMAIN_REQUESTED BIT(0)
static int min_capability;
@ -29,14 +27,17 @@ static int min_capability;
* struct zynqmp_pm_domain - Wrapper around struct generic_pm_domain
* @gpd: Generic power domain
* @node_id: PM node ID corresponding to device inside PM domain
* @requested: The PM node mapped to the PM domain has been requested
*/
struct zynqmp_pm_domain {
struct generic_pm_domain gpd;
u32 node_id;
bool requested;
};
#define to_zynqmp_pm_domain(pm_domain) \
container_of(pm_domain, struct zynqmp_pm_domain, gpd)
/**
* zynqmp_gpd_is_active_wakeup_path() - Check if device is in wakeup source
* path
@ -71,21 +72,23 @@ static int zynqmp_gpd_is_active_wakeup_path(struct device *dev, void *not_used)
*/
static int zynqmp_gpd_power_on(struct generic_pm_domain *domain)
{
struct zynqmp_pm_domain *pd = to_zynqmp_pm_domain(domain);
int ret;
ret = zynqmp_pm_set_requirement(pd->node_id,
ZYNQMP_PM_CAPABILITY_ACCESS,
ZYNQMP_PM_MAX_QOS,
ZYNQMP_PM_REQUEST_ACK_BLOCKING);
if (ret) {
dev_err(&domain->dev,
"failed to set requirement to 0x%x for PM node id %d: %d\n",
ZYNQMP_PM_CAPABILITY_ACCESS, pd->node_id, ret);
return ret;
}
dev_dbg(&domain->dev, "set requirement to 0x%x for PM node id %d\n",
ZYNQMP_PM_CAPABILITY_ACCESS, pd->node_id);
return 0;
}
@ -100,18 +103,16 @@ static int zynqmp_gpd_power_on(struct generic_pm_domain *domain)
*/
static int zynqmp_gpd_power_off(struct generic_pm_domain *domain)
{
struct zynqmp_pm_domain *pd = to_zynqmp_pm_domain(domain);
int ret;
struct pm_domain_data *pdd, *tmp;
u32 capabilities = min_capability;
bool may_wakeup;
/* If domain is already released there is nothing to be done */
if (!pd->requested) {
dev_dbg(&domain->dev, "PM node id %d is already released\n",
pd->node_id);
return 0;
}
@ -128,17 +129,16 @@ static int zynqmp_gpd_power_off(struct generic_pm_domain *domain)
ret = zynqmp_pm_set_requirement(pd->node_id, capabilities, 0,
ZYNQMP_PM_REQUEST_ACK_NO);
/**
* If powering down of any node inside this domain fails,
* report and return the error
*/
if (ret) {
dev_err(&domain->dev,
"failed to set requirement to 0x%x for PM node id %d: %d\n",
capabilities, pd->node_id, ret);
return ret;
}
dev_dbg(&domain->dev, "set requirement to 0x%x for PM node id %d\n",
capabilities, pd->node_id);
return 0;
}
@ -152,10 +152,14 @@ static int zynqmp_gpd_power_off(struct generic_pm_domain *domain)
static int zynqmp_gpd_attach_dev(struct generic_pm_domain *domain,
struct device *dev)
{
struct zynqmp_pm_domain *pd = to_zynqmp_pm_domain(domain);
struct device_link *link;
int ret;
link = device_link_add(dev, &domain->dev, DL_FLAG_SYNC_STATE_ONLY);
if (!link)
dev_dbg(&domain->dev, "failed to create device link for %s\n",
dev_name(dev));
/* If this is not the first device to attach there is nothing to do */
if (domain->device_count)
@ -163,17 +167,17 @@ static int zynqmp_gpd_attach_dev(struct generic_pm_domain *domain,
ret = zynqmp_pm_request_node(pd->node_id, 0, 0,
ZYNQMP_PM_REQUEST_ACK_BLOCKING);
/* If requesting a node fails print and return the error */
if (ret) {
dev_err(&domain->dev, "%s request failed for node %d: %d\n",
domain->name, pd->node_id, ret);
return ret;
}
pd->requested = true;
dev_dbg(&domain->dev, "%s requested PM node id %d\n",
dev_name(dev), pd->node_id);
return 0;
}
@ -185,27 +189,24 @@ static int zynqmp_gpd_attach_dev(struct generic_pm_domain *domain,
static void zynqmp_gpd_detach_dev(struct generic_pm_domain *domain,
struct device *dev)
{
struct zynqmp_pm_domain *pd = to_zynqmp_pm_domain(domain);
int ret;
/* If this is not the last device to detach there is nothing to do */
if (domain->device_count)
return;
ret = zynqmp_pm_release_node(pd->node_id);
/* If releasing a node fails print the error and return */
if (ret) {
dev_err(&domain->dev, "failed to release PM node id %d: %d\n",
pd->node_id, ret);
return;
}
pd->requested = false;
dev_dbg(&domain->dev, "%s released PM node id %d\n",
dev_name(dev), pd->node_id);
}
static struct generic_pm_domain *zynqmp_gpd_xlate
@ -215,7 +216,7 @@ static struct generic_pm_domain *zynqmp_gpd_xlate
unsigned int i, idx = genpdspec->args[0];
struct zynqmp_pm_domain *pd;
pd = to_zynqmp_pm_domain(genpd_data->domains[0]);
if (genpdspec->args_count != 1)
return ERR_PTR(-EINVAL);
@ -299,9 +300,19 @@ static int zynqmp_gpd_remove(struct platform_device *pdev)
return 0;
}
static void zynqmp_gpd_sync_state(struct device *dev)
{
int ret;
ret = zynqmp_pm_init_finalize();
if (ret)
dev_warn(dev, "failed to release power management to firmware\n");
}
static struct platform_driver zynqmp_power_domain_driver = {
.driver = {
.name = "zynqmp_power_controller",
.sync_state = zynqmp_gpd_sync_state,
},
.probe = zynqmp_gpd_probe,
.remove = zynqmp_gpd_remove,

@ -178,7 +178,6 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
u32 pm_api_version;
struct mbox_client *client;
zynqmp_pm_init_finalize();
zynqmp_pm_get_api_version(&pm_api_version);
/* Check PM API version number */

@ -156,7 +156,9 @@ static int rpcif_spi_probe(struct platform_device *pdev)
ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_QUAD | SPI_RX_QUAD;
ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX;
error = rpcif_hw_init(rpc, false);
if (error)
return error;
error = spi_register_controller(ctlr);
if (error) {

@ -18,12 +18,15 @@
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>
#include <soc/tegra/common.h>
#define SLINK_COMMAND 0x000
#define SLINK_BIT_LENGTH(x) (((x) & 0x1f) << 0)
#define SLINK_WORD_SIZE(x) (((x) & 0x1f) << 5)
@ -680,7 +683,7 @@ static int tegra_slink_start_transfer_one(struct spi_device *spi,
bits_per_word = t->bits_per_word;
speed = t->speed_hz;
if (speed != tspi->cur_speed) {
dev_pm_opp_set_rate(tspi->dev, speed * 4);
tspi->cur_speed = speed;
}
@ -1066,6 +1069,10 @@ static int tegra_slink_probe(struct platform_device *pdev)
goto exit_free_master;
}
ret = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
if (ret)
goto exit_free_master;
tspi->max_buf_size = SLINK_FIFO_DEPTH << 2;
tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;

@ -20,6 +20,7 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <soc/tegra/common.h>
#include <soc/tegra/pmc.h>
#include "uapi.h"
@ -920,13 +921,17 @@ static __maybe_unused int tegra_vde_runtime_suspend(struct device *dev)
struct tegra_vde *vde = dev_get_drvdata(dev);
int err;
if (!dev->pm_domain) {
err = tegra_powergate_power_off(TEGRA_POWERGATE_VDEC);
if (err) {
dev_err(dev, "Failed to power down HW: %d\n", err);
return err;
}
}
clk_disable_unprepare(vde->clk);
reset_control_release(vde->rst);
reset_control_release(vde->rst_mc);
return 0;
}
@ -936,14 +941,45 @@ static __maybe_unused int tegra_vde_runtime_resume(struct device *dev)
struct tegra_vde *vde = dev_get_drvdata(dev);
int err;
err = reset_control_acquire(vde->rst_mc);
if (err) {
dev_err(dev, "Failed to acquire mc reset: %d\n", err);
return err;
}
err = reset_control_acquire(vde->rst);
if (err) {
dev_err(dev, "Failed to acquire reset: %d\n", err);
goto release_mc_reset;
}
if (!dev->pm_domain) {
err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_VDEC,
vde->clk, vde->rst);
if (err) {
dev_err(dev, "Failed to power up HW : %d\n", err);
goto release_reset;
}
} else {
/*
* tegra_powergate_sequence_power_up() leaves clocks enabled,
* while GENPD not.
*/
err = clk_prepare_enable(vde->clk);
if (err) {
dev_err(dev, "Failed to enable clock: %d\n", err);
goto release_reset;
}
} }
return 0;
release_reset:
reset_control_release(vde->rst);
release_mc_reset:
reset_control_release(vde->rst_mc);
return err;
}
static int tegra_vde_probe(struct platform_device *pdev)
@ -1001,14 +1037,14 @@ static int tegra_vde_probe(struct platform_device *pdev)
return err;
}
vde->rst = devm_reset_control_get_exclusive_released(dev, NULL);
if (IS_ERR(vde->rst)) {
err = PTR_ERR(vde->rst);
dev_err(dev, "Could not get VDE reset %d\n", err);
return err;
}
vde->rst_mc = devm_reset_control_get_optional_exclusive_released(dev, "mc");
if (IS_ERR(vde->rst_mc)) {
err = PTR_ERR(vde->rst_mc);
dev_err(dev, "Could not get MC reset %d\n", err);
@ -1026,6 +1062,12 @@ static int tegra_vde_probe(struct platform_device *pdev)
return err;
}
err = devm_tegra_core_dev_init_opp_table_common(dev);
if (err) {
dev_err(dev, "Could initialize OPP table %d\n", err);
return err;
}
vde->iram_pool = of_gen_pool_get(dev->of_node, "iram", 0);
if (!vde->iram_pool) {
dev_err(dev, "Could not get IRAM pool\n");
@ -1133,8 +1175,7 @@ static void tegra_vde_shutdown(struct platform_device *pdev)
* On some devices bootloader isn't ready to a power-gated VDE on * On some devices bootloader isn't ready to a power-gated VDE on
* a warm-reboot, machine will hang in that case. * a warm-reboot, machine will hang in that case.
*/ */
if (pm_runtime_status_suspended(&pdev->dev)) pm_runtime_get_sync(&pdev->dev);
tegra_vde_runtime_resume(&pdev->dev);
} }
static __maybe_unused int tegra_vde_pm_suspend(struct device *dev) static __maybe_unused int tegra_vde_pm_suspend(struct device *dev)


@@ -2,6 +2,7 @@
obj-$(CONFIG_OPTEE) += optee.o
optee-objs += core.o
optee-objs += call.o
+optee-objs += notif.o
optee-objs += rpc.o
optee-objs += supp.o
optee-objs += device.o


@@ -157,6 +157,7 @@ void optee_remove_common(struct optee *optee)
/* Unregister OP-TEE specific client devices on TEE bus */
optee_unregister_devices();
+optee_notif_uninit(optee);
/*
 * The two devices have to be unregistered before we can free the
 * other resources.
@@ -165,7 +166,6 @@ void optee_remove_common(struct optee *optee)
tee_device_unregister(optee->teedev);
tee_shm_pool_free(optee->pool);
-optee_wait_queue_exit(&optee->wait_queue);
optee_supp_uninit(&optee->supp);
mutex_destroy(&optee->call_queue.mutex);
}


@@ -855,9 +855,13 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
mutex_init(&optee->ffa.mutex);
mutex_init(&optee->call_queue.mutex);
INIT_LIST_HEAD(&optee->call_queue.waiters);
-optee_wait_queue_init(&optee->wait_queue);
optee_supp_init(&optee->supp);
ffa_dev_set_drvdata(ffa_dev, optee);
+rc = optee_notif_init(optee, OPTEE_DEFAULT_MAX_NOTIF_VALUE);
+if (rc) {
+optee_ffa_remove(ffa_dev);
+return rc;
+}
rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
if (rc) {

drivers/tee/optee/notif.c (new file, 125 lines)

@ -0,0 +1,125 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2021, Linaro Limited
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/tee_drv.h>
#include "optee_private.h"
struct notif_entry {
struct list_head link;
struct completion c;
u_int key;
};
static bool have_key(struct optee *optee, u_int key)
{
struct notif_entry *entry;
list_for_each_entry(entry, &optee->notif.db, link)
if (entry->key == key)
return true;
return false;
}
int optee_notif_wait(struct optee *optee, u_int key)
{
unsigned long flags;
struct notif_entry *entry;
int rc = 0;
if (key > optee->notif.max_key)
return -EINVAL;
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
init_completion(&entry->c);
entry->key = key;
spin_lock_irqsave(&optee->notif.lock, flags);
/*
* If the bit is already set it means that the key has already
* been posted and we must not wait.
*/
if (test_bit(key, optee->notif.bitmap)) {
clear_bit(key, optee->notif.bitmap);
goto out;
}
/*
* Check if someone is already waiting for this key. If there is,
* it's a programming error.
*/
if (have_key(optee, key)) {
rc = -EBUSY;
goto out;
}
list_add_tail(&entry->link, &optee->notif.db);
/*
* Unlock temporarily and wait for completion.
*/
spin_unlock_irqrestore(&optee->notif.lock, flags);
wait_for_completion(&entry->c);
spin_lock_irqsave(&optee->notif.lock, flags);
list_del(&entry->link);
out:
spin_unlock_irqrestore(&optee->notif.lock, flags);
kfree(entry);
return rc;
}
int optee_notif_send(struct optee *optee, u_int key)
{
unsigned long flags;
struct notif_entry *entry;
if (key > optee->notif.max_key)
return -EINVAL;
spin_lock_irqsave(&optee->notif.lock, flags);
list_for_each_entry(entry, &optee->notif.db, link)
if (entry->key == key) {
complete(&entry->c);
goto out;
}
/* Only set the bit in case there was nobody waiting */
set_bit(key, optee->notif.bitmap);
out:
spin_unlock_irqrestore(&optee->notif.lock, flags);
return 0;
}
int optee_notif_init(struct optee *optee, u_int max_key)
{
spin_lock_init(&optee->notif.lock);
INIT_LIST_HEAD(&optee->notif.db);
optee->notif.bitmap = bitmap_zalloc(max_key, GFP_KERNEL);
if (!optee->notif.bitmap)
return -ENOMEM;
optee->notif.max_key = max_key;
return 0;
}
void optee_notif_uninit(struct optee *optee)
{
kfree(optee->notif.bitmap);
}
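For orientation, the wait/send pair above behaves like a one-shot completion keyed by the notification value: a waiter blocks until the value is posted, or returns immediately if it was already posted. A minimal, hedged sketch of how the two sides pair up (the key value 12 is an arbitrary example and error handling is trimmed):

  /* Side A: typically the RPC path acting on OPTEE_RPC_NOTIFICATION_WAIT */
  static int example_wait_side(struct optee *optee)
  {
  	/* Blocks until key 12 is sent, or returns at once if already posted */
  	return optee_notif_wait(optee, 12);
  }

  /* Side B: the interrupt bottom half or OPTEE_RPC_NOTIFICATION_SEND path */
  static int example_send_side(struct optee *optee)
  {
  	/* Completes a pending waiter, or records the value in the bitmap */
  	return optee_notif_send(optee, 12);
  }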


@@ -318,6 +318,13 @@ struct optee_msg_arg {
* [in] param[0].u.rmem.shm_ref holds shared memory reference
* [in] param[0].u.rmem.offs 0
* [in] param[0].u.rmem.size 0
+ *
+ * OPTEE_MSG_CMD_DO_BOTTOM_HALF does the scheduled bottom half processing
+ * of a driver.
+ *
+ * OPTEE_MSG_CMD_STOP_ASYNC_NOTIF informs secure world that normal world
+ * is no longer able to process asynchronous notifications. Typically
+ * used when the driver is shut down.
*/
#define OPTEE_MSG_CMD_OPEN_SESSION 0
#define OPTEE_MSG_CMD_INVOKE_COMMAND 1
@@ -325,6 +332,8 @@
#define OPTEE_MSG_CMD_CANCEL 3
#define OPTEE_MSG_CMD_REGISTER_SHM 4
#define OPTEE_MSG_CMD_UNREGISTER_SHM 5
+#define OPTEE_MSG_CMD_DO_BOTTOM_HALF 6
+#define OPTEE_MSG_CMD_STOP_ASYNC_NOTIF 7
#define OPTEE_MSG_FUNCID_CALL_WITH_ARG 0x0004
#endif /* _OPTEE_MSG_H */


@@ -28,6 +28,13 @@
#define TEEC_ORIGIN_COMMS 0x00000002
+/*
+ * This value should be larger than the number of threads in secure world
+ * to meet the need from secure world. The number of threads in secure
+ * world is usually not even close to 255, so we should be safe for now.
+ */
+#define OPTEE_DEFAULT_MAX_NOTIF_VALUE 255
typedef void (optee_invoke_fn)(unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long,
@@ -44,10 +51,13 @@ struct optee_call_queue {
struct list_head waiters;
};
-struct optee_wait_queue {
-/* Serializes access to this struct */
-struct mutex mu;
+struct optee_notif {
+u_int max_key;
+struct tee_context *ctx;
+/* Serializes access to the elements below in this struct */
+spinlock_t lock;
struct list_head db;
+u_long *bitmap;
};
/**
@@ -79,6 +89,7 @@ struct optee_smc {
optee_invoke_fn *invoke_fn;
void *memremaped_shm;
u32 sec_caps;
+unsigned int notif_irq;
};
/**
@@ -129,8 +140,7 @@ struct optee_ops {
* @smc: specific to SMC ABI
* @ffa: specific to FF-A ABI
* @call_queue: queue of threads waiting to call @invoke_fn
- * @wait_queue: queue of threads from secure world waiting for a
- * secure world sync object
+ * @notif: notification synchronization struct
* @supp: supplicant synchronization struct for RPC to supplicant
* @pool: shared memory pool
* @rpc_arg_count: If > 0 number of RPC parameters to make room for
@@ -147,7 +157,7 @@ struct optee {
struct optee_ffa ffa;
};
struct optee_call_queue call_queue;
-struct optee_wait_queue wait_queue;
+struct optee_notif notif;
struct optee_supp supp;
struct tee_shm_pool *pool;
unsigned int rpc_arg_count;
@@ -185,8 +195,10 @@ struct optee_call_ctx {
size_t num_entries;
};
-void optee_wait_queue_init(struct optee_wait_queue *wq);
-void optee_wait_queue_exit(struct optee_wait_queue *wq);
+int optee_notif_init(struct optee *optee, u_int max_key);
+void optee_notif_uninit(struct optee *optee);
+int optee_notif_wait(struct optee *optee, u_int key);
+int optee_notif_send(struct optee *optee, u_int key);
u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
struct tee_param *param);


@@ -28,24 +28,27 @@
#define OPTEE_RPC_CMD_GET_TIME 3
/*
- * Wait queue primitive, helper for secure world to implement a wait queue.
+ * Notification from/to secure world.
 *
- * If secure world needs to wait for a secure world mutex it issues a sleep
- * request instead of spinning in secure world. Conversely is a wakeup
- * request issued when a secure world mutex with a thread waiting thread is
- * unlocked.
+ * If secure world needs to wait for something, for instance a mutex, it
+ * does a notification wait request instead of spinning in secure world.
+ * Conversely, a synchronous notification can be sent when a secure world
+ * mutex with a waiting thread is unlocked.
 *
- * Waiting on a key
- * [in] value[0].a OPTEE_RPC_WAIT_QUEUE_SLEEP
- * [in] value[0].b Wait key
+ * This interface can also be used to wait for an asynchronous notification,
+ * which instead is sent via a non-secure interrupt.
 *
- * Waking up a key
- * [in] value[0].a OPTEE_RPC_WAIT_QUEUE_WAKEUP
- * [in] value[0].b Wakeup key
+ * Waiting on a notification
+ * [in] value[0].a OPTEE_RPC_NOTIFICATION_WAIT
+ * [in] value[0].b notification value
+ *
+ * Sending a synchronous notification
+ * [in] value[0].a OPTEE_RPC_NOTIFICATION_SEND
+ * [in] value[0].b notification value
 */
-#define OPTEE_RPC_CMD_WAIT_QUEUE 4
-#define OPTEE_RPC_WAIT_QUEUE_SLEEP 0
-#define OPTEE_RPC_WAIT_QUEUE_WAKEUP 1
+#define OPTEE_RPC_CMD_NOTIFICATION 4
+#define OPTEE_RPC_NOTIFICATION_WAIT 0
+#define OPTEE_RPC_NOTIFICATION_SEND 1
/*
 * Suspend execution


@@ -107,6 +107,12 @@ struct optee_smc_call_get_os_revision_result {
/*
 * Call with struct optee_msg_arg as argument
 *
+ * When calling this function normal world has a few responsibilities:
+ * 1. It must be able to handle eventual RPCs
+ * 2. Non-secure interrupts should not be masked
+ * 3. If asynchronous notifications have been negotiated successfully, then
+ *    asynchronous notifications should be unmasked during this call.
+ *
 * Call register usage:
 * a0 SMC Function ID, OPTEE_SMC*CALL_WITH_ARG
 * a1 Upper 32 bits of a 64-bit physical pointer to a struct optee_msg_arg
@@ -195,7 +201,8 @@ struct optee_smc_get_shm_config_result {
 * Normal return register usage:
 * a0 OPTEE_SMC_RETURN_OK
 * a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
- * a2-7 Preserved
+ * a2 The maximum secure world notification number
+ * a3-7 Preserved
 *
 * Error return register usage:
 * a0 OPTEE_SMC_RETURN_ENOTAVAIL, can't use the capabilities from normal world
@@ -218,6 +225,8 @@ struct optee_smc_get_shm_config_result {
#define OPTEE_SMC_SEC_CAP_VIRTUALIZATION BIT(3)
/* Secure world supports Shared Memory with a NULL reference */
#define OPTEE_SMC_SEC_CAP_MEMREF_NULL BIT(4)
+/* Secure world supports asynchronous notification of normal world */
+#define OPTEE_SMC_SEC_CAP_ASYNC_NOTIF BIT(5)
#define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9
#define OPTEE_SMC_EXCHANGE_CAPABILITIES \
@@ -226,8 +235,8 @@ struct optee_smc_get_shm_config_result {
struct optee_smc_exchange_capabilities_result {
unsigned long status;
unsigned long capabilities;
+unsigned long max_notif_value;
unsigned long reserved0;
-unsigned long reserved1;
};
/*
@@ -319,6 +328,68 @@ struct optee_smc_disable_shm_cache_result {
#define OPTEE_SMC_GET_THREAD_COUNT \
OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_THREAD_COUNT)
/*
* Inform OP-TEE that normal world is able to receive asynchronous
* notifications.
*
* Call requests usage:
* a0 SMC Function ID, OPTEE_SMC_ENABLE_ASYNC_NOTIF
* a1-6 Not used
* a7 Hypervisor Client ID register
*
* Normal return register usage:
* a0 OPTEE_SMC_RETURN_OK
* a1-7 Preserved
*
* Not supported return register usage:
* a0 OPTEE_SMC_RETURN_ENOTAVAIL
* a1-7 Preserved
*/
#define OPTEE_SMC_FUNCID_ENABLE_ASYNC_NOTIF 16
#define OPTEE_SMC_ENABLE_ASYNC_NOTIF \
OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_ENABLE_ASYNC_NOTIF)
/*
* Retrieve a value of notifications pending since the last call of this
* function.
*
* OP-TEE keeps a record of all posted values. When an interrupt is
* received indicating that there are posted values, this function
* should be called until all pending values have been retrieved. When a
* value is retrieved, it's cleared from the record in secure world.
*
* Call requests usage:
* a0 SMC Function ID, OPTEE_SMC_GET_ASYNC_NOTIF_VALUE
* a1-6 Not used
* a7 Hypervisor Client ID register
*
* Normal return register usage:
* a0 OPTEE_SMC_RETURN_OK
* a1 value
* a2 Bit[0]: OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID if the value in a1 is
* valid, else 0 if no values were pending
* a2 Bit[1]: OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING if another value is
* pending, else 0.
* Bit[31:2]: MBZ
* a3-7 Preserved
*
* Not supported return register usage:
* a0 OPTEE_SMC_RETURN_ENOTAVAIL
* a1-7 Preserved
*/
#define OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID BIT(0)
#define OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING BIT(1)
/*
* Notification that OP-TEE expects a yielding call to do some bottom half
* work in a driver.
*/
#define OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF 0
#define OPTEE_SMC_FUNCID_GET_ASYNC_NOTIF_VALUE 17
#define OPTEE_SMC_GET_ASYNC_NOTIF_VALUE \
OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_ASYNC_NOTIF_VALUE)
/*
 * Resume from RPC (for example after processing a foreign interrupt)
 *


@@ -12,23 +12,6 @@
#include "optee_private.h"
#include "optee_rpc_cmd.h"
-struct wq_entry {
-struct list_head link;
-struct completion c;
-u32 key;
-};
-void optee_wait_queue_init(struct optee_wait_queue *priv)
-{
-mutex_init(&priv->mu);
-INIT_LIST_HEAD(&priv->db);
-}
-void optee_wait_queue_exit(struct optee_wait_queue *priv)
-{
-mutex_destroy(&priv->mu);
-}
static void handle_rpc_func_cmd_get_time(struct optee_msg_arg *arg)
{
struct timespec64 ts;
@@ -144,48 +127,6 @@ static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx,
}
#endif
-static struct wq_entry *wq_entry_get(struct optee_wait_queue *wq, u32 key)
-{
-struct wq_entry *w;
-mutex_lock(&wq->mu);
-list_for_each_entry(w, &wq->db, link)
-if (w->key == key)
-goto out;
-w = kmalloc(sizeof(*w), GFP_KERNEL);
-if (w) {
-init_completion(&w->c);
-w->key = key;
-list_add_tail(&w->link, &wq->db);
-}
-out:
-mutex_unlock(&wq->mu);
-return w;
-}
-static void wq_sleep(struct optee_wait_queue *wq, u32 key)
-{
-struct wq_entry *w = wq_entry_get(wq, key);
-if (w) {
-wait_for_completion(&w->c);
-mutex_lock(&wq->mu);
-list_del(&w->link);
-mutex_unlock(&wq->mu);
-kfree(w);
-}
-}
-static void wq_wakeup(struct optee_wait_queue *wq, u32 key)
-{
-struct wq_entry *w = wq_entry_get(wq, key);
-if (w)
-complete(&w->c);
-}
static void handle_rpc_func_cmd_wq(struct optee *optee,
struct optee_msg_arg *arg)
{
@@ -197,11 +138,13 @@ static void handle_rpc_func_cmd_wq(struct optee *optee,
goto bad;
switch (arg->params[0].u.value.a) {
-case OPTEE_RPC_WAIT_QUEUE_SLEEP:
-wq_sleep(&optee->wait_queue, arg->params[0].u.value.b);
+case OPTEE_RPC_NOTIFICATION_WAIT:
+if (optee_notif_wait(optee, arg->params[0].u.value.b))
+goto bad;
break;
-case OPTEE_RPC_WAIT_QUEUE_WAKEUP:
-wq_wakeup(&optee->wait_queue, arg->params[0].u.value.b);
+case OPTEE_RPC_NOTIFICATION_SEND:
+if (optee_notif_send(optee, arg->params[0].u.value.b))
+goto bad;
break;
default:
goto bad;
@@ -319,7 +262,7 @@ void optee_rpc_cmd(struct tee_context *ctx, struct optee *optee,
case OPTEE_RPC_CMD_GET_TIME:
handle_rpc_func_cmd_get_time(arg);
break;
-case OPTEE_RPC_CMD_WAIT_QUEUE:
+case OPTEE_RPC_CMD_NOTIFICATION:
handle_rpc_func_cmd_wq(optee, arg);
break;
case OPTEE_RPC_CMD_SUSPEND:


@@ -8,13 +8,16 @@
#include <linux/arm-smccc.h>
#include <linux/errno.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/sched.h>
+#include <linux/irqdomain.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tee_drv.h>
@@ -35,7 +38,8 @@
 * 2. Low level support functions to register shared memory in secure world
 * 3. Dynamic shared memory pool based on alloc_pages()
 * 4. Do a normal scheduled call into secure world
- * 5. Driver initialization.
+ * 5. Asynchronous notification
+ * 6. Driver initialization.
 */
#define OPTEE_SHM_NUM_PRIV_PAGES CONFIG_OPTEE_SHM_NUM_PRIV_PAGES
@@ -877,10 +881,137 @@ static int optee_smc_do_call_with_arg(struct tee_context *ctx,
return rc;
}
static int simple_call_with_arg(struct tee_context *ctx, u32 cmd)
{
struct optee_msg_arg *msg_arg;
struct tee_shm *shm;
shm = optee_get_msg_arg(ctx, 0, &msg_arg);
if (IS_ERR(shm))
return PTR_ERR(shm);
msg_arg->cmd = cmd;
optee_smc_do_call_with_arg(ctx, shm);
tee_shm_free(shm);
return 0;
}
static int optee_smc_do_bottom_half(struct tee_context *ctx)
{
return simple_call_with_arg(ctx, OPTEE_MSG_CMD_DO_BOTTOM_HALF);
}
static int optee_smc_stop_async_notif(struct tee_context *ctx)
{
return simple_call_with_arg(ctx, OPTEE_MSG_CMD_STOP_ASYNC_NOTIF);
}
/*
 * 5. Asynchronous notification
 */
static u32 get_async_notif_value(optee_invoke_fn *invoke_fn, bool *value_valid,
bool *value_pending)
{
struct arm_smccc_res res;
invoke_fn(OPTEE_SMC_GET_ASYNC_NOTIF_VALUE, 0, 0, 0, 0, 0, 0, 0, &res);
if (res.a0)
return 0;
*value_valid = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID);
*value_pending = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING);
return res.a1;
}
static irqreturn_t notif_irq_handler(int irq, void *dev_id)
{
struct optee *optee = dev_id;
bool do_bottom_half = false;
bool value_valid;
bool value_pending;
u32 value;
do {
value = get_async_notif_value(optee->smc.invoke_fn,
&value_valid, &value_pending);
if (!value_valid)
break;
if (value == OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF)
do_bottom_half = true;
else
optee_notif_send(optee, value);
} while (value_pending);
if (do_bottom_half)
return IRQ_WAKE_THREAD;
return IRQ_HANDLED;
}
static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id)
{
struct optee *optee = dev_id;
optee_smc_do_bottom_half(optee->notif.ctx);
return IRQ_HANDLED;
}
static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
{
struct tee_context *ctx;
int rc;
ctx = teedev_open(optee->teedev);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
optee->notif.ctx = ctx;
rc = request_threaded_irq(irq, notif_irq_handler,
notif_irq_thread_fn,
0, "optee_notification", optee);
if (rc)
goto err_close_ctx;
optee->smc.notif_irq = irq;
return 0;
err_close_ctx:
teedev_close_context(optee->notif.ctx);
optee->notif.ctx = NULL;
return rc;
}
static void optee_smc_notif_uninit_irq(struct optee *optee)
{
if (optee->notif.ctx) {
optee_smc_stop_async_notif(optee->notif.ctx);
if (optee->smc.notif_irq) {
free_irq(optee->smc.notif_irq, optee);
irq_dispose_mapping(optee->smc.notif_irq);
}
/*
 * The thread normally working with optee->notif.ctx was
 * stopped with free_irq() above.
 *
 * Note we're not using teedev_close_context() or
 * tee_client_close_context() since we have already called
 * tee_device_put() while initializing to avoid a circular
 * reference counting.
 */
teedev_close_context(optee->notif.ctx);
}
}
/*
 * 6. Driver initialization
 *
 * During driver initialization, secure world is probed to find out which
 * features it supports so the driver can be initialized with a matching
 * configuration. This involves for instance support for dynamic shared
 * memory instead of a static memory carveout.
@@ -952,6 +1083,17 @@ static const struct optee_ops optee_ops = {
.from_msg_param = optee_from_msg_param,
};
+static int enable_async_notif(optee_invoke_fn *invoke_fn)
+{
+struct arm_smccc_res res;
+invoke_fn(OPTEE_SMC_ENABLE_ASYNC_NOTIF, 0, 0, 0, 0, 0, 0, 0, &res);
+if (res.a0)
+return -EINVAL;
+return 0;
+}
static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
{
struct arm_smccc_res res;
@@ -1001,7 +1143,7 @@ static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
}
static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
-u32 *sec_caps)
+u32 *sec_caps, u32 *max_notif_value)
{
union {
struct arm_smccc_res smccc;
@@ -1024,6 +1166,11 @@ static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
return false;
*sec_caps = res.result.capabilities;
+if (*sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF)
+*max_notif_value = res.result.max_notif_value;
+else
+*max_notif_value = OPTEE_DEFAULT_MAX_NOTIF_VALUE;
return true;
}
@@ -1188,6 +1335,8 @@ static int optee_smc_remove(struct platform_device *pdev)
*/
optee_disable_shm_cache(optee);
+optee_smc_notif_uninit_irq(optee);
optee_remove_common(optee);
if (optee->smc.memremaped_shm)
@@ -1217,6 +1366,7 @@ static int optee_probe(struct platform_device *pdev)
struct optee *optee = NULL;
void *memremaped_shm = NULL;
struct tee_device *teedev;
+u32 max_notif_value;
u32 sec_caps;
int rc;
@@ -1236,7 +1386,8 @@ static int optee_probe(struct platform_device *pdev)
return -EINVAL;
}
-if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) {
+if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps,
+&max_notif_value)) {
pr_warn("capabilities mismatch\n");
return -EINVAL;
}
@@ -1259,7 +1410,7 @@ static int optee_probe(struct platform_device *pdev)
optee = kzalloc(sizeof(*optee), GFP_KERNEL);
if (!optee) {
rc = -ENOMEM;
-goto err;
+goto err_free_pool;
}
optee->ops = &optee_ops;
@@ -1269,32 +1420,55 @@ static int optee_probe(struct platform_device *pdev)
teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee);
if (IS_ERR(teedev)) {
rc = PTR_ERR(teedev);
-goto err;
+goto err_free_optee;
}
optee->teedev = teedev;
teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
if (IS_ERR(teedev)) {
rc = PTR_ERR(teedev);
-goto err;
+goto err_unreg_teedev;
}
optee->supp_teedev = teedev;
rc = tee_device_register(optee->teedev);
if (rc)
-goto err;
+goto err_unreg_supp_teedev;
rc = tee_device_register(optee->supp_teedev);
if (rc)
-goto err;
+goto err_unreg_supp_teedev;
mutex_init(&optee->call_queue.mutex);
INIT_LIST_HEAD(&optee->call_queue.waiters);
-optee_wait_queue_init(&optee->wait_queue);
optee_supp_init(&optee->supp);
optee->smc.memremaped_shm = memremaped_shm;
optee->pool = pool;
+platform_set_drvdata(pdev, optee);
+rc = optee_notif_init(optee, max_notif_value);
+if (rc)
+goto err_supp_uninit;
+if (sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
+unsigned int irq;
+rc = platform_get_irq(pdev, 0);
+if (rc < 0) {
+pr_err("platform_get_irq: ret %d\n", rc);
+goto err_notif_uninit;
+}
+irq = rc;
+rc = optee_smc_notif_init_irq(optee, irq);
+if (rc) {
+irq_dispose_mapping(irq);
+goto err_notif_uninit;
+}
+enable_async_notif(optee->smc.invoke_fn);
+pr_info("Asynchronous notifications enabled\n");
+}
/*
 * Ensure that there are no pre-existing shm objects before enabling
 * the shm cache so that there's no chance of receiving an invalid
@@ -1309,28 +1483,29 @@ static int optee_probe(struct platform_device *pdev)
if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
pr_info("dynamic shared memory is enabled\n");
-platform_set_drvdata(pdev, optee);
rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
-if (rc) {
-optee_smc_remove(pdev);
-return rc;
-}
+if (rc)
+goto err_disable_shm_cache;
pr_info("initialized driver\n");
return 0;
-err:
-if (optee) {
-/*
- * tee_device_unregister() is safe to call even if the
- * devices hasn't been registered with
- * tee_device_register() yet.
- */
+err_disable_shm_cache:
+optee_disable_shm_cache(optee);
+optee_smc_notif_uninit_irq(optee);
+optee_unregister_devices();
+err_notif_uninit:
+optee_notif_uninit(optee);
+err_supp_uninit:
+optee_supp_uninit(&optee->supp);
+mutex_destroy(&optee->call_queue.mutex);
+err_unreg_supp_teedev:
tee_device_unregister(optee->supp_teedev);
+err_unreg_teedev:
tee_device_unregister(optee->teedev);
+err_free_optee:
kfree(optee);
-}
+err_free_pool:
-if (pool)
tee_shm_pool_free(pool);
if (memremaped_shm)
memunmap(memremaped_shm);


@@ -43,7 +43,7 @@ static DEFINE_SPINLOCK(driver_lock);
static struct class *tee_class;
static dev_t tee_devt;
-static struct tee_context *teedev_open(struct tee_device *teedev)
+struct tee_context *teedev_open(struct tee_device *teedev)
{
int rc;
struct tee_context *ctx;
@@ -70,6 +70,7 @@ err:
return ERR_PTR(rc);
}
+EXPORT_SYMBOL_GPL(teedev_open);
void teedev_ctx_get(struct tee_context *ctx)
{
@@ -96,11 +97,14 @@ void teedev_ctx_put(struct tee_context *ctx)
kref_put(&ctx->refcount, teedev_ctx_release);
}
-static void teedev_close_context(struct tee_context *ctx)
+void teedev_close_context(struct tee_context *ctx)
{
-tee_device_put(ctx->teedev);
+struct tee_device *teedev = ctx->teedev;
teedev_ctx_put(ctx);
+tee_device_put(teedev);
}
+EXPORT_SYMBOL_GPL(teedev_close_context);
static int tee_open(struct inode *inode, struct file *filp)
{


@@ -7,6 +7,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/usb.h>
@@ -15,6 +16,8 @@
#include <linux/usb/of.h>
#include <linux/usb/phy.h>
+#include <soc/tegra/common.h>
#include "../host/ehci.h"
#include "ci.h"
@@ -278,6 +281,8 @@ static int tegra_usb_probe(struct platform_device *pdev)
if (!usb)
return -ENOMEM;
+platform_set_drvdata(pdev, usb);
soc = of_device_get_match_data(&pdev->dev);
if (!soc) {
dev_err(&pdev->dev, "failed to match OF data\n");
@@ -296,11 +301,14 @@ static int tegra_usb_probe(struct platform_device *pdev)
return err;
}
-err = clk_prepare_enable(usb->clk);
-if (err < 0) {
-dev_err(&pdev->dev, "failed to enable clock: %d\n", err);
+err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
+if (err)
+return err;
+pm_runtime_enable(&pdev->dev);
+err = pm_runtime_resume_and_get(&pdev->dev);
+if (err)
return err;
-}
if (device_property_present(&pdev->dev, "nvidia,needs-double-reset"))
usb->needs_double_reset = true;
@@ -320,8 +328,6 @@ static int tegra_usb_probe(struct platform_device *pdev)
if (err)
goto fail_power_off;
-platform_set_drvdata(pdev, usb);
/* setup and register ChipIdea HDRC device */
usb->soc = soc;
usb->data.name = "tegra-usb";
@@ -350,7 +356,9 @@ static int tegra_usb_probe(struct platform_device *pdev)
phy_shutdown:
usb_phy_shutdown(usb->phy);
fail_power_off:
-clk_disable_unprepare(usb->clk);
+pm_runtime_put_sync_suspend(&pdev->dev);
+pm_runtime_force_suspend(&pdev->dev);
return err;
}
@@ -360,15 +368,46 @@ static int tegra_usb_remove(struct platform_device *pdev)
ci_hdrc_remove_device(usb->dev);
usb_phy_shutdown(usb->phy);
+pm_runtime_put_sync_suspend(&pdev->dev);
+pm_runtime_force_suspend(&pdev->dev);
+return 0;
+}
+static int __maybe_unused tegra_usb_runtime_resume(struct device *dev)
+{
+struct tegra_usb *usb = dev_get_drvdata(dev);
+int err;
+err = clk_prepare_enable(usb->clk);
+if (err < 0) {
+dev_err(dev, "failed to enable clock: %d\n", err);
+return err;
+}
+return 0;
+}
+static int __maybe_unused tegra_usb_runtime_suspend(struct device *dev)
+{
+struct tegra_usb *usb = dev_get_drvdata(dev);
clk_disable_unprepare(usb->clk);
return 0;
}
+static const struct dev_pm_ops tegra_usb_pm = {
+SET_RUNTIME_PM_OPS(tegra_usb_runtime_suspend, tegra_usb_runtime_resume,
+NULL)
+};
static struct platform_driver tegra_usb_driver = {
.driver = {
.name = "tegra-usb",
.of_match_table = tegra_usb_of_match,
+.pm = &tegra_usb_pm,
},
.probe = tegra_usb_probe,
.remove = tegra_usb_remove,


@ -0,0 +1,64 @@
/* SPDX-License-Identifier: (GPL-2.0 or MIT) */
/*
* Copyright (C) 2021 Renesas Electronics Corp.
*/
#ifndef __DT_BINDINGS_CLOCK_R8A779F0_CPG_MSSR_H__
#define __DT_BINDINGS_CLOCK_R8A779F0_CPG_MSSR_H__
#include <dt-bindings/clock/renesas-cpg-mssr.h>
/* r8a779f0 CPG Core Clocks */
#define R8A779F0_CLK_ZX 0
#define R8A779F0_CLK_ZS 1
#define R8A779F0_CLK_ZT 2
#define R8A779F0_CLK_ZTR 3
#define R8A779F0_CLK_S0D2 4
#define R8A779F0_CLK_S0D3 5
#define R8A779F0_CLK_S0D4 6
#define R8A779F0_CLK_S0D2_MM 7
#define R8A779F0_CLK_S0D3_MM 8
#define R8A779F0_CLK_S0D4_MM 9
#define R8A779F0_CLK_S0D2_RT 10
#define R8A779F0_CLK_S0D3_RT 11
#define R8A779F0_CLK_S0D4_RT 12
#define R8A779F0_CLK_S0D6_RT 13
#define R8A779F0_CLK_S0D3_PER 14
#define R8A779F0_CLK_S0D6_PER 15
#define R8A779F0_CLK_S0D12_PER 16
#define R8A779F0_CLK_S0D24_PER 17
#define R8A779F0_CLK_S0D2_HSC 18
#define R8A779F0_CLK_S0D3_HSC 19
#define R8A779F0_CLK_S0D4_HSC 20
#define R8A779F0_CLK_S0D6_HSC 21
#define R8A779F0_CLK_S0D12_HSC 22
#define R8A779F0_CLK_S0D2_CC 23
#define R8A779F0_CLK_CL 24
#define R8A779F0_CLK_CL16M 25
#define R8A779F0_CLK_CL16M_MM 26
#define R8A779F0_CLK_CL16M_RT 27
#define R8A779F0_CLK_CL16M_PER 28
#define R8A779F0_CLK_CL16M_HSC 29
#define R8A779F0_CLK_Z0 30
#define R8A779F0_CLK_Z1 31
#define R8A779F0_CLK_ZB3 32
#define R8A779F0_CLK_ZB3D2 33
#define R8A779F0_CLK_ZB3D4 34
#define R8A779F0_CLK_SD0H 35
#define R8A779F0_CLK_SD0 36
#define R8A779F0_CLK_RPC 37
#define R8A779F0_CLK_RPCD2 38
#define R8A779F0_CLK_MSO 39
#define R8A779F0_CLK_SASYNCRT 40
#define R8A779F0_CLK_SASYNCPERD1 41
#define R8A779F0_CLK_SASYNCPERD2 42
#define R8A779F0_CLK_SASYNCPERD4 43
#define R8A779F0_CLK_DBGSOC_HSC 44
#define R8A779F0_CLK_RSW2 45
#define R8A779F0_CLK_OSC 46
#define R8A779F0_CLK_ZR 47
#define R8A779F0_CLK_CPEX 48
#define R8A779F0_CLK_CBFUSA 49
#define R8A779F0_CLK_R 50
#endif /* __DT_BINDINGS_CLOCK_R8A779F0_CPG_MSSR_H__ */


@@ -12,4 +12,9 @@
#define IMX8MN_POWER_DOMAIN_DISPMIX 3
#define IMX8MN_POWER_DOMAIN_MIPI 4
+#define IMX8MN_DISPBLK_PD_MIPI_DSI 0
+#define IMX8MN_DISPBLK_PD_MIPI_CSI 1
+#define IMX8MN_DISPBLK_PD_LCDIF 2
+#define IMX8MN_DISPBLK_PD_ISI 3
#endif


@@ -68,6 +68,21 @@
#define SM8350_MXC_AO 11
#define SM8350_MSS 12
/* SM8450 Power Domain Indexes */
#define SM8450_CX 0
#define SM8450_CX_AO 1
#define SM8450_EBI 2
#define SM8450_GFX 3
#define SM8450_LCX 4
#define SM8450_LMX 5
#define SM8450_MMCX 6
#define SM8450_MMCX_AO 7
#define SM8450_MX 8
#define SM8450_MX_AO 9
#define SM8450_MXC 10
#define SM8450_MXC_AO 11
#define SM8450_MSS 12
/* SC7180 Power Domain Indexes */
#define SC7180_CX 0
#define SC7180_CX_AO 1
@@ -219,6 +234,24 @@
#define SM6115_VDD_LPI_CX 6
#define SM6115_VDD_LPI_MX 7
/* SM6125 Power Domains */
#define SM6125_VDDCX 0
#define SM6125_VDDCX_AO 1
#define SM6125_VDDCX_VFL 2
#define SM6125_VDDMX 3
#define SM6125_VDDMX_AO 4
#define SM6125_VDDMX_VFL 5
/* QCM2290 Power Domains */
#define QCM2290_VDDCX 0
#define QCM2290_VDDCX_AO 1
#define QCM2290_VDDCX_VFL 2
#define QCM2290_VDDMX 3
#define QCM2290_VDDMX_AO 4
#define QCM2290_VDDMX_VFL 5
#define QCM2290_VDD_LPI_CX 6
#define QCM2290_VDD_LPI_MX 7
/* RPM SMD Power Domain performance levels */
#define RPM_SMD_LEVEL_RETENTION 16
#define RPM_SMD_LEVEL_RETENTION_PLUS 32


@ -0,0 +1,30 @@
/* SPDX-License-Identifier: (GPL-2.0 or MIT) */
/*
* Copyright (C) 2021 Renesas Electronics Corp.
*/
#ifndef __DT_BINDINGS_POWER_R8A779F0_SYSC_H__
#define __DT_BINDINGS_POWER_R8A779F0_SYSC_H__
/*
* These power domain indices match the Power Domain Register Numbers (PDR)
*/
#define R8A779F0_PD_A1E0D0C0 0
#define R8A779F0_PD_A1E0D0C1 1
#define R8A779F0_PD_A1E0D1C0 2
#define R8A779F0_PD_A1E0D1C1 3
#define R8A779F0_PD_A1E1D0C0 4
#define R8A779F0_PD_A1E1D0C1 5
#define R8A779F0_PD_A1E1D1C0 6
#define R8A779F0_PD_A1E1D1C1 7
#define R8A779F0_PD_A2E0D0 16
#define R8A779F0_PD_A2E0D1 17
#define R8A779F0_PD_A2E1D0 18
#define R8A779F0_PD_A2E1D1 19
#define R8A779F0_PD_A3E0 20
#define R8A779F0_PD_A3E1 21
/* Always-on power area */
#define R8A779F0_PD_ALWAYS_ON 64
#endif /* __DT_BINDINGS_POWER_R8A779F0_SYSC_H__ */


@ -0,0 +1,17 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/*
* Copyright (c) 2021 Linaro Ltd.
* Author: Sam Protsenko <semen.protsenko@linaro.org>
*
* Device Tree bindings for Samsung Exynos USI (Universal Serial Interface).
*/
#ifndef __DT_BINDINGS_SAMSUNG_EXYNOS_USI_H
#define __DT_BINDINGS_SAMSUNG_EXYNOS_USI_H
#define USI_V2_NONE 0
#define USI_V2_UART 1
#define USI_V2_SPI 2
#define USI_V2_I2C 3
#endif /* __DT_BINDINGS_SAMSUNG_EXYNOS_USI_H */


@@ -33,6 +33,9 @@
#define LLCC_MODPE 29
#define LLCC_APTCM 30
#define LLCC_WRCACHE 31
+#define LLCC_CVPFW 32
+#define LLCC_CPUSS1 33
+#define LLCC_CPUHWT 36
/**
 * struct llcc_slice_desc - Cache slice descriptor
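The new slice IDs are consumed through the existing LLCC client API (llcc_slice_getd()/llcc_slice_activate()); a hedged sketch of a hypothetical client claiming the new CVP firmware slice (the function name and error handling are illustrative only):

  #include <linux/soc/qcom/llcc-qcom.h>

  /* Hypothetical client: claim and activate the new CVP firmware slice */
  static int example_use_cvpfw_slice(void)
  {
  	struct llcc_slice_desc *desc;
  	int ret;

  	desc = llcc_slice_getd(LLCC_CVPFW);
  	if (IS_ERR(desc))
  		return PTR_ERR(desc);

  	ret = llcc_slice_activate(desc);
  	if (ret)
  		llcc_slice_putd(desc);

  	return ret;
  }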


@@ -4,8 +4,10 @@
#ifdef CONFIG_RST_RCAR
int rcar_rst_read_mode_pins(u32 *mode);
+int rcar_rst_set_rproc_boot_addr(u64 boot_addr);
#else
static inline int rcar_rst_read_mode_pins(u32 *mode) { return -ENODEV; }
+static inline int rcar_rst_set_rproc_boot_addr(u64 boot_addr) { return -ENODEV; }
#endif
#endif /* __LINUX_SOC_RENESAS_RCAR_RST_H__ */
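rcar_rst_set_rproc_boot_addr() is only declared here; the natural caller would be a remoteproc-style driver that programs the realtime core's boot address before releasing it from reset. A hedged sketch of that calling pattern (the surrounding driver logic is hypothetical):

  #include <linux/soc/renesas/rcar-rst.h>

  /* Hypothetical remoteproc start path: program the boot address first */
  static int example_start_rt_core(u64 fw_boot_addr)
  {
  	int ret;

  	ret = rcar_rst_set_rproc_boot_addr(fw_boot_addr);
  	if (ret)	/* -ENODEV when CONFIG_RST_RCAR is disabled */
  		return ret;

  	/* ...deassert the realtime core's reset and let it run... */
  	return 0;
  }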


@@ -587,4 +587,18 @@ struct tee_client_driver {
#define to_tee_client_driver(d) \
container_of(d, struct tee_client_driver, driver)
+/**
+ * teedev_open() - Open a struct tee_device
+ * @teedev: Device to open
+ *
+ * @return a pointer to struct tee_context on success or an ERR_PTR on failure.
+ */
+struct tee_context *teedev_open(struct tee_device *teedev);
+/**
+ * teedev_close_context() - closes a struct tee_context
+ * @ctx: The struct tee_context to close
+ */
+void teedev_close_context(struct tee_context *ctx);
#endif /*__TEE_DRV_H*/
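These two newly exported helpers give an in-kernel user a tee_context without going through tee_client_open_context(); the OP-TEE SMC backend above uses them for its notification context. A minimal, hedged sketch of the open/close pairing (the surrounding driver code is hypothetical):

  #include <linux/tee_drv.h>

  /* Hypothetical helper: run some work with a private context on @teedev */
  static int example_with_private_ctx(struct tee_device *teedev)
  {
  	struct tee_context *ctx;

  	ctx = teedev_open(teedev);
  	if (IS_ERR(ctx))
  		return PTR_ERR(ctx);

  	/* ...open sessions or invoke commands using ctx... */

  	teedev_close_context(ctx);
  	return 0;
  }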


@@ -57,6 +57,11 @@ struct rpcif_op {
} data;
};
+enum rpcif_type {
+RPCIF_RCAR_GEN3,
+RPCIF_RZ_G2L,
+};
struct rpcif {
struct device *dev;
void __iomem *base;
@@ -64,6 +69,7 @@ struct rpcif {
struct regmap *regmap;
struct reset_control *rstc;
size_t size;
+enum rpcif_type type;
enum rpcif_data_dir dir;
u8 bus_size;
void *buffer;
@@ -78,7 +84,7 @@ struct rpcif {
};
int rpcif_sw_init(struct rpcif *rpc, struct device *dev);
-void rpcif_hw_init(struct rpcif *rpc, bool hyperflash);
+int rpcif_hw_init(struct rpcif *rpc, bool hyperflash);
void rpcif_prepare(struct rpcif *rpc, const struct rpcif_op *op, u64 *offs,
size_t *len);
int rpcif_manual_xfer(struct rpcif *rpc);
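With rpcif_hw_init() now returning an error code (presumably so that setup failures on the newly added RZ/G2L variant can be reported), callers have to check the result instead of assuming success. A hedged sketch of the updated calling pattern in an RPC-IF consumer driver (everything outside the rpcif_* API is illustrative):

  #include <memory/renesas-rpc-if.h>

  /* Hypothetical probe fragment for an RPC-IF backed controller */
  static int example_rpcif_setup(struct rpcif *rpc, struct device *dev,
  			       bool hyperflash)
  {
  	int ret;

  	ret = rpcif_sw_init(rpc, dev);
  	if (ret)
  		return ret;

  	/* Previously returned void; the result must now be checked */
  	ret = rpcif_hw_init(rpc, hyperflash);
  	if (ret)
  		return ret;

  	return 0;
  }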


@@ -39,4 +39,19 @@ devm_tegra_core_dev_init_opp_table(struct device *dev,
}
#endif
+static inline int
+devm_tegra_core_dev_init_opp_table_common(struct device *dev)
+{
+struct tegra_core_opp_params opp_params = {};
+int err;
+opp_params.init_state = true;
+err = devm_tegra_core_dev_init_opp_table(dev, &opp_params);
+if (err != -ENODEV)
+return err;
+return 0;
+}
#endif /* __SOC_TEGRA_COMMON_H__ */
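The _common wrapper above treats a missing OPP table (-ENODEV) as success, so callers only fail on real errors; this is how the Tegra VDE and ChipIdea probe paths earlier in this series use it. A short, hedged sketch of a probe using it (the driver specifics are hypothetical):

  #include <linux/platform_device.h>
  #include <soc/tegra/common.h>

  /* Hypothetical probe fragment: the OPP table is optional on older DTBs */
  static int example_probe(struct platform_device *pdev)
  {
  	int err;

  	err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
  	if (err)	/* only real failures; -ENODEV is swallowed by the helper */
  		return err;

  	/* ...enable clocks, runtime PM, etc... */
  	return 0;
  }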