dmaengine updates for v6.6

Merge tag 'dmaengine-6.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine updates from Vinod Koul:
 "New controller support and updates to drivers.

  New support:
   - Qualcomm SM6115 and QCM2290 dmaengine support
   - at_xdma support for microchip,sam9x7 controller

  Updates:
   - idxd updates for wq simplification and ats knob updates
   - fsl edma updates for v3 support
   - Xilinx AXI4-Stream control support
   - Yaml conversion for bcm dma binding"

* tag 'dmaengine-6.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (53 commits)
  dmaengine: fsl-edma: integrate v3 support
  dt-bindings: fsl-dma: fsl-edma: add edma3 compatible string
  dmaengine: fsl-edma: move tcd into struct fsl_dma_chan
  dmaengine: fsl-edma: refactor chan_name setup and safety
  dmaengine: fsl-edma: move clearing of register interrupt into setup_irq function
  dmaengine: fsl-edma: refactor using devm_clk_get_enabled
  dmaengine: fsl-edma: simply ATTR_DSIZE and ATTR_SSIZE by using ffs()
  dmaengine: fsl-edma: move common IRQ handler to common.c
  dmaengine: fsl-edma: Remove enum edma_version
  dmaengine: fsl-edma: transition from bool fields to bitmask flags in drvdata
  dmaengine: fsl-edma: clean up EXPORT_SYMBOL_GPL in fsl-edma-common.c
  dmaengine: fsl-edma: fix build error when arch is s390
  dmaengine: idxd: Fix issues with PRS disable sysfs knob
  dmaengine: idxd: Allow ATS disable update only for configurable devices
  dmaengine: xilinx_dma: Program interrupt delay timeout
  dmaengine: xilinx_dma: Use tasklet_hi_schedule for timing critical usecase
  dmaengine: xilinx_dma: Freeup active list based on descriptor completion bit
  dmaengine: xilinx_dma: Increase AXI DMA transaction segment count
  dmaengine: xilinx_dma: Pass AXI4-Stream control words to dma client
  dt-bindings: dmaengine: xilinx_dma: Add xlnx,irq-delay property
  ...
commit 708283abf8
@@ -84,7 +84,7 @@ What:           /sys/bus/dsa/devices/dsa<m>/pasid_enabled
 Date:           Oct 27, 2020
 KernelVersion:  5.11.0
 Contact:        dmaengine@vger.kernel.org
-Description:    To indicate if PASID (process address space identifier) is
+Description:    To indicate if user PASID (process address space identifier) is
                 enabled or not for this device.

 What:           /sys/bus/dsa/devices/dsa<m>/state
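
The attribute above is a plain read-only sysfs file. As a minimal user-space sketch (the device instance name "dsa0" is only an example), it can be read like this:

    #include <stdio.h>

    /* Minimal sketch: read the pasid_enabled attribute of a hypothetical
     * "dsa0" instance and print whether user PASID is enabled. */
    int main(void)
    {
            const char *path = "/sys/bus/dsa/devices/dsa0/pasid_enabled";
            char buf[4] = "";
            FILE *f = fopen(path, "r");

            if (!f) {
                    perror(path);
                    return 1;
            }
            if (fgets(buf, sizeof(buf), f))
                    printf("user PASID enabled: %s", buf);
            fclose(f);
            return 0;
    }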
@@ -3,7 +3,8 @@
 * XDMA Controller
 Required properties:
 - compatible: Should be "atmel,sama5d4-dma", "microchip,sam9x60-dma" or
-  "microchip,sama7g5-dma".
+  "microchip,sama7g5-dma" or
+  "microchip,sam9x7-dma", "atmel,sama5d4-dma".
 - reg: Should contain DMA registers location and length.
 - interrupts: Should contain DMA interrupt.
 - #dma-cells: Must be <1>, used to represent the number of integer cells in
@@ -1,83 +0,0 @@
-* BCM2835 DMA controller
-
-The BCM2835 DMA controller has 16 channels in total.
-Only the lower 13 channels have an associated IRQ.
-Some arbitrary channels are used by the firmware
-(1,3,6,7 in the current firmware version).
-The channels 0,2 and 3 have special functionality
-and should not be used by the driver.
-
-Required properties:
-- compatible: Should be "brcm,bcm2835-dma".
-- reg: Should contain DMA registers location and length.
-- interrupts: Should contain the DMA interrupts associated
-              to the DMA channels in ascending order.
-- interrupt-names: Should contain the names of the interrupt
-                   in the form "dmaXX".
-                   Use "dma-shared-all" for the common interrupt line
-                   that is shared by all dma channels.
-- #dma-cells: Must be <1>, the cell in the dmas property of the
-              client device represents the DREQ number.
-- brcm,dma-channel-mask: Bit mask representing the channels
-                         not used by the firmware in ascending order,
-                         i.e. first channel corresponds to LSB.
-
-Example:
-
-dma: dma@7e007000 {
-	compatible = "brcm,bcm2835-dma";
-	reg = <0x7e007000 0xf00>;
-	interrupts = <1 16>,
-		     <1 17>,
-		     <1 18>,
-		     <1 19>,
-		     <1 20>,
-		     <1 21>,
-		     <1 22>,
-		     <1 23>,
-		     <1 24>,
-		     <1 25>,
-		     <1 26>,
-		     /* dma channel 11-14 share one irq */
-		     <1 27>,
-		     <1 27>,
-		     <1 27>,
-		     <1 27>,
-		     /* unused shared irq for all channels */
-		     <1 28>;
-	interrupt-names = "dma0",
-			  "dma1",
-			  "dma2",
-			  "dma3",
-			  "dma4",
-			  "dma5",
-			  "dma6",
-			  "dma7",
-			  "dma8",
-			  "dma9",
-			  "dma10",
-			  "dma11",
-			  "dma12",
-			  "dma13",
-			  "dma14",
-			  "dma-shared-all";
-
-	#dma-cells = <1>;
-	brcm,dma-channel-mask = <0x7f35>;
-};
-
-
-DMA clients connected to the BCM2835 DMA controller must use the format
-described in the dma.txt file, using a two-cell specifier for each channel.
-
-Example:
-
-bcm2835_i2s: i2s@7e203000 {
-	compatible = "brcm,bcm2835-i2s";
-	reg = < 0x7e203000 0x24>;
-	clocks = <&clocks BCM2835_CLOCK_PCM>;
-
-	dmas = <&dma 2>,
-	       <&dma 3>;
-	dma-names = "tx", "rx";
-};
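
The brcm,dma-channel-mask convention carried over into the new schema (bit N set means channel N is free for the kernel, LSB first) can be decoded with a short loop. The sketch below is illustrative only; it prints which channels the example value 0x7f35 exposes.

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative only: decode the example brcm,dma-channel-mask value,
     * where the least significant bit corresponds to DMA channel 0. */
    int main(void)
    {
            uint32_t mask = 0x7f35;     /* value from the example node above */
            int ch;

            for (ch = 0; ch < 16; ch++)
                    if (mask & (1u << ch))
                            printf("channel %d is available to the kernel\n", ch);
            return 0;
    }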
Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.yaml (new file, 102 lines)
@@ -0,0 +1,102 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/brcm,bcm2835-dma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: BCM2835 DMA controller
+
+maintainers:
+  - Nicolas Saenz Julienne <nsaenz@kernel.org>
+
+description:
+  The BCM2835 DMA controller has 16 channels in total. Only the lower
+  13 channels have an associated IRQ. Some arbitrary channels are used by the
+  VideoCore firmware (1,3,6,7 in the current firmware version). The channels
+  0, 2 and 3 have special functionality and should not be used by the driver.
+
+allOf:
+  - $ref: dma-controller.yaml#
+
+properties:
+  compatible:
+    const: brcm,bcm2835-dma
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    description:
+      Should contain the DMA interrupts associated to the DMA channels in
+      ascending order.
+    minItems: 1
+    maxItems: 16
+
+  interrupt-names:
+    minItems: 1
+    maxItems: 16
+
+  '#dma-cells':
+    description: The single cell represents the DREQ number.
+    const: 1
+
+  brcm,dma-channel-mask:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description:
+      Bitmask of available DMA channels in ascending order that are
+      not reserved by firmware and are available to the
+      kernel. i.e. first channel corresponds to LSB.
+
+unevaluatedProperties: false
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - "#dma-cells"
+  - brcm,dma-channel-mask
+
+examples:
+  - |
+    dma-controller@7e007000 {
+      compatible = "brcm,bcm2835-dma";
+      reg = <0x7e007000 0xf00>;
+      interrupts = <1 16>,
+                   <1 17>,
+                   <1 18>,
+                   <1 19>,
+                   <1 20>,
+                   <1 21>,
+                   <1 22>,
+                   <1 23>,
+                   <1 24>,
+                   <1 25>,
+                   <1 26>,
+                   /* dma channel 11-14 share one irq */
+                   <1 27>,
+                   <1 27>,
+                   <1 27>,
+                   <1 27>,
+                   /* unused shared irq for all channels */
+                   <1 28>;
+      interrupt-names = "dma0",
+                        "dma1",
+                        "dma2",
+                        "dma3",
+                        "dma4",
+                        "dma5",
+                        "dma6",
+                        "dma7",
+                        "dma8",
+                        "dma9",
+                        "dma10",
+                        "dma11",
+                        "dma12",
+                        "dma13",
+                        "dma14",
+                        "dma-shared-all";
+      #dma-cells = <1>;
+      brcm,dma-channel-mask = <0x7f35>;
+    };
+
+...
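
The schema keeps the single-cell DREQ specifier, so client drivers keep using the generic dmaengine request API with the dma-names strings ("tx"/"rx" in the old text example). A hedged sketch of how such a client might obtain and release a channel, using only generic dmaengine calls and no BCM2835-specific API:

    #include <linux/dmaengine.h>
    #include <linux/device.h>

    /* Hedged sketch: request the "tx" slave channel named in the client's
     * dma-names property and release it again; error handling is minimal. */
    static int example_request_dma(struct device *dev)
    {
            struct dma_chan *tx_chan;

            tx_chan = dma_request_chan(dev, "tx");
            if (IS_ERR(tx_chan))
                    return PTR_ERR(tx_chan);

            /* ... set up a slave config and prepare descriptors here ... */

            dma_release_channel(tx_chan);
            return 0;
    }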
@@ -21,32 +21,41 @@ properties:
       - enum:
           - fsl,vf610-edma
           - fsl,imx7ulp-edma
           - fsl,imx8qm-adma
           - fsl,imx8qm-edma
           - fsl,imx93-edma3
           - fsl,imx93-edma4
       - items:
           - const: fsl,ls1028a-edma
           - const: fsl,vf610-edma

   reg:
-    minItems: 2
+    minItems: 1
     maxItems: 3

   interrupts:
-    minItems: 2
-    maxItems: 17
+    minItems: 1
+    maxItems: 64

   interrupt-names:
-    minItems: 2
-    maxItems: 17
+    minItems: 1
+    maxItems: 64

   "#dma-cells":
-    const: 2
+    enum:
+      - 2
+      - 3

   dma-channels:
-    const: 32
+    minItems: 1
+    maxItems: 64

   clocks:
     minItems: 1
     maxItems: 2

   clock-names:
     minItems: 1
     maxItems: 2

   big-endian:
@@ -65,6 +74,29 @@ required:

 allOf:
   - $ref: dma-controller.yaml#
   - if:
       properties:
         compatible:
           contains:
             enum:
               - fsl,imx8qm-adma
               - fsl,imx8qm-edma
               - fsl,imx93-edma3
               - fsl,imx93-edma4
     then:
       properties:
         "#dma-cells":
           const: 3
         # It is not necessary to write the interrupt name for each channel.
         # instead, you can simply maintain the sequential IRQ numbers as
         # defined for the DMA channels.
         interrupt-names: false
         clock-names:
           items:
             - const: dma
         clocks:
           maxItems: 1

   - if:
       properties:
         compatible:
@@ -72,18 +104,26 @@ allOf:
           const: fsl,vf610-edma
     then:
       properties:
         clocks:
           minItems: 2
         clock-names:
           items:
             - const: dmamux0
             - const: dmamux1
         interrupts:
           minItems: 2
           maxItems: 2
         interrupt-names:
           items:
             - const: edma-tx
             - const: edma-err
         reg:
           minItems: 2
           maxItems: 3
         "#dma-cells":
           const: 2
         dma-channels:
           const: 32

   - if:
       properties:
@@ -92,14 +132,22 @@ allOf:
           const: fsl,imx7ulp-edma
     then:
       properties:
         clock:
           minItems: 2
         clock-names:
           items:
             - const: dma
             - const: dmamux0
         interrupts:
           minItems: 2
           maxItems: 17
         reg:
           minItems: 2
           maxItems: 2
         "#dma-cells":
           const: 2
         dma-channels:
           const: 32

 unevaluatedProperties: false

@@ -153,3 +201,47 @@ examples:
         clock-names = "dma", "dmamux0";
         clocks = <&pcc2 IMX7ULP_CLK_DMA1>, <&pcc2 IMX7ULP_CLK_DMA_MUX1>;
     };

   - |
     #include <dt-bindings/interrupt-controller/arm-gic.h>
     #include <dt-bindings/clock/imx93-clock.h>

     dma-controller@44000000 {
       compatible = "fsl,imx93-edma3";
       reg = <0x44000000 0x200000>;
       #dma-cells = <3>;
       dma-channels = <31>;
       interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>,
                    <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
                    <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
                    <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
                    <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
                    <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
                    <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
                    <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
                    <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
                    <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
                    <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
                    <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
                    <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
                    <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
                    <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
                    <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
                    <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
                    <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
                    <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
                    <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
                    <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
                    <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
                    <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
                    <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
                    <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>,
                    <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
                    <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
                    <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
                    <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
                    <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
                    <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
       clocks = <&clk IMX93_CLK_EDMA1_GATE>;
       clock-names = "dma";
     };
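
With "#dma-cells" extended to 3 for the eDMA v3/v4 variants, the extra consumer cells carry per-channel information; the driver's fsl_edma3_xlate() later in this diff reads cell 1 as a priority and cell 2 as flag bits (ARGS_RX, ARGS_REMOTE, ARGS_MULTI_FIFO). A hedged sketch of the same unpacking, with an illustrative struct that is not part of the driver:

    #include <linux/types.h>
    #include <linux/bits.h>

    /* Hedged sketch of how a 3-cell eDMA specifier could be unpacked; the
     * flag-bit meanings mirror ARGS_RX/ARGS_REMOTE/ARGS_MULTI_FIFO used by
     * fsl-edma-main.c in this series, but the struct here is illustrative. */
    struct edma3_spec {
            u32 mux_or_chan;        /* cell 0: request source / channel id */
            u32 priority;           /* cell 1: channel priority */
            bool is_rx;             /* cell 2, bit 0 */
            bool is_remote;         /* cell 2, bit 1 */
            bool is_multi_fifo;     /* cell 2, bit 2 */
    };

    static void edma3_parse_spec(const u32 args[3], struct edma3_spec *spec)
    {
            spec->mux_or_chan   = args[0];
            spec->priority      = args[1];
            spec->is_rx         = args[2] & BIT(0);
            spec->is_remote     = args[2] & BIT(1);
            spec->is_multi_fifo = args[2] & BIT(2);
    }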
@@ -15,13 +15,19 @@ allOf:

 properties:
   compatible:
-    enum:
-      # APQ8064, IPQ8064 and MSM8960
-      - qcom,bam-v1.3.0
-      # MSM8974, APQ8074 and APQ8084
-      - qcom,bam-v1.4.0
-      # MSM8916 and SDM845
-      - qcom,bam-v1.7.0
+    oneOf:
+      - enum:
+          # APQ8064, IPQ8064 and MSM8960
+          - qcom,bam-v1.3.0
+          # MSM8974, APQ8074 and APQ8084
+          - qcom,bam-v1.4.0
+          # MSM8916, SDM630
+          - qcom,bam-v1.7.0
+      - items:
+          - enum:
+              # SDM845, SM6115, SM8150, SM8250 and QCM2290
+              - qcom,bam-v1.7.4
+          - const: qcom,bam-v1.7.0

   clocks:
     maxItems: 1
@@ -38,7 +44,7 @@ properties:

   iommus:
     minItems: 1
-    maxItems: 4
+    maxItems: 6

   num-channels:
     $ref: /schemas/types.yaml#/definitions/uint32
@@ -81,6 +87,15 @@ required:
   - qcom,ee
   - reg

+anyOf:
+  - required:
+      - qcom,powered-remotely
+  - required:
+      - qcom,controlled-remotely
+  - required:
+      - clocks
+      - clock-names
+
 additionalProperties: false

 examples:
@@ -49,6 +49,12 @@ Optional properties for AXI DMA and MCDMA:
 	register as configured in h/w. Takes values {8...26}. If the property
 	is missing or invalid then the default value 23 is used. This is the
 	maximum value that is supported by all IP versions.
+
+Optional properties for AXI DMA:
+- xlnx,axistream-connected: Tells whether DMA is connected to AXI stream IP.
+- xlnx,irq-delay: Tells the interrupt delay timeout value. Valid range is from
+  0-255. Setting this value to zero disables the delay timer interrupt.
+  1 timeout interval = 125 * clock period of SG clock.
 Optional properties for VDMA:
 - xlnx,flush-fsync: Tells which channel to Flush on Frame sync.
 	It takes following values:
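
As a worked example of the xlnx,irq-delay formula above, assuming (purely for illustration) a 100 MHz scatter-gather clock and therefore a 10 ns period, a value of 8 gives 8 * 125 * 10 ns = 10 us of interrupt coalescing delay:

    /* Illustration only: delay implied by xlnx,irq-delay for a hypothetical
     * SG clock period given in nanoseconds. */
    static unsigned int irq_delay_ns(unsigned int irq_delay,
                                     unsigned int sg_clk_period_ns)
    {
            /* 1 timeout interval = 125 * clock period of the SG clock */
            return irq_delay * 125 * sg_clk_period_ns;
    }
    /* irq_delay_ns(8, 10) == 10000 ns, i.e. a 10 us delay timeout. */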
@@ -474,25 +474,6 @@ config MXS_DMA
 	  Support the MXS DMA engine. This engine including APBH-DMA
 	  and APBX-DMA is integrated into some Freescale chips.

-config MX3_IPU
-	bool "MX3x Image Processing Unit support"
-	depends on ARCH_MXC
-	select DMA_ENGINE
-	default y
-	help
-	  If you plan to use the Image Processing unit in the i.MX3x, say
-	  Y here. If unsure, select Y.
-
-config MX3_IPU_IRQS
-	int "Number of dynamically mapped interrupts for IPU"
-	depends on MX3_IPU
-	range 2 137
-	default 4
-	help
-	  Out of 137 interrupt sources on i.MX31 IPU only very few are used.
-	  To avoid bloating the irq_desc[] array we allocate a sufficient
-	  number of IRQ slots and map them dynamically to specific sources.
-
 config NBPFAXI_DMA
 	tristate "Renesas Type-AXI NBPF DMA support"
 	select DMA_ENGINE
@@ -699,7 +680,7 @@ config XGENE_DMA

 config XILINX_DMA
 	tristate "Xilinx AXI DMAS Engine"
 	depends on (ARCH_ZYNQ || MICROBLAZE || ARM64)
 	depends on HAS_IOMEM
 	select DMA_ENGINE
 	help
 	  Enable support for Xilinx AXI VDMA Soft IP.
@@ -32,8 +32,10 @@ obj-$(CONFIG_DW_DMAC_CORE) += dw/
 obj-$(CONFIG_DW_EDMA) += dw-edma/
 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
-obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o
-obj-$(CONFIG_MCF_EDMA) += mcf-edma.o fsl-edma-common.o
+fsl-edma-objs := fsl-edma-main.o fsl-edma-common.o
+obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
+mcf-edma-objs := mcf-edma-main.o fsl-edma-common.o
+obj-$(CONFIG_MCF_EDMA) += mcf-edma.o
 obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
 obj-$(CONFIG_FSL_RAID) += fsl_raid.o
 obj-$(CONFIG_HISI_DMA) += hisi_dma.o
@@ -55,7 +57,6 @@ obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
 obj-$(CONFIG_MV_XOR) += mv_xor.o
 obj-$(CONFIG_MV_XOR_V2) += mv_xor_v2.o
 obj-$(CONFIG_MXS_DMA) += mxs-dma.o
-obj-$(CONFIG_MX3_IPU) += ipu/
 obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
 obj-$(CONFIG_OWL_DMA) += owl-dma.o
 obj-$(CONFIG_PCH_DMA) += pch_dma.o
@ -10,8 +10,9 @@
|
||||
#include <linux/device.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_dma.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/reset.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/interrupt.h>
|
||||
|
@ -20,7 +20,7 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/overflow.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/of_dma.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/slab.h>
|
||||
|
@ -35,7 +35,9 @@
|
||||
#include <linux/mailbox_client.h>
|
||||
#include <linux/mailbox/brcm-message.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/raid/pq.h>
|
||||
|
||||
|
@ -14,9 +14,8 @@
|
||||
#include <linux/slab.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/mpc52xx.h>
|
||||
|
@ -13,7 +13,6 @@
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of_dma.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/slab.h>
|
||||
|
@ -1147,69 +1147,27 @@ int dma_async_device_register(struct dma_device *device)
|
||||
|
||||
device->owner = device->dev->driver->owner;
|
||||
|
||||
if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
|
||||
dev_err(device->dev,
|
||||
"Device claims capability %s, but op is not defined\n",
|
||||
"DMA_MEMCPY");
|
||||
return -EIO;
|
||||
}
|
||||
#define CHECK_CAP(_name, _type) \
|
||||
{ \
|
||||
if (dma_has_cap(_type, device->cap_mask) && !device->device_prep_##_name) { \
|
||||
dev_err(device->dev, \
|
||||
"Device claims capability %s, but op is not defined\n", \
|
||||
__stringify(_type)); \
|
||||
return -EIO; \
|
||||
} \
|
||||
}
|
||||
|
||||
if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
|
||||
dev_err(device->dev,
|
||||
"Device claims capability %s, but op is not defined\n",
|
||||
"DMA_XOR");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
|
||||
dev_err(device->dev,
|
||||
"Device claims capability %s, but op is not defined\n",
|
||||
"DMA_XOR_VAL");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
|
||||
dev_err(device->dev,
|
||||
"Device claims capability %s, but op is not defined\n",
|
||||
"DMA_PQ");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
|
||||
dev_err(device->dev,
|
||||
"Device claims capability %s, but op is not defined\n",
|
||||
"DMA_PQ_VAL");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
|
||||
dev_err(device->dev,
|
||||
"Device claims capability %s, but op is not defined\n",
|
||||
"DMA_MEMSET");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
|
||||
dev_err(device->dev,
|
||||
"Device claims capability %s, but op is not defined\n",
|
||||
"DMA_INTERRUPT");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
|
||||
dev_err(device->dev,
|
||||
"Device claims capability %s, but op is not defined\n",
|
||||
"DMA_CYCLIC");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
|
||||
dev_err(device->dev,
|
||||
"Device claims capability %s, but op is not defined\n",
|
||||
"DMA_INTERLEAVE");
|
||||
return -EIO;
|
||||
}
|
||||
CHECK_CAP(dma_memcpy, DMA_MEMCPY);
|
||||
CHECK_CAP(dma_xor, DMA_XOR);
|
||||
CHECK_CAP(dma_xor_val, DMA_XOR_VAL);
|
||||
CHECK_CAP(dma_pq, DMA_PQ);
|
||||
CHECK_CAP(dma_pq_val, DMA_PQ_VAL);
|
||||
CHECK_CAP(dma_memset, DMA_MEMSET);
|
||||
CHECK_CAP(dma_interrupt, DMA_INTERRUPT);
|
||||
CHECK_CAP(dma_cyclic, DMA_CYCLIC);
|
||||
CHECK_CAP(interleaved_dma, DMA_INTERLEAVE);
|
||||
|
||||
#undef CHECK_CAP
|
||||
|
||||
if (!device->device_tx_status) {
|
||||
dev_err(device->dev, "Device tx_status is not defined\n");
|
||||
|
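
The CHECK_CAP() change above folds nine near-identical capability checks into one local macro that token-pastes the callback name and stringifies the capability. A stand-alone sketch of the same pattern, using a hypothetical struct and capability names rather than the dmaengine core itself:

    #include <stdio.h>

    /* Hypothetical device with optional operations, used only to illustrate
     * the "capability claimed vs. callback provided" macro consolidation. */
    struct demo_device {
            unsigned int cap_mask;
            int (*prep_memcpy)(void);
            int (*prep_memset)(void);
    };

    #define DEMO_CAP_MEMCPY 0x1
    #define DEMO_CAP_MEMSET 0x2

    static int demo_register(struct demo_device *dev)
    {
    #define CHECK_CAP(_name, _cap)                                          \
            do {                                                            \
                    if ((dev->cap_mask & (_cap)) && !dev->prep_##_name) {   \
                            fprintf(stderr,                                 \
                                    "claims %s but op is not defined\n",    \
                                    #_cap);                                 \
                            return -1;                                      \
                    }                                                       \
            } while (0)

            CHECK_CAP(memcpy, DEMO_CAP_MEMCPY);
            CHECK_CAP(memset, DEMO_CAP_MEMSET);
    #undef CHECK_CAP
            return 0;
    }

    int main(void)
    {
            /* claims MEMCPY but provides no prep_memcpy, so registration fails */
            struct demo_device dev = { .cap_mask = DEMO_CAP_MEMCPY };

            return demo_register(&dev) ? 1 : 0;
    }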
@ -21,7 +21,6 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of_dma.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
|
@ -5,8 +5,10 @@
|
||||
* Based on TI crossbar driver written by Peter Ujfalusi <peter.ujfalusi@ti.com>
|
||||
*/
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_dma.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/soc/renesas/r9a06g032-sysctrl.h>
|
||||
#include <linux/types.h>
|
||||
|
@ -1320,11 +1320,9 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
|
||||
struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
|
||||
struct ep93xx_dma_engine *edma;
|
||||
struct dma_device *dma_dev;
|
||||
size_t edma_size;
|
||||
int ret, i;
|
||||
|
||||
edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
|
||||
edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
|
||||
edma = kzalloc(struct_size(edma, channels, pdata->num_channels), GFP_KERNEL);
|
||||
if (!edma)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -7,6 +7,8 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/pm_domain.h>
|
||||
|
||||
#include "fsl-edma-common.h"
|
||||
|
||||
@ -40,14 +42,73 @@
|
||||
#define EDMA64_ERRH 0x28
|
||||
#define EDMA64_ERRL 0x2c
|
||||
|
||||
#define EDMA_TCD 0x1000
|
||||
void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan)
|
||||
{
|
||||
spin_lock(&fsl_chan->vchan.lock);
|
||||
|
||||
if (!fsl_chan->edesc) {
|
||||
/* terminate_all called before */
|
||||
spin_unlock(&fsl_chan->vchan.lock);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!fsl_chan->edesc->iscyclic) {
|
||||
list_del(&fsl_chan->edesc->vdesc.node);
|
||||
vchan_cookie_complete(&fsl_chan->edesc->vdesc);
|
||||
fsl_chan->edesc = NULL;
|
||||
fsl_chan->status = DMA_COMPLETE;
|
||||
fsl_chan->idle = true;
|
||||
} else {
|
||||
vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
|
||||
}
|
||||
|
||||
if (!fsl_chan->edesc)
|
||||
fsl_edma_xfer_desc(fsl_chan);
|
||||
|
||||
spin_unlock(&fsl_chan->vchan.lock);
|
||||
}
|
||||
|
||||
static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
|
||||
{
|
||||
u32 val, flags;
|
||||
|
||||
flags = fsl_edma_drvflags(fsl_chan);
|
||||
val = edma_readl_chreg(fsl_chan, ch_sbr);
|
||||
/* Remote/local swapped wrongly on iMX8 QM Audio edma */
|
||||
if (flags & FSL_EDMA_DRV_QUIRK_SWAPPED) {
|
||||
if (!fsl_chan->is_rxchan)
|
||||
val |= EDMA_V3_CH_SBR_RD;
|
||||
else
|
||||
val |= EDMA_V3_CH_SBR_WR;
|
||||
} else {
|
||||
if (fsl_chan->is_rxchan)
|
||||
val |= EDMA_V3_CH_SBR_RD;
|
||||
else
|
||||
val |= EDMA_V3_CH_SBR_WR;
|
||||
}
|
||||
|
||||
if (fsl_chan->is_remote)
|
||||
val &= ~(EDMA_V3_CH_SBR_RD | EDMA_V3_CH_SBR_WR);
|
||||
|
||||
edma_writel_chreg(fsl_chan, val, ch_sbr);
|
||||
|
||||
if (flags & FSL_EDMA_DRV_HAS_CHMUX)
|
||||
edma_writel_chreg(fsl_chan, fsl_chan->srcid, ch_mux);
|
||||
|
||||
val = edma_readl_chreg(fsl_chan, ch_csr);
|
||||
val |= EDMA_V3_CH_CSR_ERQ;
|
||||
edma_writel_chreg(fsl_chan, val, ch_csr);
|
||||
}
|
||||
|
||||
static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
|
||||
{
|
||||
struct edma_regs *regs = &fsl_chan->edma->regs;
|
||||
u32 ch = fsl_chan->vchan.chan.chan_id;
|
||||
|
||||
if (fsl_chan->edma->drvdata->version == v1) {
|
||||
if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
|
||||
return fsl_edma3_enable_request(fsl_chan);
|
||||
|
||||
if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
|
||||
edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
|
||||
edma_writeb(fsl_chan->edma, ch, regs->serq);
|
||||
} else {
|
||||
@ -59,12 +120,29 @@ static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
|
||||
}
|
||||
}
|
||||
|
||||
static void fsl_edma3_disable_request(struct fsl_edma_chan *fsl_chan)
|
||||
{
|
||||
u32 val = edma_readl_chreg(fsl_chan, ch_csr);
|
||||
u32 flags;
|
||||
|
||||
flags = fsl_edma_drvflags(fsl_chan);
|
||||
|
||||
if (flags & FSL_EDMA_DRV_HAS_CHMUX)
|
||||
edma_writel_chreg(fsl_chan, 0, ch_mux);
|
||||
|
||||
val &= ~EDMA_V3_CH_CSR_ERQ;
|
||||
edma_writel_chreg(fsl_chan, val, ch_csr);
|
||||
}
|
||||
|
||||
void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
|
||||
{
|
||||
struct edma_regs *regs = &fsl_chan->edma->regs;
|
||||
u32 ch = fsl_chan->vchan.chan.chan_id;
|
||||
|
||||
if (fsl_chan->edma->drvdata->version == v1) {
|
||||
if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
|
||||
return fsl_edma3_disable_request(fsl_chan);
|
||||
|
||||
if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
|
||||
edma_writeb(fsl_chan->edma, ch, regs->cerq);
|
||||
edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
|
||||
} else {
|
||||
@ -75,7 +153,6 @@ void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
|
||||
iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fsl_edma_disable_request);
|
||||
|
||||
static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
|
||||
u32 off, u32 slot, bool enable)
|
||||
@ -112,36 +189,33 @@ void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
|
||||
int endian_diff[4] = {3, 1, -1, -3};
|
||||
u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;
|
||||
|
||||
if (!dmamux_nr)
|
||||
return;
|
||||
|
||||
chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
|
||||
ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
|
||||
|
||||
if (fsl_chan->edma->drvdata->mux_swap)
|
||||
if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_MUX_SWAP)
|
||||
ch_off += endian_diff[ch_off % 4];
|
||||
|
||||
muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
|
||||
slot = EDMAMUX_CHCFG_SOURCE(slot);
|
||||
|
||||
if (fsl_chan->edma->drvdata->version == v3)
|
||||
if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_CONFIG32)
|
||||
mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
|
||||
else
|
||||
mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);
|
||||
|
||||
static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
|
||||
{
|
||||
switch (addr_width) {
|
||||
case 1:
|
||||
return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
|
||||
case 2:
|
||||
return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
|
||||
case 4:
|
||||
return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
|
||||
case 8:
|
||||
return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
|
||||
default:
|
||||
return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
|
||||
}
|
||||
u32 val;
|
||||
|
||||
if (addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
|
||||
addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
|
||||
|
||||
val = ffs(addr_width) - 1;
|
||||
return val | (val << 8);
|
||||
}
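
The ffs()-based rewrite above maps a bus width of 2^n bytes to the n encoding shared by the SSIZE (bits 8..10) and DSIZE (bits 0..2) fields. A quick user-space check of that mapping, using the C library's ffs(), which agrees with the kernel helper for these small values:

    #include <stdio.h>
    #include <strings.h>    /* ffs() */

    /* Sanity-check the width -> TCD attr mapping: 1, 2, 4, 8 bytes should give
     * 0x0000, 0x0101, 0x0202, 0x0303, matching the removed
     * EDMA_TCD_ATTR_SSIZE_*/DSIZE_* constants. */
    int main(void)
    {
            int widths[] = { 1, 2, 4, 8 };
            unsigned int i;

            for (i = 0; i < sizeof(widths) / sizeof(widths[0]); i++) {
                    unsigned int val = ffs(widths[i]) - 1;

                    printf("width %d -> attr 0x%04x\n", widths[i], val | (val << 8));
            }
            return 0;
    }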
|
||||
|
||||
void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
|
||||
@ -155,7 +229,6 @@ void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
|
||||
fsl_desc->tcd[i].ptcd);
|
||||
kfree(fsl_desc);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fsl_edma_free_desc);
|
||||
|
||||
int fsl_edma_terminate_all(struct dma_chan *chan)
|
||||
{
|
||||
@ -170,9 +243,12 @@ int fsl_edma_terminate_all(struct dma_chan *chan)
|
||||
vchan_get_all_descriptors(&fsl_chan->vchan, &head);
|
||||
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
|
||||
vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
|
||||
|
||||
if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_PD)
|
||||
pm_runtime_allow(fsl_chan->pd_dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fsl_edma_terminate_all);
|
||||
|
||||
int fsl_edma_pause(struct dma_chan *chan)
|
||||
{
|
||||
@ -188,7 +264,6 @@ int fsl_edma_pause(struct dma_chan *chan)
|
||||
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fsl_edma_pause);
|
||||
|
||||
int fsl_edma_resume(struct dma_chan *chan)
|
||||
{
|
||||
@ -204,7 +279,6 @@ int fsl_edma_resume(struct dma_chan *chan)
|
||||
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fsl_edma_resume);
|
||||
|
||||
static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
|
||||
{
|
||||
@ -265,36 +339,41 @@ int fsl_edma_slave_config(struct dma_chan *chan,
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fsl_edma_slave_config);
|
||||
|
||||
static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
|
||||
struct virt_dma_desc *vdesc, bool in_progress)
|
||||
{
|
||||
struct fsl_edma_desc *edesc = fsl_chan->edesc;
|
||||
struct edma_regs *regs = &fsl_chan->edma->regs;
|
||||
u32 ch = fsl_chan->vchan.chan.chan_id;
|
||||
enum dma_transfer_direction dir = edesc->dirn;
|
||||
dma_addr_t cur_addr, dma_addr;
|
||||
size_t len, size;
|
||||
u32 nbytes = 0;
|
||||
int i;
|
||||
|
||||
/* calculate the total size in this desc */
|
||||
for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
|
||||
len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
|
||||
* le16_to_cpu(edesc->tcd[i].vtcd->biter);
|
||||
for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++) {
|
||||
nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes);
|
||||
if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
|
||||
nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
|
||||
len += nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter);
|
||||
}
|
||||
|
||||
if (!in_progress)
|
||||
return len;
|
||||
|
||||
if (dir == DMA_MEM_TO_DEV)
|
||||
cur_addr = edma_readl(fsl_chan->edma, ®s->tcd[ch].saddr);
|
||||
cur_addr = edma_read_tcdreg(fsl_chan, saddr);
|
||||
else
|
||||
cur_addr = edma_readl(fsl_chan->edma, ®s->tcd[ch].daddr);
|
||||
cur_addr = edma_read_tcdreg(fsl_chan, daddr);
|
||||
|
||||
/* figure out the finished and calculate the residue */
|
||||
for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
|
||||
size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
|
||||
* le16_to_cpu(edesc->tcd[i].vtcd->biter);
|
||||
nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes);
|
||||
if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
|
||||
nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
|
||||
|
||||
size = nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter);
|
||||
|
||||
if (dir == DMA_MEM_TO_DEV)
|
||||
dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
|
||||
else
|
||||
@ -340,14 +419,10 @@ enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
|
||||
|
||||
return fsl_chan->status;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fsl_edma_tx_status);
|
||||
|
||||
static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
|
||||
struct fsl_edma_hw_tcd *tcd)
|
||||
{
|
||||
struct fsl_edma_engine *edma = fsl_chan->edma;
|
||||
struct edma_regs *regs = &fsl_chan->edma->regs;
|
||||
u32 ch = fsl_chan->vchan.chan.chan_id;
|
||||
u16 csr = 0;
|
||||
|
||||
/*
|
||||
@ -356,23 +431,22 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
|
||||
* big- or little-endian obeying the eDMA engine model endian,
|
||||
* and this is performed from specific edma_write functions
|
||||
*/
|
||||
edma_writew(edma, 0, ®s->tcd[ch].csr);
|
||||
edma_write_tcdreg(fsl_chan, 0, csr);
|
||||
|
||||
edma_writel(edma, (s32)tcd->saddr, ®s->tcd[ch].saddr);
|
||||
edma_writel(edma, (s32)tcd->daddr, ®s->tcd[ch].daddr);
|
||||
edma_write_tcdreg(fsl_chan, tcd->saddr, saddr);
|
||||
edma_write_tcdreg(fsl_chan, tcd->daddr, daddr);
|
||||
|
||||
edma_writew(edma, (s16)tcd->attr, ®s->tcd[ch].attr);
|
||||
edma_writew(edma, tcd->soff, ®s->tcd[ch].soff);
|
||||
edma_write_tcdreg(fsl_chan, tcd->attr, attr);
|
||||
edma_write_tcdreg(fsl_chan, tcd->soff, soff);
|
||||
|
||||
edma_writel(edma, (s32)tcd->nbytes, ®s->tcd[ch].nbytes);
|
||||
edma_writel(edma, (s32)tcd->slast, ®s->tcd[ch].slast);
|
||||
edma_write_tcdreg(fsl_chan, tcd->nbytes, nbytes);
|
||||
edma_write_tcdreg(fsl_chan, tcd->slast, slast);
|
||||
|
||||
edma_writew(edma, (s16)tcd->citer, ®s->tcd[ch].citer);
|
||||
edma_writew(edma, (s16)tcd->biter, ®s->tcd[ch].biter);
|
||||
edma_writew(edma, (s16)tcd->doff, ®s->tcd[ch].doff);
|
||||
edma_write_tcdreg(fsl_chan, tcd->citer, citer);
|
||||
edma_write_tcdreg(fsl_chan, tcd->biter, biter);
|
||||
edma_write_tcdreg(fsl_chan, tcd->doff, doff);
|
||||
|
||||
edma_writel(edma, (s32)tcd->dlast_sga,
|
||||
®s->tcd[ch].dlast_sga);
|
||||
edma_write_tcdreg(fsl_chan, tcd->dlast_sga, dlast_sga);
|
||||
|
||||
if (fsl_chan->is_sw) {
|
||||
csr = le16_to_cpu(tcd->csr);
|
||||
@ -380,16 +454,19 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
|
||||
tcd->csr = cpu_to_le16(csr);
|
||||
}
|
||||
|
||||
edma_writew(edma, (s16)tcd->csr, ®s->tcd[ch].csr);
|
||||
edma_write_tcdreg(fsl_chan, tcd->csr, csr);
|
||||
}
|
||||
|
||||
static inline
|
||||
void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
|
||||
void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
|
||||
struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
|
||||
u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
|
||||
u16 biter, u16 doff, u32 dlast_sga, bool major_int,
|
||||
bool disable_req, bool enable_sg)
|
||||
{
|
||||
struct dma_slave_config *cfg = &fsl_chan->cfg;
|
||||
u16 csr = 0;
|
||||
u32 burst;
|
||||
|
||||
/*
|
||||
* eDMA hardware SGs require the TCDs to be stored in little
|
||||
@ -404,6 +481,21 @@ void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
|
||||
|
||||
tcd->soff = cpu_to_le16(soff);
|
||||
|
||||
if (fsl_chan->is_multi_fifo) {
|
||||
/* set mloff to support multiple fifo */
|
||||
burst = cfg->direction == DMA_DEV_TO_MEM ?
|
||||
cfg->src_addr_width : cfg->dst_addr_width;
|
||||
nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-(burst * 4));
|
||||
/* enable DMLOE/SMLOE */
|
||||
if (cfg->direction == DMA_MEM_TO_DEV) {
|
||||
nbytes |= EDMA_V3_TCD_NBYTES_DMLOE;
|
||||
nbytes &= ~EDMA_V3_TCD_NBYTES_SMLOE;
|
||||
} else {
|
||||
nbytes |= EDMA_V3_TCD_NBYTES_SMLOE;
|
||||
nbytes &= ~EDMA_V3_TCD_NBYTES_DMLOE;
|
||||
}
|
||||
}
|
||||
|
||||
tcd->nbytes = cpu_to_le32(nbytes);
|
||||
tcd->slast = cpu_to_le32(slast);
|
||||
|
||||
@ -422,6 +514,12 @@ void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
|
||||
if (enable_sg)
|
||||
csr |= EDMA_TCD_CSR_E_SG;
|
||||
|
||||
if (fsl_chan->is_rxchan)
|
||||
csr |= EDMA_TCD_CSR_ACTIVE;
|
||||
|
||||
if (fsl_chan->is_sw)
|
||||
csr |= EDMA_TCD_CSR_START;
|
||||
|
||||
tcd->csr = cpu_to_le16(csr);
|
||||
}
|
||||
|
||||
@ -461,6 +559,7 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
|
||||
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
|
||||
struct fsl_edma_desc *fsl_desc;
|
||||
dma_addr_t dma_buf_next;
|
||||
bool major_int = true;
|
||||
int sg_len, i;
|
||||
u32 src_addr, dst_addr, last_sg, nbytes;
|
||||
u16 soff, doff, iter;
|
||||
@ -504,23 +603,28 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
|
||||
src_addr = dma_buf_next;
|
||||
dst_addr = fsl_chan->dma_dev_addr;
|
||||
soff = fsl_chan->cfg.dst_addr_width;
|
||||
doff = 0;
|
||||
} else {
|
||||
doff = fsl_chan->is_multi_fifo ? 4 : 0;
|
||||
} else if (direction == DMA_DEV_TO_MEM) {
|
||||
src_addr = fsl_chan->dma_dev_addr;
|
||||
dst_addr = dma_buf_next;
|
||||
soff = 0;
|
||||
soff = fsl_chan->is_multi_fifo ? 4 : 0;
|
||||
doff = fsl_chan->cfg.src_addr_width;
|
||||
} else {
|
||||
/* DMA_DEV_TO_DEV */
|
||||
src_addr = fsl_chan->cfg.src_addr;
|
||||
dst_addr = fsl_chan->cfg.dst_addr;
|
||||
soff = doff = 0;
|
||||
major_int = false;
|
||||
}
|
||||
|
||||
fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
|
||||
fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
|
||||
fsl_chan->attr, soff, nbytes, 0, iter,
|
||||
iter, doff, last_sg, true, false, true);
|
||||
iter, doff, last_sg, major_int, false, true);
|
||||
dma_buf_next += period_len;
|
||||
}
|
||||
|
||||
return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fsl_edma_prep_dma_cyclic);
|
||||
|
||||
struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
|
||||
struct dma_chan *chan, struct scatterlist *sgl,
|
||||
@ -564,23 +668,51 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
|
||||
dst_addr = fsl_chan->dma_dev_addr;
|
||||
soff = fsl_chan->cfg.dst_addr_width;
|
||||
doff = 0;
|
||||
} else {
|
||||
} else if (direction == DMA_DEV_TO_MEM) {
|
||||
src_addr = fsl_chan->dma_dev_addr;
|
||||
dst_addr = sg_dma_address(sg);
|
||||
soff = 0;
|
||||
doff = fsl_chan->cfg.src_addr_width;
|
||||
} else {
|
||||
/* DMA_DEV_TO_DEV */
|
||||
src_addr = fsl_chan->cfg.src_addr;
|
||||
dst_addr = fsl_chan->cfg.dst_addr;
|
||||
soff = 0;
|
||||
doff = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Choose the suitable burst length if sg_dma_len is not
|
||||
* multiple of burst length so that the whole transfer length is
|
||||
* multiple of minor loop(burst length).
|
||||
*/
|
||||
if (sg_dma_len(sg) % nbytes) {
|
||||
u32 width = (direction == DMA_DEV_TO_MEM) ? doff : soff;
|
||||
u32 burst = (direction == DMA_DEV_TO_MEM) ?
|
||||
fsl_chan->cfg.src_maxburst :
|
||||
fsl_chan->cfg.dst_maxburst;
|
||||
int j;
|
||||
|
||||
for (j = burst; j > 1; j--) {
|
||||
if (!(sg_dma_len(sg) % (j * width))) {
|
||||
nbytes = j * width;
|
||||
break;
|
||||
}
|
||||
}
|
||||
/* Set burst size as 1 if there's no suitable one */
|
||||
if (j == 1)
|
||||
nbytes = width;
|
||||
}
|
||||
iter = sg_dma_len(sg) / nbytes;
|
||||
if (i < sg_len - 1) {
|
||||
last_sg = fsl_desc->tcd[(i + 1)].ptcd;
|
||||
fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
|
||||
fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
|
||||
dst_addr, fsl_chan->attr, soff,
|
||||
nbytes, 0, iter, iter, doff, last_sg,
|
||||
false, false, true);
|
||||
} else {
|
||||
last_sg = 0;
|
||||
fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
|
||||
fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
|
||||
dst_addr, fsl_chan->attr, soff,
|
||||
nbytes, 0, iter, iter, doff, last_sg,
|
||||
true, true, false);
|
||||
@ -589,7 +721,6 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
|
||||
|
||||
return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fsl_edma_prep_slave_sg);
|
||||
|
||||
struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
|
||||
dma_addr_t dma_dst, dma_addr_t dma_src,
|
||||
@ -606,13 +737,12 @@ struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
|
||||
fsl_chan->is_sw = true;
|
||||
|
||||
/* To match with copy_align and max_seg_size so 1 tcd is enough */
|
||||
fsl_edma_fill_tcd(fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
|
||||
EDMA_TCD_ATTR_SSIZE_32BYTE | EDMA_TCD_ATTR_DSIZE_32BYTE,
|
||||
fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
|
||||
fsl_edma_get_tcd_attr(DMA_SLAVE_BUSWIDTH_32_BYTES),
|
||||
32, len, 0, 1, 1, 32, 0, true, true, false);
|
||||
|
||||
return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fsl_edma_prep_memcpy);
|
||||
|
||||
void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
|
||||
{
|
||||
@ -629,7 +759,6 @@ void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
|
||||
fsl_chan->status = DMA_IN_PROGRESS;
|
||||
fsl_chan->idle = false;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fsl_edma_xfer_desc);
|
||||
|
||||
void fsl_edma_issue_pending(struct dma_chan *chan)
|
||||
{
|
||||
@ -649,7 +778,6 @@ void fsl_edma_issue_pending(struct dma_chan *chan)
|
||||
|
||||
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fsl_edma_issue_pending);
|
||||
|
||||
int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
|
||||
{
|
||||
@ -660,7 +788,6 @@ int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
|
||||
32, 0);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fsl_edma_alloc_chan_resources);
|
||||
|
||||
void fsl_edma_free_chan_resources(struct dma_chan *chan)
|
||||
{
|
||||
@ -683,7 +810,6 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan)
|
||||
fsl_chan->tcd_pool = NULL;
|
||||
fsl_chan->is_sw = false;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fsl_edma_free_chan_resources);
|
||||
|
||||
void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
|
||||
{
|
||||
@ -695,12 +821,10 @@ void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
|
||||
tasklet_kill(&chan->vchan.task);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan);
|
||||
|
||||
/*
|
||||
* On the 32 channels Vybrid/mpc577x edma version (here called "v1"),
|
||||
* register offsets are different compared to ColdFire mcf5441x 64 channels
|
||||
* edma (here called "v2").
|
||||
* On the 32 channels Vybrid/mpc577x edma version, register offsets are
|
||||
* different compared to ColdFire mcf5441x 64 channels edma.
|
||||
*
|
||||
* This function sets up register offsets as per proper declared version
|
||||
* so must be called in xxx_edma_probe() just after setting the
|
||||
@ -708,41 +832,30 @@ EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan);
|
||||
*/
|
||||
void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
|
||||
{
|
||||
bool is64 = !!(edma->drvdata->flags & FSL_EDMA_DRV_EDMA64);
|
||||
|
||||
edma->regs.cr = edma->membase + EDMA_CR;
|
||||
edma->regs.es = edma->membase + EDMA_ES;
|
||||
edma->regs.erql = edma->membase + EDMA_ERQ;
|
||||
edma->regs.eeil = edma->membase + EDMA_EEI;
|
||||
|
||||
edma->regs.serq = edma->membase + ((edma->drvdata->version == v2) ?
|
||||
EDMA64_SERQ : EDMA_SERQ);
|
||||
edma->regs.cerq = edma->membase + ((edma->drvdata->version == v2) ?
|
||||
EDMA64_CERQ : EDMA_CERQ);
|
||||
edma->regs.seei = edma->membase + ((edma->drvdata->version == v2) ?
|
||||
EDMA64_SEEI : EDMA_SEEI);
|
||||
edma->regs.ceei = edma->membase + ((edma->drvdata->version == v2) ?
|
||||
EDMA64_CEEI : EDMA_CEEI);
|
||||
edma->regs.cint = edma->membase + ((edma->drvdata->version == v2) ?
|
||||
EDMA64_CINT : EDMA_CINT);
|
||||
edma->regs.cerr = edma->membase + ((edma->drvdata->version == v2) ?
|
||||
EDMA64_CERR : EDMA_CERR);
|
||||
edma->regs.ssrt = edma->membase + ((edma->drvdata->version == v2) ?
|
||||
EDMA64_SSRT : EDMA_SSRT);
|
||||
edma->regs.cdne = edma->membase + ((edma->drvdata->version == v2) ?
|
||||
EDMA64_CDNE : EDMA_CDNE);
|
||||
edma->regs.intl = edma->membase + ((edma->drvdata->version == v2) ?
|
||||
EDMA64_INTL : EDMA_INTR);
|
||||
edma->regs.errl = edma->membase + ((edma->drvdata->version == v2) ?
|
||||
EDMA64_ERRL : EDMA_ERR);
|
||||
edma->regs.serq = edma->membase + (is64 ? EDMA64_SERQ : EDMA_SERQ);
|
||||
edma->regs.cerq = edma->membase + (is64 ? EDMA64_CERQ : EDMA_CERQ);
|
||||
edma->regs.seei = edma->membase + (is64 ? EDMA64_SEEI : EDMA_SEEI);
|
||||
edma->regs.ceei = edma->membase + (is64 ? EDMA64_CEEI : EDMA_CEEI);
|
||||
edma->regs.cint = edma->membase + (is64 ? EDMA64_CINT : EDMA_CINT);
|
||||
edma->regs.cerr = edma->membase + (is64 ? EDMA64_CERR : EDMA_CERR);
|
||||
edma->regs.ssrt = edma->membase + (is64 ? EDMA64_SSRT : EDMA_SSRT);
|
||||
edma->regs.cdne = edma->membase + (is64 ? EDMA64_CDNE : EDMA_CDNE);
|
||||
edma->regs.intl = edma->membase + (is64 ? EDMA64_INTL : EDMA_INTR);
|
||||
edma->regs.errl = edma->membase + (is64 ? EDMA64_ERRL : EDMA_ERR);
|
||||
|
||||
if (edma->drvdata->version == v2) {
|
||||
if (is64) {
|
||||
edma->regs.erqh = edma->membase + EDMA64_ERQH;
|
||||
edma->regs.eeih = edma->membase + EDMA64_EEIH;
|
||||
edma->regs.errh = edma->membase + EDMA64_ERRH;
|
||||
edma->regs.inth = edma->membase + EDMA64_INTH;
|
||||
}
|
||||
|
||||
edma->regs.tcd = edma->membase + EDMA_TCD;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fsl_edma_setup_regs);
|
||||
|
||||
MODULE_LICENSE("GPL v2");
|
||||
|
@ -29,16 +29,6 @@
|
||||
#define EDMA_TCD_ATTR_DMOD(x) (((x) & GENMASK(4, 0)) << 3)
|
||||
#define EDMA_TCD_ATTR_SSIZE(x) (((x) & GENMASK(2, 0)) << 8)
|
||||
#define EDMA_TCD_ATTR_SMOD(x) (((x) & GENMASK(4, 0)) << 11)
|
||||
#define EDMA_TCD_ATTR_DSIZE_8BIT 0
|
||||
#define EDMA_TCD_ATTR_DSIZE_16BIT BIT(0)
|
||||
#define EDMA_TCD_ATTR_DSIZE_32BIT BIT(1)
|
||||
#define EDMA_TCD_ATTR_DSIZE_64BIT (BIT(0) | BIT(1))
|
||||
#define EDMA_TCD_ATTR_DSIZE_32BYTE (BIT(2) | BIT(0))
|
||||
#define EDMA_TCD_ATTR_SSIZE_8BIT 0
|
||||
#define EDMA_TCD_ATTR_SSIZE_16BIT (EDMA_TCD_ATTR_DSIZE_16BIT << 8)
|
||||
#define EDMA_TCD_ATTR_SSIZE_32BIT (EDMA_TCD_ATTR_DSIZE_32BIT << 8)
|
||||
#define EDMA_TCD_ATTR_SSIZE_64BIT (EDMA_TCD_ATTR_DSIZE_64BIT << 8)
|
||||
#define EDMA_TCD_ATTR_SSIZE_32BYTE (EDMA_TCD_ATTR_DSIZE_32BYTE << 8)
|
||||
|
||||
#define EDMA_TCD_CITER_CITER(x) ((x) & GENMASK(14, 0))
|
||||
#define EDMA_TCD_BITER_BITER(x) ((x) & GENMASK(14, 0))
|
||||
@ -52,16 +42,32 @@
|
||||
#define EDMA_TCD_CSR_ACTIVE BIT(6)
|
||||
#define EDMA_TCD_CSR_DONE BIT(7)
|
||||
|
||||
#define EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(x) ((x) & GENMASK(9, 0))
|
||||
#define EDMA_V3_TCD_NBYTES_MLOFF(x) (x << 10)
|
||||
#define EDMA_V3_TCD_NBYTES_DMLOE (1 << 30)
|
||||
#define EDMA_V3_TCD_NBYTES_SMLOE (1 << 31)
|
||||
|
||||
#define EDMAMUX_CHCFG_DIS 0x0
|
||||
#define EDMAMUX_CHCFG_ENBL 0x80
|
||||
#define EDMAMUX_CHCFG_SOURCE(n) ((n) & 0x3F)
|
||||
|
||||
#define DMAMUX_NR 2
|
||||
|
||||
#define EDMA_TCD 0x1000
|
||||
|
||||
#define FSL_EDMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
|
||||
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
|
||||
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
|
||||
BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
|
||||
|
||||
#define EDMA_V3_CH_SBR_RD BIT(22)
|
||||
#define EDMA_V3_CH_SBR_WR BIT(21)
|
||||
#define EDMA_V3_CH_CSR_ERQ BIT(0)
|
||||
#define EDMA_V3_CH_CSR_EARQ BIT(1)
|
||||
#define EDMA_V3_CH_CSR_EEI BIT(2)
|
||||
#define EDMA_V3_CH_CSR_DONE BIT(30)
|
||||
#define EDMA_V3_CH_CSR_ACTIVE BIT(31)
|
||||
|
||||
enum fsl_edma_pm_state {
|
||||
RUNNING = 0,
|
||||
SUSPENDED,
|
||||
@ -81,6 +87,18 @@ struct fsl_edma_hw_tcd {
|
||||
__le16 biter;
|
||||
};
|
||||
|
||||
struct fsl_edma3_ch_reg {
|
||||
__le32 ch_csr;
|
||||
__le32 ch_es;
|
||||
__le32 ch_int;
|
||||
__le32 ch_sbr;
|
||||
__le32 ch_pri;
|
||||
__le32 ch_mux;
|
||||
__le32 ch_mattr; /* edma4, reserved for edma3 */
|
||||
__le32 ch_reserved;
|
||||
struct fsl_edma_hw_tcd tcd;
|
||||
} __packed;
|
||||
|
||||
/*
|
||||
* These are iomem pointers, for both v32 and v64.
|
||||
*/
|
||||
@ -103,7 +121,6 @@ struct edma_regs {
|
||||
void __iomem *intl;
|
||||
void __iomem *errh;
|
||||
void __iomem *errl;
|
||||
struct fsl_edma_hw_tcd __iomem *tcd;
|
||||
};
|
||||
|
||||
struct fsl_edma_sw_tcd {
|
||||
@ -126,7 +143,20 @@ struct fsl_edma_chan {
|
||||
dma_addr_t dma_dev_addr;
|
||||
u32 dma_dev_size;
|
||||
enum dma_data_direction dma_dir;
|
||||
char chan_name[16];
|
||||
char chan_name[32];
|
||||
struct fsl_edma_hw_tcd __iomem *tcd;
|
||||
u32 real_count;
|
||||
struct work_struct issue_worker;
|
||||
struct platform_device *pdev;
|
||||
struct device *pd_dev;
|
||||
u32 srcid;
|
||||
struct clk *clk;
|
||||
int priority;
|
||||
int hw_chanid;
|
||||
int txirq;
|
||||
bool is_rxchan;
|
||||
bool is_remote;
|
||||
bool is_multi_fifo;
|
||||
};
|
||||
|
||||
struct fsl_edma_desc {
|
||||
@ -138,17 +168,32 @@ struct fsl_edma_desc {
|
||||
struct fsl_edma_sw_tcd tcd[];
|
||||
};
|
||||
|
||||
enum edma_version {
|
||||
v1, /* 32ch, Vybrid, mpc57x, etc */
|
||||
v2, /* 64ch Coldfire */
|
||||
v3, /* 32ch, i.mx7ulp */
|
||||
};
|
||||
#define FSL_EDMA_DRV_HAS_DMACLK BIT(0)
|
||||
#define FSL_EDMA_DRV_MUX_SWAP BIT(1)
|
||||
#define FSL_EDMA_DRV_CONFIG32 BIT(2)
|
||||
#define FSL_EDMA_DRV_WRAP_IO BIT(3)
|
||||
#define FSL_EDMA_DRV_EDMA64 BIT(4)
|
||||
#define FSL_EDMA_DRV_HAS_PD BIT(5)
|
||||
#define FSL_EDMA_DRV_HAS_CHCLK BIT(6)
|
||||
#define FSL_EDMA_DRV_HAS_CHMUX BIT(7)
|
||||
/* imx8 QM audio edma remote local swapped */
|
||||
#define FSL_EDMA_DRV_QUIRK_SWAPPED BIT(8)
|
||||
/* control and status register is in tcd address space, edma3 reg layout */
|
||||
#define FSL_EDMA_DRV_SPLIT_REG BIT(9)
|
||||
#define FSL_EDMA_DRV_BUS_8BYTE BIT(10)
|
||||
#define FSL_EDMA_DRV_DEV_TO_DEV BIT(11)
|
||||
#define FSL_EDMA_DRV_ALIGN_64BYTE BIT(12)
|
||||
|
||||
#define FSL_EDMA_DRV_EDMA3 (FSL_EDMA_DRV_SPLIT_REG | \
|
||||
FSL_EDMA_DRV_BUS_8BYTE | \
|
||||
FSL_EDMA_DRV_DEV_TO_DEV | \
|
||||
FSL_EDMA_DRV_ALIGN_64BYTE)
|
||||
|
||||
struct fsl_edma_drvdata {
|
||||
enum edma_version version;
|
||||
u32 dmamuxs;
|
||||
bool has_dmaclk;
|
||||
bool mux_swap;
|
||||
u32 dmamuxs; /* only used before v3 */
|
||||
u32 chreg_off;
|
||||
u32 chreg_space_sz;
|
||||
u32 flags;
|
||||
int (*setup_irq)(struct platform_device *pdev,
|
||||
struct fsl_edma_engine *fsl_edma);
|
||||
};
|
||||
@ -159,6 +204,7 @@ struct fsl_edma_engine {
|
||||
void __iomem *muxbase[DMAMUX_NR];
|
||||
struct clk *muxclk[DMAMUX_NR];
|
||||
struct clk *dmaclk;
|
||||
struct clk *chclk;
|
||||
struct mutex fsl_edma_mutex;
|
||||
const struct fsl_edma_drvdata *drvdata;
|
||||
u32 n_chans;
|
||||
@ -166,9 +212,28 @@ struct fsl_edma_engine {
|
||||
int errirq;
|
||||
bool big_endian;
|
||||
struct edma_regs regs;
|
||||
u64 chan_masked;
|
||||
struct fsl_edma_chan chans[];
|
||||
};
|
||||
|
||||
#define edma_read_tcdreg(chan, __name) \
|
||||
(sizeof(chan->tcd->__name) == sizeof(u32) ? \
|
||||
edma_readl(chan->edma, &chan->tcd->__name) : \
|
||||
edma_readw(chan->edma, &chan->tcd->__name))
|
||||
|
||||
#define edma_write_tcdreg(chan, val, __name) \
|
||||
(sizeof(chan->tcd->__name) == sizeof(u32) ? \
|
||||
edma_writel(chan->edma, (u32 __force)val, &chan->tcd->__name) : \
|
||||
edma_writew(chan->edma, (u16 __force)val, &chan->tcd->__name))
|
||||
|
||||
#define edma_readl_chreg(chan, __name) \
|
||||
edma_readl(chan->edma, \
|
||||
(void __iomem *)&(container_of(chan->tcd, struct fsl_edma3_ch_reg, tcd)->__name))
|
||||
|
||||
#define edma_writel_chreg(chan, val, __name) \
|
||||
edma_writel(chan->edma, val, \
|
||||
(void __iomem *)&(container_of(chan->tcd, struct fsl_edma3_ch_reg, tcd)->__name))
|
||||
|
||||
/*
|
||||
* R/W functions for big- or little-endian registers:
|
||||
* The eDMA controller's endian is independent of the CPU core's endian.
|
||||
@ -183,6 +248,14 @@ static inline u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
|
||||
return ioread32(addr);
|
||||
}
|
||||
|
||||
static inline u16 edma_readw(struct fsl_edma_engine *edma, void __iomem *addr)
|
||||
{
|
||||
if (edma->big_endian)
|
||||
return ioread16be(addr);
|
||||
else
|
||||
return ioread16(addr);
|
||||
}
|
||||
|
||||
static inline void edma_writeb(struct fsl_edma_engine *edma,
|
||||
u8 val, void __iomem *addr)
|
||||
{
|
||||
@ -217,11 +290,23 @@ static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
|
||||
return container_of(chan, struct fsl_edma_chan, vchan.chan);
|
||||
}
|
||||
|
||||
static inline u32 fsl_edma_drvflags(struct fsl_edma_chan *fsl_chan)
|
||||
{
|
||||
return fsl_chan->edma->drvdata->flags;
|
||||
}
|
||||
|
||||
static inline struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
|
||||
{
|
||||
return container_of(vd, struct fsl_edma_desc, vdesc);
|
||||
}
|
||||
|
||||
static inline void fsl_edma_err_chan_handler(struct fsl_edma_chan *fsl_chan)
|
||||
{
|
||||
fsl_chan->status = DMA_ERROR;
|
||||
fsl_chan->idle = true;
|
||||
}
|
||||
|
||||
void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan);
|
||||
void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan);
|
||||
void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
|
||||
unsigned int slot, bool enable);
|
||||
|
@@ -18,9 +18,15 @@
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>

#include "fsl-edma-common.h"

#define ARGS_RX BIT(0)
#define ARGS_REMOTE BIT(1)
#define ARGS_MULTI_FIFO BIT(2)

static void fsl_edma_synchronize(struct dma_chan *chan)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

@@ -33,7 +39,6 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
struct fsl_edma_engine *fsl_edma = dev_id;
unsigned int intr, ch;
struct edma_regs *regs = &fsl_edma->regs;
struct fsl_edma_chan *fsl_chan;

intr = edma_readl(fsl_edma, regs->intl);
if (!intr)

@@ -42,36 +47,28 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
for (ch = 0; ch < fsl_edma->n_chans; ch++) {
if (intr & (0x1 << ch)) {
edma_writeb(fsl_edma, EDMA_CINT_CINT(ch), regs->cint);

fsl_chan = &fsl_edma->chans[ch];

spin_lock(&fsl_chan->vchan.lock);

if (!fsl_chan->edesc) {
/* terminate_all called before */
spin_unlock(&fsl_chan->vchan.lock);
continue;
}

if (!fsl_chan->edesc->iscyclic) {
list_del(&fsl_chan->edesc->vdesc.node);
vchan_cookie_complete(&fsl_chan->edesc->vdesc);
fsl_chan->edesc = NULL;
fsl_chan->status = DMA_COMPLETE;
fsl_chan->idle = true;
} else {
vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
}

if (!fsl_chan->edesc)
fsl_edma_xfer_desc(fsl_chan);

spin_unlock(&fsl_chan->vchan.lock);
fsl_edma_tx_chan_handler(&fsl_edma->chans[ch]);
}
}
return IRQ_HANDLED;
}

static irqreturn_t fsl_edma3_tx_handler(int irq, void *dev_id)
{
struct fsl_edma_chan *fsl_chan = dev_id;
unsigned int intr;

intr = edma_readl_chreg(fsl_chan, ch_int);
if (!intr)
return IRQ_HANDLED;

edma_writel_chreg(fsl_chan, 1, ch_int);

fsl_edma_tx_chan_handler(fsl_chan);

return IRQ_HANDLED;
}

static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
{
struct fsl_edma_engine *fsl_edma = dev_id;

@@ -86,8 +83,7 @@ static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
if (err & (0x1 << ch)) {
fsl_edma_disable_request(&fsl_edma->chans[ch]);
edma_writeb(fsl_edma, EDMA_CERR_CERR(ch), regs->cerr);
fsl_edma->chans[ch].status = DMA_ERROR;
fsl_edma->chans[ch].idle = true;
fsl_edma_err_chan_handler(&fsl_edma->chans[ch]);
}
}
return IRQ_HANDLED;

@@ -134,11 +130,58 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
return NULL;
}

static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
struct dma_chan *chan, *_chan;
struct fsl_edma_chan *fsl_chan;
bool b_chmux;
int i;

if (dma_spec->args_count != 3)
return NULL;

b_chmux = !!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_HAS_CHMUX);

mutex_lock(&fsl_edma->fsl_edma_mutex);
list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels,
device_node) {

if (chan->client_count)
continue;

fsl_chan = to_fsl_edma_chan(chan);
i = fsl_chan - fsl_edma->chans;

chan = dma_get_slave_channel(chan);
chan->device->privatecnt++;
fsl_chan->priority = dma_spec->args[1];
fsl_chan->is_rxchan = dma_spec->args[2] & ARGS_RX;
fsl_chan->is_remote = dma_spec->args[2] & ARGS_REMOTE;
fsl_chan->is_multi_fifo = dma_spec->args[2] & ARGS_MULTI_FIFO;

if (!b_chmux && i == dma_spec->args[0]) {
mutex_unlock(&fsl_edma->fsl_edma_mutex);
return chan;
} else if (b_chmux && !fsl_chan->srcid) {
/* if controller support channel mux, choose a free channel */
fsl_chan->srcid = dma_spec->args[0];
mutex_unlock(&fsl_edma->fsl_edma_mutex);
return chan;
}
}
mutex_unlock(&fsl_edma->fsl_edma_mutex);
return NULL;
}
static int
fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
int ret;

edma_writel(fsl_edma, ~0, fsl_edma->regs.intl);

fsl_edma->txirq = platform_get_irq_byname(pdev, "edma-tx");
if (fsl_edma->txirq < 0)
return fsl_edma->txirq;

@@ -173,6 +216,37 @@ fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma
return 0;
}

static int fsl_edma3_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
int ret;
int i;

for (i = 0; i < fsl_edma->n_chans; i++) {

struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];

if (fsl_edma->chan_masked & BIT(i))
continue;

/* request channel irq */
fsl_chan->txirq = platform_get_irq(pdev, i);
if (fsl_chan->txirq < 0) {
dev_err(&pdev->dev, "Can't get chan %d's irq.\n", i);
return -EINVAL;
}

ret = devm_request_irq(&pdev->dev, fsl_chan->txirq,
fsl_edma3_tx_handler, IRQF_SHARED,
fsl_chan->chan_name, fsl_chan);
if (ret) {
dev_err(&pdev->dev, "Can't register chan%d's IRQ.\n", i);
return -EINVAL;
}
}

return 0;
}

static int
fsl_edma2_irq_init(struct platform_device *pdev,
struct fsl_edma_engine *fsl_edma)

@@ -180,6 +254,8 @@ fsl_edma2_irq_init(struct platform_device *pdev,
int i, ret, irq;
int count;

edma_writel(fsl_edma, ~0, fsl_edma->regs.intl);

count = platform_irq_count(pdev);
dev_dbg(&pdev->dev, "%s Found %d interrupts\r\n", __func__, count);
if (count <= 2) {

@@ -197,8 +273,6 @@ fsl_edma2_irq_init(struct platform_device *pdev,
if (irq < 0)
return -ENXIO;

sprintf(fsl_edma->chans[i].chan_name, "eDMA2-CH%02d", i);

/* The last IRQ is for eDMA err */
if (i == count - 1)
ret = devm_request_irq(&pdev->dev, irq,

@@ -236,33 +310,110 @@ static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks)
}
static struct fsl_edma_drvdata vf610_data = {
.version = v1,
.dmamuxs = DMAMUX_NR,
.flags = FSL_EDMA_DRV_WRAP_IO,
.chreg_off = EDMA_TCD,
.chreg_space_sz = sizeof(struct fsl_edma_hw_tcd),
.setup_irq = fsl_edma_irq_init,
};

static struct fsl_edma_drvdata ls1028a_data = {
.version = v1,
.dmamuxs = DMAMUX_NR,
.mux_swap = true,
.flags = FSL_EDMA_DRV_MUX_SWAP | FSL_EDMA_DRV_WRAP_IO,
.chreg_off = EDMA_TCD,
.chreg_space_sz = sizeof(struct fsl_edma_hw_tcd),
.setup_irq = fsl_edma_irq_init,
};

static struct fsl_edma_drvdata imx7ulp_data = {
.version = v3,
.dmamuxs = 1,
.has_dmaclk = true,
.chreg_off = EDMA_TCD,
.chreg_space_sz = sizeof(struct fsl_edma_hw_tcd),
.flags = FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_CONFIG32,
.setup_irq = fsl_edma2_irq_init,
};

static struct fsl_edma_drvdata imx8qm_data = {
.flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3,
.chreg_space_sz = 0x10000,
.chreg_off = 0x10000,
.setup_irq = fsl_edma3_irq_init,
};

static struct fsl_edma_drvdata imx8qm_audio_data = {
.flags = FSL_EDMA_DRV_QUIRK_SWAPPED | FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3,
.chreg_space_sz = 0x10000,
.chreg_off = 0x10000,
.setup_irq = fsl_edma3_irq_init,
};

static struct fsl_edma_drvdata imx93_data3 = {
.flags = FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA3,
.chreg_space_sz = 0x10000,
.chreg_off = 0x10000,
.setup_irq = fsl_edma3_irq_init,
};

static struct fsl_edma_drvdata imx93_data4 = {
.flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA3,
.chreg_space_sz = 0x8000,
.chreg_off = 0x10000,
.setup_irq = fsl_edma3_irq_init,
};
static const struct of_device_id fsl_edma_dt_ids[] = {
{ .compatible = "fsl,vf610-edma", .data = &vf610_data},
{ .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data},
{ .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data},
{ .compatible = "fsl,imx8qm-edma", .data = &imx8qm_data},
{ .compatible = "fsl,imx8qm-adma", .data = &imx8qm_audio_data},
{ .compatible = "fsl,imx93-edma3", .data = &imx93_data3},
{ .compatible = "fsl,imx93-edma4", .data = &imx93_data4},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);

static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
struct fsl_edma_chan *fsl_chan;
struct device_link *link;
struct device *pd_chan;
struct device *dev;
int i;

dev = &pdev->dev;

for (i = 0; i < fsl_edma->n_chans; i++) {
if (fsl_edma->chan_masked & BIT(i))
continue;

fsl_chan = &fsl_edma->chans[i];

pd_chan = dev_pm_domain_attach_by_id(dev, i);
if (IS_ERR_OR_NULL(pd_chan)) {
dev_err(dev, "Failed attach pd %d\n", i);
return -EINVAL;
}

link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
if (IS_ERR(link)) {
dev_err(dev, "Failed to add device_link to %d: %ld\n", i,
PTR_ERR(link));
return -EINVAL;
}

fsl_chan->pd_dev = pd_chan;

pm_runtime_use_autosuspend(fsl_chan->pd_dev);
pm_runtime_set_autosuspend_delay(fsl_chan->pd_dev, 200);
pm_runtime_set_active(fsl_chan->pd_dev);
}

return 0;
}
static int fsl_edma_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =

@@ -270,9 +421,9 @@ static int fsl_edma_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct fsl_edma_engine *fsl_edma;
const struct fsl_edma_drvdata *drvdata = NULL;
struct fsl_edma_chan *fsl_chan;
u32 chan_mask[2] = {0, 0};
struct edma_regs *regs;
int len, chans;
int chans;
int ret, i;

if (of_id)

@@ -288,8 +439,8 @@ static int fsl_edma_probe(struct platform_device *pdev)
return ret;
}

len = sizeof(*fsl_edma) + sizeof(*fsl_chan) * chans;
fsl_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
fsl_edma = devm_kzalloc(&pdev->dev, struct_size(fsl_edma, chans, chans),
GFP_KERNEL);
if (!fsl_edma)
return -ENOMEM;

@@ -301,26 +452,42 @@ static int fsl_edma_probe(struct platform_device *pdev)
if (IS_ERR(fsl_edma->membase))
return PTR_ERR(fsl_edma->membase);

fsl_edma_setup_regs(fsl_edma);
regs = &fsl_edma->regs;
if (!(drvdata->flags & FSL_EDMA_DRV_SPLIT_REG)) {
fsl_edma_setup_regs(fsl_edma);
regs = &fsl_edma->regs;
}

if (drvdata->has_dmaclk) {
fsl_edma->dmaclk = devm_clk_get(&pdev->dev, "dma");
if (drvdata->flags & FSL_EDMA_DRV_HAS_DMACLK) {
fsl_edma->dmaclk = devm_clk_get_enabled(&pdev->dev, "dma");
if (IS_ERR(fsl_edma->dmaclk)) {
dev_err(&pdev->dev, "Missing DMA block clock.\n");
return PTR_ERR(fsl_edma->dmaclk);
}
}

ret = clk_prepare_enable(fsl_edma->dmaclk);
if (ret) {
dev_err(&pdev->dev, "DMA clk block failed.\n");
return ret;
if (drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK) {
fsl_edma->chclk = devm_clk_get_enabled(&pdev->dev, "mp");
if (IS_ERR(fsl_edma->chclk)) {
dev_err(&pdev->dev, "Missing MP block clock.\n");
return PTR_ERR(fsl_edma->chclk);
}
}

ret = of_property_read_variable_u32_array(np, "dma-channel-mask", chan_mask, 1, 2);

if (ret > 0) {
fsl_edma->chan_masked = chan_mask[1];
fsl_edma->chan_masked <<= 32;
fsl_edma->chan_masked |= chan_mask[0];
}

for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) {
char clkname[32];

/* eDMAv3 mux register move to TCD area if ch_mux exist */
if (drvdata->flags & FSL_EDMA_DRV_SPLIT_REG)
break;

fsl_edma->muxbase[i] = devm_platform_ioremap_resource(pdev,
1 + i);
if (IS_ERR(fsl_edma->muxbase[i])) {

@@ -330,26 +497,32 @@ static int fsl_edma_probe(struct platform_device *pdev)
}

sprintf(clkname, "dmamux%d", i);
fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname);
fsl_edma->muxclk[i] = devm_clk_get_enabled(&pdev->dev, clkname);
if (IS_ERR(fsl_edma->muxclk[i])) {
dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
/* on error: disable all previously enabled clks */
fsl_disable_clocks(fsl_edma, i);
return PTR_ERR(fsl_edma->muxclk[i]);
}

ret = clk_prepare_enable(fsl_edma->muxclk[i]);
if (ret)
/* on error: disable all previously enabled clks */
fsl_disable_clocks(fsl_edma, i);

}
fsl_edma->big_endian = of_property_read_bool(np, "big-endian");

if (drvdata->flags & FSL_EDMA_DRV_HAS_PD) {
ret = fsl_edma3_attach_pd(pdev, fsl_edma);
if (ret)
return ret;
}

INIT_LIST_HEAD(&fsl_edma->dma_dev.channels);
for (i = 0; i < fsl_edma->n_chans; i++) {
struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];
int len;

if (fsl_edma->chan_masked & BIT(i))
continue;

snprintf(fsl_chan->chan_name, sizeof(fsl_chan->chan_name), "%s-CH%02d",
dev_name(&pdev->dev), i);

fsl_chan->edma = fsl_edma;
fsl_chan->pm_state = RUNNING;

@@ -357,13 +530,19 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_chan->idle = true;
fsl_chan->dma_dir = DMA_NONE;
fsl_chan->vchan.desc_free = fsl_edma_free_desc;

len = (drvdata->flags & FSL_EDMA_DRV_SPLIT_REG) ?
offsetof(struct fsl_edma3_ch_reg, tcd) : 0;
fsl_chan->tcd = fsl_edma->membase
+ i * drvdata->chreg_space_sz + drvdata->chreg_off + len;

fsl_chan->pdev = pdev;
vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);

edma_writew(fsl_edma, 0x0, &regs->tcd[i].csr);
edma_write_tcdreg(fsl_chan, 0, csr);
fsl_edma_chan_mux(fsl_chan, 0, false);
}

edma_writel(fsl_edma, ~0, regs->intl);
ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma);
if (ret)
return ret;

@@ -391,33 +570,47 @@ static int fsl_edma_probe(struct platform_device *pdev)

fsl_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
fsl_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
fsl_edma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);

fsl_edma->dma_dev.copy_align = DMAENGINE_ALIGN_32_BYTES;
if (drvdata->flags & FSL_EDMA_DRV_BUS_8BYTE) {
fsl_edma->dma_dev.src_addr_widths |= BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
fsl_edma->dma_dev.dst_addr_widths |= BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
}

fsl_edma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
if (drvdata->flags & FSL_EDMA_DRV_DEV_TO_DEV)
fsl_edma->dma_dev.directions |= BIT(DMA_DEV_TO_DEV);

fsl_edma->dma_dev.copy_align = drvdata->flags & FSL_EDMA_DRV_ALIGN_64BYTE ?
DMAENGINE_ALIGN_64_BYTES :
DMAENGINE_ALIGN_32_BYTES;

/* Per worst case 'nbytes = 1' take CITER as the max_seg_size */
dma_set_max_seg_size(fsl_edma->dma_dev.dev, 0x3fff);

fsl_edma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;

platform_set_drvdata(pdev, fsl_edma);

ret = dma_async_device_register(&fsl_edma->dma_dev);
if (ret) {
dev_err(&pdev->dev,
"Can't register Freescale eDMA engine. (%d)\n", ret);
fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
return ret;
}

ret = of_dma_controller_register(np, fsl_edma_xlate, fsl_edma);
ret = of_dma_controller_register(np,
drvdata->flags & FSL_EDMA_DRV_SPLIT_REG ? fsl_edma3_xlate : fsl_edma_xlate,
fsl_edma);
if (ret) {
dev_err(&pdev->dev,
"Can't register Freescale eDMA of_dma. (%d)\n", ret);
dma_async_device_unregister(&fsl_edma->dma_dev);
fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
return ret;
}

/* enable round robin arbitration */
edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
if (!(drvdata->flags & FSL_EDMA_DRV_SPLIT_REG))
edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);

return 0;
}

@@ -470,7 +663,7 @@ static int fsl_edma_resume_early(struct device *dev)
for (i = 0; i < fsl_edma->n_chans; i++) {
fsl_chan = &fsl_edma->chans[i];
fsl_chan->pm_state = RUNNING;
edma_writew(fsl_edma, 0x0, &regs->tcd[i].csr);
edma_write_tcdreg(fsl_chan, 0, csr);
if (fsl_chan->slave_id != 0)
fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true);
}
@@ -13,10 +13,10 @@

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

#include "virt-dma.h"
#include "fsldma.h"

@@ -60,9 +60,10 @@
*/
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>

@@ -28,9 +28,10 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/fsldma.h>
#include "dmaengine.h"
#include "fsldma.h"
@@ -769,8 +769,6 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
goto err_alloc;
}

memset(addr, 0, size);

spin_lock(&evl->lock);
evl->log = addr;
evl->dma = dma_addr;

@@ -660,8 +660,6 @@ int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
int idxd_register_devices(struct idxd_device *idxd);
void idxd_unregister_devices(struct idxd_device *idxd);
int idxd_register_driver(void);
void idxd_unregister_driver(void);
void idxd_wqs_quiesce(struct idxd_device *idxd);
bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc);
void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count);

@@ -673,8 +671,6 @@ void idxd_mask_error_interrupts(struct idxd_device *idxd);
void idxd_unmask_error_interrupts(struct idxd_device *idxd);

/* device control */
int idxd_register_idxd_drv(void);
void idxd_unregister_idxd_drv(void);
int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
int drv_enable_wq(struct idxd_wq *wq);

@@ -719,7 +715,6 @@ int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc);
/* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd);
void idxd_unregister_dma_device(struct idxd_device *idxd);
void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
void idxd_dma_complete_txd(struct idxd_desc *desc,
enum idxd_complete_type comp_type, bool free_desc);

@@ -245,12 +245,11 @@ static void perfmon_pmu_event_update(struct perf_event *event)
int shift = 64 - idxd->idxd_pmu->counter_width;
struct hw_perf_event *hwc = &event->hw;

prev_raw_count = local64_read(&hwc->prev_count);
do {
prev_raw_count = local64_read(&hwc->prev_count);
new_raw_count = perfmon_pmu_read_counter(event);
} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count);

} while (!local64_try_cmpxchg(&hwc->prev_count,
&prev_raw_count, new_raw_count));
n = (new_raw_count << shift);
p = (prev_raw_count << shift);

@@ -1088,8 +1088,8 @@ static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute
if (wq->state != IDXD_WQ_DISABLED)
return -EPERM;

if (!idxd->hw.wq_cap.wq_ats_support)
return -EOPNOTSUPP;
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
return -EPERM;

rc = kstrtobool(buf, &ats_dis);
if (rc < 0)

@@ -1124,8 +1124,8 @@ static ssize_t wq_prs_disable_store(struct device *dev, struct device_attribute
if (wq->state != IDXD_WQ_DISABLED)
return -EPERM;

if (!idxd->hw.wq_cap.wq_prs_support)
return -EOPNOTSUPP;
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
return -EPERM;

rc = kstrtobool(buf, &prs_dis);
if (rc < 0)

@@ -1281,12 +1281,9 @@ static struct attribute *idxd_wq_attributes[] = {
NULL,
};

static bool idxd_wq_attr_op_config_invisible(struct attribute *attr,
struct idxd_device *idxd)
{
return attr == &dev_attr_wq_op_config.attr &&
!idxd->hw.wq_cap.op_config;
}
/* A WQ attr is invisible if the feature is not supported in WQCAP. */
#define idxd_wq_attr_invisible(name, cap_field, a, idxd) \
((a) == &dev_attr_wq_##name.attr && !(idxd)->hw.wq_cap.cap_field)

static bool idxd_wq_attr_max_batch_size_invisible(struct attribute *attr,
struct idxd_device *idxd)

@@ -1296,13 +1293,6 @@ static bool idxd_wq_attr_max_batch_size_invisible(struct attribute *attr,
idxd->data->type == IDXD_TYPE_IAX;
}

static bool idxd_wq_attr_wq_prs_disable_invisible(struct attribute *attr,
struct idxd_device *idxd)
{
return attr == &dev_attr_wq_prs_disable.attr &&
!idxd->hw.wq_cap.wq_prs_support;
}

static umode_t idxd_wq_attr_visible(struct kobject *kobj,
struct attribute *attr, int n)
{

@@ -1310,13 +1300,16 @@ static umode_t idxd_wq_attr_visible(struct kobject *kobj,
struct idxd_wq *wq = confdev_to_wq(dev);
struct idxd_device *idxd = wq->idxd;

if (idxd_wq_attr_op_config_invisible(attr, idxd))
if (idxd_wq_attr_invisible(op_config, op_config, attr, idxd))
return 0;

if (idxd_wq_attr_max_batch_size_invisible(attr, idxd))
return 0;

if (idxd_wq_attr_wq_prs_disable_invisible(attr, idxd))
if (idxd_wq_attr_invisible(prs_disable, wq_prs_support, attr, idxd))
return 0;

if (idxd_wq_attr_invisible(ats_disable, wq_ats_support, attr, idxd))
return 0;

return attr->mode;

@@ -1473,7 +1466,7 @@ static ssize_t pasid_enabled_show(struct device *dev,
{
struct idxd_device *idxd = confdev_to_idxd(dev);

return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd));
return sysfs_emit(buf, "%u\n", device_user_pasid_enabled(idxd));
}
static DEVICE_ATTR_RO(pasid_enabled);
@@ -17,7 +17,6 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

@@ -21,7 +21,7 @@
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/of_dma.h>

#include <asm/irq.h>

@@ -31,7 +31,6 @@
#include <linux/dmaengine.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/workqueue.h>
@@ -51,7 +51,7 @@
/* pack PCI B/D/F into a u16 */
static inline u16 dcaid_from_pcidev(struct pci_dev *pci)
{
return (pci->bus->number << 8) | pci->devfn;
return pci_dev_id(pci);
}

static int dca_enabled_in_bios(struct pci_dev *pdev)

@@ -74,6 +74,7 @@ struct ioatdma_device {
struct dca_provider *dca;
enum ioat_irq_mode irq_mode;
u32 cap;
int chancnt;

/* shadow version for CB3.3 chan reset errata workaround */
u64 msixtba0;

@@ -420,7 +420,7 @@ int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma)

msix:
/* The number of MSI-X vectors should equal the number of channels */
msixcnt = ioat_dma->dma_dev.chancnt;
msixcnt = ioat_dma->chancnt;
for (i = 0; i < msixcnt; i++)
ioat_dma->msix_entries[i].entry = i;

@@ -511,7 +511,7 @@ static int ioat_probe(struct ioatdma_device *ioat_dma)
dma_cap_set(DMA_MEMCPY, dma->cap_mask);
dma->dev = &pdev->dev;

if (!dma->chancnt) {
if (!ioat_dma->chancnt) {
dev_err(dev, "channel enumeration error\n");
goto err_setup_interrupts;
}

@@ -567,15 +567,16 @@ static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
struct device *dev = &ioat_dma->pdev->dev;
struct dma_device *dma = &ioat_dma->dma_dev;
u8 xfercap_log;
int chancnt;
int i;

INIT_LIST_HEAD(&dma->channels);
dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
dma->chancnt &= 0x1f; /* bits [4:0] valid */
if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) {
chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
chancnt &= 0x1f; /* bits [4:0] valid */
if (chancnt > ARRAY_SIZE(ioat_dma->idx)) {
dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
dma->chancnt, ARRAY_SIZE(ioat_dma->idx));
dma->chancnt = ARRAY_SIZE(ioat_dma->idx);
chancnt, ARRAY_SIZE(ioat_dma->idx));
chancnt = ARRAY_SIZE(ioat_dma->idx);
}
xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
xfercap_log &= 0x1f; /* bits [4:0] valid */

@@ -583,7 +584,7 @@ static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
return;
dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

for (i = 0; i < dma->chancnt; i++) {
for (i = 0; i < chancnt; i++) {
ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
if (!ioat_chan)
break;

@@ -596,7 +597,7 @@ static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
break;
}
}
dma->chancnt = i;
ioat_dma->chancnt = i;
}

/**
@@ -1,2 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y += ipu_irq.o ipu_idmac.o
File diff suppressed because it is too large
@ -1,173 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (C) 2008
|
||||
* Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
|
||||
*
|
||||
* Copyright (C) 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
|
||||
*/
|
||||
|
||||
#ifndef _IPU_INTERN_H_
|
||||
#define _IPU_INTERN_H_
|
||||
|
||||
#include <linux/dmaengine.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/interrupt.h>
|
||||
|
||||
/* IPU Common registers */
|
||||
#define IPU_CONF 0x00
|
||||
#define IPU_CHA_BUF0_RDY 0x04
|
||||
#define IPU_CHA_BUF1_RDY 0x08
|
||||
#define IPU_CHA_DB_MODE_SEL 0x0C
|
||||
#define IPU_CHA_CUR_BUF 0x10
|
||||
#define IPU_FS_PROC_FLOW 0x14
|
||||
#define IPU_FS_DISP_FLOW 0x18
|
||||
#define IPU_TASKS_STAT 0x1C
|
||||
#define IPU_IMA_ADDR 0x20
|
||||
#define IPU_IMA_DATA 0x24
|
||||
#define IPU_INT_CTRL_1 0x28
|
||||
#define IPU_INT_CTRL_2 0x2C
|
||||
#define IPU_INT_CTRL_3 0x30
|
||||
#define IPU_INT_CTRL_4 0x34
|
||||
#define IPU_INT_CTRL_5 0x38
|
||||
#define IPU_INT_STAT_1 0x3C
|
||||
#define IPU_INT_STAT_2 0x40
|
||||
#define IPU_INT_STAT_3 0x44
|
||||
#define IPU_INT_STAT_4 0x48
|
||||
#define IPU_INT_STAT_5 0x4C
|
||||
#define IPU_BRK_CTRL_1 0x50
|
||||
#define IPU_BRK_CTRL_2 0x54
|
||||
#define IPU_BRK_STAT 0x58
|
||||
#define IPU_DIAGB_CTRL 0x5C
|
||||
|
||||
/* IPU_CONF Register bits */
|
||||
#define IPU_CONF_CSI_EN 0x00000001
|
||||
#define IPU_CONF_IC_EN 0x00000002
|
||||
#define IPU_CONF_ROT_EN 0x00000004
|
||||
#define IPU_CONF_PF_EN 0x00000008
|
||||
#define IPU_CONF_SDC_EN 0x00000010
|
||||
#define IPU_CONF_ADC_EN 0x00000020
|
||||
#define IPU_CONF_DI_EN 0x00000040
|
||||
#define IPU_CONF_DU_EN 0x00000080
|
||||
#define IPU_CONF_PXL_ENDIAN 0x00000100
|
||||
|
||||
/* Image Converter Registers */
|
||||
#define IC_CONF 0x88
|
||||
#define IC_PRP_ENC_RSC 0x8C
|
||||
#define IC_PRP_VF_RSC 0x90
|
||||
#define IC_PP_RSC 0x94
|
||||
#define IC_CMBP_1 0x98
|
||||
#define IC_CMBP_2 0x9C
|
||||
#define PF_CONF 0xA0
|
||||
#define IDMAC_CONF 0xA4
|
||||
#define IDMAC_CHA_EN 0xA8
|
||||
#define IDMAC_CHA_PRI 0xAC
|
||||
#define IDMAC_CHA_BUSY 0xB0
|
||||
|
||||
/* Image Converter Register bits */
|
||||
#define IC_CONF_PRPENC_EN 0x00000001
|
||||
#define IC_CONF_PRPENC_CSC1 0x00000002
|
||||
#define IC_CONF_PRPENC_ROT_EN 0x00000004
|
||||
#define IC_CONF_PRPVF_EN 0x00000100
|
||||
#define IC_CONF_PRPVF_CSC1 0x00000200
|
||||
#define IC_CONF_PRPVF_CSC2 0x00000400
|
||||
#define IC_CONF_PRPVF_CMB 0x00000800
|
||||
#define IC_CONF_PRPVF_ROT_EN 0x00001000
|
||||
#define IC_CONF_PP_EN 0x00010000
|
||||
#define IC_CONF_PP_CSC1 0x00020000
|
||||
#define IC_CONF_PP_CSC2 0x00040000
|
||||
#define IC_CONF_PP_CMB 0x00080000
|
||||
#define IC_CONF_PP_ROT_EN 0x00100000
|
||||
#define IC_CONF_IC_GLB_LOC_A 0x10000000
|
||||
#define IC_CONF_KEY_COLOR_EN 0x20000000
|
||||
#define IC_CONF_RWS_EN 0x40000000
|
||||
#define IC_CONF_CSI_MEM_WR_EN 0x80000000
|
||||
|
||||
#define IDMA_CHAN_INVALID 0x000000FF
|
||||
#define IDMA_IC_0 0x00000001
|
||||
#define IDMA_IC_1 0x00000002
|
||||
#define IDMA_IC_2 0x00000004
|
||||
#define IDMA_IC_3 0x00000008
|
||||
#define IDMA_IC_4 0x00000010
|
||||
#define IDMA_IC_5 0x00000020
|
||||
#define IDMA_IC_6 0x00000040
|
||||
#define IDMA_IC_7 0x00000080
|
||||
#define IDMA_IC_8 0x00000100
|
||||
#define IDMA_IC_9 0x00000200
|
||||
#define IDMA_IC_10 0x00000400
|
||||
#define IDMA_IC_11 0x00000800
|
||||
#define IDMA_IC_12 0x00001000
|
||||
#define IDMA_IC_13 0x00002000
|
||||
#define IDMA_SDC_BG 0x00004000
|
||||
#define IDMA_SDC_FG 0x00008000
|
||||
#define IDMA_SDC_MASK 0x00010000
|
||||
#define IDMA_SDC_PARTIAL 0x00020000
|
||||
#define IDMA_ADC_SYS1_WR 0x00040000
|
||||
#define IDMA_ADC_SYS2_WR 0x00080000
|
||||
#define IDMA_ADC_SYS1_CMD 0x00100000
|
||||
#define IDMA_ADC_SYS2_CMD 0x00200000
|
||||
#define IDMA_ADC_SYS1_RD 0x00400000
|
||||
#define IDMA_ADC_SYS2_RD 0x00800000
|
||||
#define IDMA_PF_QP 0x01000000
|
||||
#define IDMA_PF_BSP 0x02000000
|
||||
#define IDMA_PF_Y_IN 0x04000000
|
||||
#define IDMA_PF_U_IN 0x08000000
|
||||
#define IDMA_PF_V_IN 0x10000000
|
||||
#define IDMA_PF_Y_OUT 0x20000000
|
||||
#define IDMA_PF_U_OUT 0x40000000
|
||||
#define IDMA_PF_V_OUT 0x80000000
|
||||
|
||||
#define TSTAT_PF_H264_PAUSE 0x00000001
|
||||
#define TSTAT_CSI2MEM_MASK 0x0000000C
|
||||
#define TSTAT_CSI2MEM_OFFSET 2
|
||||
#define TSTAT_VF_MASK 0x00000600
|
||||
#define TSTAT_VF_OFFSET 9
|
||||
#define TSTAT_VF_ROT_MASK 0x000C0000
|
||||
#define TSTAT_VF_ROT_OFFSET 18
|
||||
#define TSTAT_ENC_MASK 0x00000180
|
||||
#define TSTAT_ENC_OFFSET 7
|
||||
#define TSTAT_ENC_ROT_MASK 0x00030000
|
||||
#define TSTAT_ENC_ROT_OFFSET 16
|
||||
#define TSTAT_PP_MASK 0x00001800
|
||||
#define TSTAT_PP_OFFSET 11
|
||||
#define TSTAT_PP_ROT_MASK 0x00300000
|
||||
#define TSTAT_PP_ROT_OFFSET 20
|
||||
#define TSTAT_PF_MASK 0x00C00000
|
||||
#define TSTAT_PF_OFFSET 22
|
||||
#define TSTAT_ADCSYS1_MASK 0x03000000
|
||||
#define TSTAT_ADCSYS1_OFFSET 24
|
||||
#define TSTAT_ADCSYS2_MASK 0x0C000000
|
||||
#define TSTAT_ADCSYS2_OFFSET 26
|
||||
|
||||
#define TASK_STAT_IDLE 0
|
||||
#define TASK_STAT_ACTIVE 1
|
||||
#define TASK_STAT_WAIT4READY 2
|
||||
|
||||
struct idmac {
|
||||
struct dma_device dma;
|
||||
};
|
||||
|
||||
struct ipu {
|
||||
void __iomem *reg_ipu;
|
||||
void __iomem *reg_ic;
|
||||
unsigned int irq_fn; /* IPU Function IRQ to the CPU */
|
||||
unsigned int irq_err; /* IPU Error IRQ to the CPU */
|
||||
unsigned int irq_base; /* Beginning of the IPU IRQ range */
|
||||
unsigned long channel_init_mask;
|
||||
spinlock_t lock;
|
||||
struct clk *ipu_clk;
|
||||
struct device *dev;
|
||||
struct idmac idmac;
|
||||
struct idmac_channel channel[IPU_CHANNELS_NUM];
|
||||
struct tasklet_struct tasklet;
|
||||
};
|
||||
|
||||
#define to_idmac(d) container_of(d, struct idmac, dma)
|
||||
|
||||
extern int ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev);
|
||||
extern void ipu_irq_detach_irq(struct ipu *ipu, struct platform_device *dev);
|
||||
|
||||
extern bool ipu_irq_status(uint32_t irq);
|
||||
extern int ipu_irq_map(unsigned int source);
|
||||
extern int ipu_irq_unmap(unsigned int source);
|
||||
|
||||
#endif
|
@ -1,367 +0,0 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (C) 2008
|
||||
* Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
|
||||
*/
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/dma/ipu-dma.h>
|
||||
|
||||
#include "ipu_intern.h"
|
||||
|
||||
/*
|
||||
* Register read / write - shall be inlined by the compiler
|
||||
*/
|
||||
static u32 ipu_read_reg(struct ipu *ipu, unsigned long reg)
|
||||
{
|
||||
return __raw_readl(ipu->reg_ipu + reg);
|
||||
}
|
||||
|
||||
static void ipu_write_reg(struct ipu *ipu, u32 value, unsigned long reg)
|
||||
{
|
||||
__raw_writel(value, ipu->reg_ipu + reg);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* IPU IRQ chip driver
|
||||
*/
|
||||
|
||||
#define IPU_IRQ_NR_FN_BANKS 3
|
||||
#define IPU_IRQ_NR_ERR_BANKS 2
|
||||
#define IPU_IRQ_NR_BANKS (IPU_IRQ_NR_FN_BANKS + IPU_IRQ_NR_ERR_BANKS)
|
||||
|
||||
struct ipu_irq_bank {
|
||||
unsigned int control;
|
||||
unsigned int status;
|
||||
struct ipu *ipu;
|
||||
};
|
||||
|
||||
static struct ipu_irq_bank irq_bank[IPU_IRQ_NR_BANKS] = {
|
||||
/* 3 groups of functional interrupts */
|
||||
{
|
||||
.control = IPU_INT_CTRL_1,
|
||||
.status = IPU_INT_STAT_1,
|
||||
}, {
|
||||
.control = IPU_INT_CTRL_2,
|
||||
.status = IPU_INT_STAT_2,
|
||||
}, {
|
||||
.control = IPU_INT_CTRL_3,
|
||||
.status = IPU_INT_STAT_3,
|
||||
},
|
||||
/* 2 groups of error interrupts */
|
||||
{
|
||||
.control = IPU_INT_CTRL_4,
|
||||
.status = IPU_INT_STAT_4,
|
||||
}, {
|
||||
.control = IPU_INT_CTRL_5,
|
||||
.status = IPU_INT_STAT_5,
|
||||
},
|
||||
};
|
||||
|
||||
struct ipu_irq_map {
|
||||
unsigned int irq;
|
||||
int source;
|
||||
struct ipu_irq_bank *bank;
|
||||
struct ipu *ipu;
|
||||
};
|
||||
|
||||
static struct ipu_irq_map irq_map[CONFIG_MX3_IPU_IRQS];
|
||||
/* Protects allocations from the above array of maps */
|
||||
static DEFINE_MUTEX(map_lock);
|
||||
/* Protects register accesses and individual mappings */
|
||||
static DEFINE_RAW_SPINLOCK(bank_lock);
|
||||
|
||||
static struct ipu_irq_map *src2map(unsigned int src)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++)
|
||||
if (irq_map[i].source == src)
|
||||
return irq_map + i;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void ipu_irq_unmask(struct irq_data *d)
|
||||
{
|
||||
struct ipu_irq_map *map = irq_data_get_irq_chip_data(d);
|
||||
struct ipu_irq_bank *bank;
|
||||
uint32_t reg;
|
||||
unsigned long lock_flags;
|
||||
|
||||
raw_spin_lock_irqsave(&bank_lock, lock_flags);
|
||||
|
||||
bank = map->bank;
|
||||
if (!bank) {
|
||||
raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
|
||||
pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
|
||||
return;
|
||||
}
|
||||
|
||||
reg = ipu_read_reg(bank->ipu, bank->control);
|
||||
reg |= (1UL << (map->source & 31));
|
||||
ipu_write_reg(bank->ipu, reg, bank->control);
|
||||
|
||||
raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
|
||||
}
|
||||
|
||||
static void ipu_irq_mask(struct irq_data *d)
|
||||
{
|
||||
struct ipu_irq_map *map = irq_data_get_irq_chip_data(d);
|
||||
struct ipu_irq_bank *bank;
|
||||
uint32_t reg;
|
||||
unsigned long lock_flags;
|
||||
|
||||
raw_spin_lock_irqsave(&bank_lock, lock_flags);
|
||||
|
||||
bank = map->bank;
|
||||
if (!bank) {
|
||||
raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
|
||||
pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
|
||||
return;
|
||||
}
|
||||
|
||||
reg = ipu_read_reg(bank->ipu, bank->control);
|
||||
reg &= ~(1UL << (map->source & 31));
|
||||
ipu_write_reg(bank->ipu, reg, bank->control);
|
||||
|
||||
raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
|
||||
}
|
||||
|
||||
static void ipu_irq_ack(struct irq_data *d)
|
||||
{
|
||||
struct ipu_irq_map *map = irq_data_get_irq_chip_data(d);
|
||||
struct ipu_irq_bank *bank;
|
||||
unsigned long lock_flags;
|
||||
|
||||
raw_spin_lock_irqsave(&bank_lock, lock_flags);
|
||||
|
||||
bank = map->bank;
|
||||
if (!bank) {
|
||||
raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
|
||||
pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
|
||||
return;
|
||||
}
|
||||
|
||||
ipu_write_reg(bank->ipu, 1UL << (map->source & 31), bank->status);
|
||||
raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
|
||||
}
|
||||
|
||||
/**
|
||||
* ipu_irq_status() - returns the current interrupt status of the specified IRQ.
|
||||
* @irq: interrupt line to get status for.
|
||||
* @return: true if the interrupt is pending/asserted or false if the
|
||||
* interrupt is not pending.
|
||||
*/
|
||||
bool ipu_irq_status(unsigned int irq)
|
||||
{
|
||||
struct ipu_irq_map *map = irq_get_chip_data(irq);
|
||||
struct ipu_irq_bank *bank;
|
||||
unsigned long lock_flags;
|
||||
bool ret;
|
||||
|
||||
raw_spin_lock_irqsave(&bank_lock, lock_flags);
|
||||
bank = map->bank;
|
||||
ret = bank && ipu_read_reg(bank->ipu, bank->status) &
|
||||
(1UL << (map->source & 31));
|
||||
raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* ipu_irq_map() - map an IPU interrupt source to an IRQ number
|
||||
* @source: interrupt source bit position (see below)
|
||||
* @return: mapped IRQ number or negative error code
|
||||
*
|
||||
* The source parameter has to be explained further. On i.MX31 IPU has 137 IRQ
|
||||
* sources, they are broken down in 5 32-bit registers, like 32, 32, 24, 32, 17.
|
||||
* However, the source argument of this function is not the sequence number of
|
||||
* the possible IRQ, but rather its bit position. So, first interrupt in fourth
|
||||
* register has source number 96, and not 88. This makes calculations easier,
|
||||
* and also provides forward compatibility with any future IPU implementations
|
||||
* with any interrupt bit assignments.
|
||||
*/
|
||||
int ipu_irq_map(unsigned int source)
|
||||
{
|
||||
int i, ret = -ENOMEM;
|
||||
struct ipu_irq_map *map;
|
||||
|
||||
might_sleep();
|
||||
|
||||
mutex_lock(&map_lock);
|
||||
map = src2map(source);
|
||||
if (map) {
|
||||
pr_err("IPU: Source %u already mapped to IRQ %u\n", source, map->irq);
|
||||
ret = -EBUSY;
|
||||
goto out;
|
||||
}
|
||||
|
||||
for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++) {
|
||||
if (irq_map[i].source < 0) {
|
||||
unsigned long lock_flags;
|
||||
|
||||
raw_spin_lock_irqsave(&bank_lock, lock_flags);
|
||||
irq_map[i].source = source;
|
||||
irq_map[i].bank = irq_bank + source / 32;
|
||||
raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
|
||||
|
||||
ret = irq_map[i].irq;
|
||||
pr_debug("IPU: mapped source %u to IRQ %u\n",
|
||||
source, ret);
|
||||
break;
|
||||
}
|
||||
}
|
||||
out:
|
||||
mutex_unlock(&map_lock);
|
||||
|
||||
if (ret < 0)
|
||||
pr_err("IPU: couldn't map source %u: %d\n", source, ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* ipu_irq_unmap() - unmap an IPU interrupt source
|
||||
* @source: interrupt source bit position (see ipu_irq_map())
|
||||
* @return: 0 or negative error code
|
||||
*/
|
||||
int ipu_irq_unmap(unsigned int source)
|
||||
{
|
||||
int i, ret = -EINVAL;
|
||||
|
||||
might_sleep();
|
||||
|
||||
mutex_lock(&map_lock);
|
||||
for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++) {
|
||||
if (irq_map[i].source == source) {
|
||||
unsigned long lock_flags;
|
||||
|
||||
pr_debug("IPU: unmapped source %u from IRQ %u\n",
|
||||
source, irq_map[i].irq);
|
||||
|
||||
raw_spin_lock_irqsave(&bank_lock, lock_flags);
|
||||
irq_map[i].source = -EINVAL;
|
||||
irq_map[i].bank = NULL;
|
||||
raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
|
||||
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
}
|
||||
mutex_unlock(&map_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Chained IRQ handler for IPU function and error interrupt */
|
||||
static void ipu_irq_handler(struct irq_desc *desc)
|
||||
{
|
||||
struct ipu *ipu = irq_desc_get_handler_data(desc);
|
||||
u32 status;
|
||||
int i, line;
|
||||
|
||||
for (i = 0; i < IPU_IRQ_NR_BANKS; i++) {
|
||||
struct ipu_irq_bank *bank = irq_bank + i;
|
||||
|
||||
raw_spin_lock(&bank_lock);
|
||||
status = ipu_read_reg(ipu, bank->status);
|
||||
/*
|
||||
* Don't think we have to clear all interrupts here, they will
|
||||
* be acked by ->handle_irq() (handle_level_irq). However, we
|
||||
* might want to clear unhandled interrupts after the loop...
|
||||
*/
|
||||
status &= ipu_read_reg(ipu, bank->control);
|
||||
raw_spin_unlock(&bank_lock);
|
||||
while ((line = ffs(status))) {
|
||||
struct ipu_irq_map *map;
|
||||
unsigned int irq;
|
||||
|
||||
line--;
|
||||
status &= ~(1UL << line);
|
||||
|
||||
raw_spin_lock(&bank_lock);
|
||||
map = src2map(32 * i + line);
|
||||
if (!map) {
|
||||
raw_spin_unlock(&bank_lock);
|
||||
pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
|
||||
line, i);
|
||||
continue;
|
||||
}
|
||||
irq = map->irq;
|
||||
raw_spin_unlock(&bank_lock);
|
||||
generic_handle_irq(irq);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static struct irq_chip ipu_irq_chip = {
|
||||
.name = "ipu_irq",
|
||||
.irq_ack = ipu_irq_ack,
|
||||
.irq_mask = ipu_irq_mask,
|
||||
.irq_unmask = ipu_irq_unmask,
|
||||
};
|
||||
|
||||
/* Install the IRQ handler */
|
||||
int __init ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev)
|
||||
{
|
||||
unsigned int irq, i;
|
||||
int irq_base = irq_alloc_descs(-1, 0, CONFIG_MX3_IPU_IRQS,
|
||||
numa_node_id());
|
||||
|
||||
if (irq_base < 0)
|
||||
return irq_base;
|
||||
|
||||
for (i = 0; i < IPU_IRQ_NR_BANKS; i++)
|
||||
irq_bank[i].ipu = ipu;
|
||||
|
||||
for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++) {
|
||||
int ret;
|
||||
|
||||
irq = irq_base + i;
|
||||
ret = irq_set_chip(irq, &ipu_irq_chip);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
ret = irq_set_chip_data(irq, irq_map + i);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
irq_map[i].ipu = ipu;
|
||||
irq_map[i].irq = irq;
|
||||
irq_map[i].source = -EINVAL;
|
||||
irq_set_handler(irq, handle_level_irq);
|
||||
irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
|
||||
}
|
||||
|
||||
irq_set_chained_handler_and_data(ipu->irq_fn, ipu_irq_handler, ipu);
|
||||
|
||||
irq_set_chained_handler_and_data(ipu->irq_err, ipu_irq_handler, ipu);
|
||||
|
||||
ipu->irq_base = irq_base;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ipu_irq_detach_irq(struct ipu *ipu, struct platform_device *dev)
|
||||
{
|
||||
unsigned int irq, irq_base;
|
||||
|
||||
irq_base = ipu->irq_base;
|
||||
|
||||
irq_set_chained_handler_and_data(ipu->irq_fn, NULL, NULL);
|
||||
|
||||
irq_set_chained_handler_and_data(ipu->irq_err, NULL, NULL);
|
||||
|
||||
for (irq = irq_base; irq < irq_base + CONFIG_MX3_IPU_IRQS; irq++) {
|
||||
irq_set_status_flags(irq, IRQ_NOREQUEST);
|
||||
irq_set_chip(irq, NULL);
|
||||
irq_set_chip_data(irq, NULL);
|
||||
}
|
||||
}
|
@@ -1732,9 +1732,4 @@ static struct platform_driver intel_ldma_driver = {
* registered DMA channels and DMA capabilities to clients before their
* initialization.
*/
static int __init intel_ldma_init(void)
{
return platform_driver_register(&intel_ldma_driver);
}

device_initcall(intel_ldma_init);
builtin_platform_driver(intel_ldma_driver);
@ -12,8 +12,10 @@
|
||||
#include <linux/err.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/mfd/syscon.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_dma.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/regmap.h>
|
||||
#include <linux/spinlock.h>
|
||||
|
||||
|
@ -19,7 +19,6 @@ static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
|
||||
struct fsl_edma_engine *mcf_edma = dev_id;
|
||||
struct edma_regs *regs = &mcf_edma->regs;
|
||||
unsigned int ch;
|
||||
struct fsl_edma_chan *mcf_chan;
|
||||
u64 intmap;
|
||||
|
||||
intmap = ioread32(regs->inth);
|
||||
@ -31,31 +30,7 @@ static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
|
||||
for (ch = 0; ch < mcf_edma->n_chans; ch++) {
|
||||
if (intmap & BIT(ch)) {
|
||||
iowrite8(EDMA_MASK_CH(ch), regs->cint);
|
||||
|
||||
mcf_chan = &mcf_edma->chans[ch];
|
||||
|
||||
spin_lock(&mcf_chan->vchan.lock);
|
||||
|
||||
if (!mcf_chan->edesc) {
|
||||
/* terminate_all called before */
|
||||
spin_unlock(&mcf_chan->vchan.lock);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!mcf_chan->edesc->iscyclic) {
|
||||
list_del(&mcf_chan->edesc->vdesc.node);
|
||||
vchan_cookie_complete(&mcf_chan->edesc->vdesc);
|
||||
mcf_chan->edesc = NULL;
|
||||
mcf_chan->status = DMA_COMPLETE;
|
||||
mcf_chan->idle = true;
|
||||
} else {
|
||||
vchan_cyclic_callback(&mcf_chan->edesc->vdesc);
|
||||
}
|
||||
|
||||
if (!mcf_chan->edesc)
|
||||
fsl_edma_xfer_desc(mcf_chan);
|
||||
|
||||
spin_unlock(&mcf_chan->vchan.lock);
|
||||
fsl_edma_tx_chan_handler(&mcf_edma->chans[ch]);
|
||||
}
|
||||
}
|
||||
|
||||
@ -76,8 +51,7 @@ static irqreturn_t mcf_edma_err_handler(int irq, void *dev_id)
|
||||
if (err & BIT(ch)) {
|
||||
fsl_edma_disable_request(&mcf_edma->chans[ch]);
|
||||
iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
|
||||
mcf_edma->chans[ch].status = DMA_ERROR;
|
||||
mcf_edma->chans[ch].idle = true;
|
||||
fsl_edma_err_chan_handler(&mcf_edma->chans[ch]);
|
||||
}
|
||||
}
|
||||
|
||||
@ -172,7 +146,7 @@ static void mcf_edma_irq_free(struct platform_device *pdev,
|
||||
}
|
||||
|
||||
static struct fsl_edma_drvdata mcf_data = {
|
||||
.version = v2,
|
||||
.flags = FSL_EDMA_DRV_EDMA64,
|
||||
.setup_irq = mcf_edma_irq_init,
|
||||
};
|
||||
|
||||
@ -180,9 +154,8 @@ static int mcf_edma_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct mcf_edma_platform_data *pdata;
|
||||
struct fsl_edma_engine *mcf_edma;
|
||||
struct fsl_edma_chan *mcf_chan;
|
||||
struct edma_regs *regs;
|
||||
int ret, i, len, chans;
|
||||
int ret, i, chans;
|
||||
|
||||
pdata = dev_get_platdata(&pdev->dev);
|
||||
if (!pdata) {
|
||||
@ -197,8 +170,8 @@ static int mcf_edma_probe(struct platform_device *pdev)
|
||||
chans = pdata->dma_channels;
|
||||
}
|
||||
|
||||
len = sizeof(*mcf_edma) + sizeof(*mcf_chan) * chans;
|
||||
mcf_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
|
||||
mcf_edma = devm_kzalloc(&pdev->dev, struct_size(mcf_edma, chans, chans),
|
||||
GFP_KERNEL);
|
||||
if (!mcf_edma)
|
||||
return -ENOMEM;
|
||||
|
||||
@ -227,7 +200,9 @@ static int mcf_edma_probe(struct platform_device *pdev)
|
||||
mcf_chan->dma_dir = DMA_NONE;
|
||||
mcf_chan->vchan.desc_free = fsl_edma_free_desc;
|
||||
vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev);
|
||||
iowrite32(0x0, ®s->tcd[i].csr);
|
||||
mcf_chan->tcd = mcf_edma->membase + EDMA_TCD
|
||||
+ i * sizeof(struct fsl_edma_hw_tcd);
|
||||
iowrite32(0x0, &mcf_chan->tcd->csr);
|
||||
}
|
||||
|
||||
iowrite32(~0, regs->inth);
|
@ -18,7 +18,6 @@
|
||||
#include <linux/list.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of_dma.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
|
@ -17,7 +17,6 @@
|
||||
#include <linux/list.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of_dma.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
|
@ -16,7 +16,6 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of_dma.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
@ -551,7 +550,6 @@ static int mtk_uart_apdma_probe(struct platform_device *pdev)
|
||||
}
|
||||
|
||||
pm_runtime_enable(&pdev->dev);
|
||||
pm_runtime_set_active(&pdev->dev);
|
||||
|
||||
rc = dma_async_device_register(&mtkd->ddev);
|
||||
if (rc)
|
||||
|
@ -36,11 +36,11 @@
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/of_dma.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/platform_device.h>
|
||||
|
||||
#include <linux/random.h>
|
||||
|
||||
|
@ -21,7 +21,6 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/stmp_device.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of_dma.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/dma/mxs-dma.h>
|
||||
|
@ -15,7 +15,6 @@
|
||||
#include <linux/log2.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of_dma.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/slab.h>
|
||||
|
@ -20,8 +20,9 @@
|
||||
#include <linux/io.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_dma.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/slab.h>
|
||||
#include "virt-dma.h"
|
||||
|
||||
@ -1116,7 +1117,7 @@ static int owl_dma_probe(struct platform_device *pdev)
|
||||
dev_info(&pdev->dev, "dma-channels %d, dma-requests %d\n",
|
||||
nr_channels, nr_requests);
|
||||
|
||||
od->devid = (enum owl_dma_id)of_device_get_match_data(&pdev->dev);
|
||||
od->devid = (uintptr_t)of_device_get_match_data(&pdev->dev);
|
||||
|
||||
od->nr_pchans = nr_channels;
|
||||
od->nr_vchans = nr_requests;
|
||||
|
@ -28,7 +28,7 @@
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <asm/dcr.h>
|
||||
#include <asm/dcr-regs.h>
|
||||
#include "adma.h"
|
||||
|
@ -2160,8 +2160,7 @@ static int gpi_probe(struct platform_device *pdev)
|
||||
return -ENOMEM;
|
||||
|
||||
gpi_dev->dev = &pdev->dev;
|
||||
gpi_dev->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
gpi_dev->regs = devm_ioremap_resource(gpi_dev->dev, gpi_dev->res);
|
||||
gpi_dev->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &gpi_dev->res);
|
||||
if (IS_ERR(gpi_dev->regs))
|
||||
return PTR_ERR(gpi_dev->regs);
|
||||
gpi_dev->ee_base = gpi_dev->regs;
|
||||
|
@ -45,12 +45,12 @@
|
||||
#include <linux/dmaengine.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/mod_devicetable.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/of_dma.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/property.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/acpi.h>
|
||||
@ -765,17 +765,15 @@ static int hidma_probe(struct platform_device *pdev)
|
||||
pm_runtime_set_active(&pdev->dev);
|
||||
pm_runtime_enable(&pdev->dev);
|
||||
|
||||
trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
trca = devm_ioremap_resource(&pdev->dev, trca_resource);
|
||||
trca = devm_platform_get_and_ioremap_resource(pdev, 0, &trca_resource);
|
||||
if (IS_ERR(trca)) {
|
||||
rc = -ENOMEM;
|
||||
rc = PTR_ERR(trca);
|
||||
goto bailout;
|
||||
}
|
||||
|
||||
evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
|
||||
evca = devm_ioremap_resource(&pdev->dev, evca_resource);
|
||||
evca = devm_platform_get_and_ioremap_resource(pdev, 1, &evca_resource);
|
||||
if (IS_ERR(evca)) {
|
||||
rc = -ENOMEM;
|
||||
rc = PTR_ERR(evca);
|
||||
goto bailout;
|
||||
}
|
||||
|
||||
@ -785,7 +783,7 @@ static int hidma_probe(struct platform_device *pdev)
|
||||
*/
|
||||
chirq = platform_get_irq(pdev, 0);
|
||||
if (chirq < 0) {
|
||||
rc = -ENODEV;
|
||||
rc = chirq;
|
||||
goto bailout;
|
||||
}
|
||||
|
||||
|
@ -176,10 +176,9 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
|
||||
pm_runtime_enable(&pdev->dev);
|
||||
pm_runtime_get_sync(&pdev->dev);
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
virtaddr = devm_ioremap_resource(&pdev->dev, res);
|
||||
virtaddr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
|
||||
if (IS_ERR(virtaddr)) {
|
||||
rc = -ENOMEM;
|
||||
rc = PTR_ERR(virtaddr);
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
@ -9,6 +9,7 @@
|
||||
* Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
|
||||
*/
|
||||
|
||||
#include <linux/bitfield.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/dmaengine.h>
|
||||
#include <linux/interrupt.h>
|
||||
@ -145,8 +146,8 @@ struct rz_dmac {
|
||||
#define CHCFG_REQD BIT(3)
|
||||
#define CHCFG_SEL(bits) ((bits) & 0x07)
|
||||
#define CHCFG_MEM_COPY (0x80400008)
|
||||
#define CHCFG_FILL_DDS(a) (((a) << 16) & GENMASK(19, 16))
|
||||
#define CHCFG_FILL_SDS(a) (((a) << 12) & GENMASK(15, 12))
|
||||
#define CHCFG_FILL_DDS_MASK GENMASK(19, 16)
|
||||
#define CHCFG_FILL_SDS_MASK GENMASK(15, 12)
|
||||
#define CHCFG_FILL_TM(a) (((a) & BIT(5)) << 22)
|
||||
#define CHCFG_FILL_AM(a) (((a) & GENMASK(4, 2)) << 6)
|
||||
#define CHCFG_FILL_LVL(a) (((a) & BIT(1)) << 5)
|
||||
@ -607,13 +608,15 @@ static int rz_dmac_config(struct dma_chan *chan,
|
||||
if (val == CHCFG_DS_INVALID)
|
||||
return -EINVAL;
|
||||
|
||||
channel->chcfg |= CHCFG_FILL_DDS(val);
|
||||
channel->chcfg &= ~CHCFG_FILL_DDS_MASK;
|
||||
channel->chcfg |= FIELD_PREP(CHCFG_FILL_DDS_MASK, val);
|
||||
|
||||
val = rz_dmac_ds_to_val_mapping(config->src_addr_width);
|
||||
if (val == CHCFG_DS_INVALID)
|
||||
return -EINVAL;
|
||||
|
||||
channel->chcfg |= CHCFG_FILL_SDS(val);
|
||||
channel->chcfg &= ~CHCFG_FILL_SDS_MASK;
|
||||
channel->chcfg |= FIELD_PREP(CHCFG_FILL_SDS_MASK, val);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -947,7 +950,6 @@ static int rz_dmac_probe(struct platform_device *pdev)
|
||||
dma_register_err:
|
||||
of_dma_controller_free(pdev->dev.of_node);
|
||||
err:
|
||||
reset_control_assert(dmac->rstc);
|
||||
channel_num = i ? i - 1 : 0;
|
||||
for (i = 0; i < channel_num; i++) {
|
||||
struct rz_dmac_chan *channel = &dmac->channels[i];
|
||||
@ -958,6 +960,7 @@ err:
|
||||
channel->lmdesc.base_dma);
|
||||
}
|
||||
|
||||
reset_control_assert(dmac->rstc);
|
||||
err_pm_runtime_put:
|
||||
pm_runtime_put(&pdev->dev);
|
||||
err_pm_disable:
|
||||
@ -971,6 +974,8 @@ static int rz_dmac_remove(struct platform_device *pdev)
|
||||
struct rz_dmac *dmac = platform_get_drvdata(pdev);
|
||||
unsigned int i;
|
||||
|
||||
dma_async_device_unregister(&dmac->engine);
|
||||
of_dma_controller_free(pdev->dev.of_node);
|
||||
for (i = 0; i < dmac->n_channels; i++) {
|
||||
struct rz_dmac_chan *channel = &dmac->channels[i];
|
||||
|
||||
@ -979,8 +984,6 @@ static int rz_dmac_remove(struct platform_device *pdev)
|
||||
channel->lmdesc.base,
|
||||
channel->lmdesc.base_dma);
|
||||
}
|
||||
of_dma_controller_free(pdev->dev.of_node);
|
||||
dma_async_device_unregister(&dmac->engine);
|
||||
reset_control_assert(dmac->rstc);
|
||||
pm_runtime_put(&pdev->dev);
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
|
@ -23,7 +23,6 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/rculist.h>
|
||||
@ -678,7 +677,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
|
||||
int err, errirq, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
|
||||
struct sh_dmae_device *shdev;
|
||||
struct dma_device *dma_dev;
|
||||
struct resource *chan, *dmars, *errirq_res, *chanirq_res;
|
||||
struct resource *dmars, *errirq_res, *chanirq_res;
|
||||
|
||||
if (pdev->dev.of_node)
|
||||
pdata = of_device_get_match_data(&pdev->dev);
|
||||
@ -689,7 +688,6 @@ static int sh_dmae_probe(struct platform_device *pdev)
|
||||
if (!pdata || !pdata->channel_num)
|
||||
return -ENODEV;
|
||||
|
||||
chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
/* DMARS area is optional */
|
||||
dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
|
||||
/*
|
||||
@ -709,7 +707,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
|
||||
* requested with the IRQF_SHARED flag
|
||||
*/
|
||||
errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
|
||||
if (!chan || !errirq_res)
|
||||
if (!errirq_res)
|
||||
return -ENODEV;
|
||||
|
||||
shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
|
||||
@ -719,7 +717,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
|
||||
|
||||
dma_dev = &shdev->shdma_dev.dma_dev;
|
||||
|
||||
shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
|
||||
shdev->chan_reg = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(shdev->chan_reg))
|
||||
return PTR_ERR(shdev->chan_reg);
|
||||
if (dmars) {
|
||||
|
@@ -15,7 +15,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
@@ -3590,6 +3590,10 @@ static int __init d40_probe(struct platform_device *pdev)
	spin_lock_init(&base->lcla_pool.lock);

	base->irq = platform_get_irq(pdev, 0);
	if (base->irq < 0) {
		ret = base->irq;
		goto destroy_cache;
	}

	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
	if (ret) {
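The d40_probe() hunk adds the missing check on platform_get_irq() before the value reaches request_irq(); a negative return is an error code, not an IRQ number. Generic sketch of the pattern with placeholder names, using devm_request_irq() rather than the driver's request_irq()/goto unwind.

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static int foo_request_irq(struct platform_device *pdev,
			   irq_handler_t handler, void *ctx)
{
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* platform_get_irq() already logs the failure */

	return devm_request_irq(&pdev->dev, irq, handler, 0,
				dev_name(&pdev->dev), ctx);
}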
@@ -1581,8 +1581,7 @@ static int stm32_dma_probe(struct platform_device *pdev)

	dd = &dmadev->ddev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmadev->base = devm_ioremap_resource(&pdev->dev, res);
	dmadev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(dmadev->base))
		return PTR_ERR(dmadev->base);

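stm32_dma_probe() uses the _get_and_ioremap_ variant because the driver still needs the struct resource afterwards: the helper maps the region and hands the resource back in one call. Hedged sketch with placeholder names.

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/* Maps MEM resource 0 and returns it through @res as well. */
	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	dev_dbg(&pdev->dev, "registers at %pR\n", res);
	return 0;
}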
@@ -15,8 +15,10 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
@@ -24,7 +24,6 @@
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -14,8 +14,8 @@
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>
@@ -13,7 +13,7 @@
#include <linux/iopoll.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
@@ -17,7 +17,6 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
@@ -8,9 +8,10 @@
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
@@ -3,14 +3,15 @@
 * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>

#define TI_XBAR_DRA7		0
#define TI_XBAR_AM335X		1
@@ -20,7 +20,6 @@
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>

#include <linux/platform_data/edma.h>
@@ -3,6 +3,8 @@
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */
#include <linux/of.h>
#include <linux/of_platform.h>

int xudma_navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
{
@@ -20,7 +20,6 @@
#include <linux/sys_soc.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
@@ -16,8 +16,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>

#include "../virt-dma.h"
@@ -18,8 +18,9 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#include "dmaengine.h"
@@ -41,10 +41,10 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>
@@ -173,12 +173,15 @@
#define XILINX_DMA_MAX_TRANS_LEN_MAX	23
#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX	26
#define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
#define XILINX_DMA_CR_DELAY_MAX		GENMASK(31, 24)
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT	16
#define XILINX_DMA_CR_DELAY_SHIFT	24
#define XILINX_DMA_BD_SOP		BIT(27)
#define XILINX_DMA_BD_EOP		BIT(26)
#define XILINX_DMA_BD_COMP_MASK		BIT(31)
#define XILINX_DMA_COALESCE_MAX		255
#define XILINX_DMA_NUM_DESCS		255
#define XILINX_DMA_NUM_DESCS		512
#define XILINX_DMA_NUM_APP_WORDS	5

/* AXI CDMA Specific Registers/Offsets */
@@ -410,6 +413,7 @@ struct xilinx_dma_tx_descriptor {
 * @stop_transfer: Differentiate b/w DMA IP's quiesce
 * @tdest: TDEST value for mcdma
 * @has_vflip: S2MM vertical flip
 * @irq_delay: Interrupt delay timeout
 */
struct xilinx_dma_chan {
	struct xilinx_dma_device *xdev;
@@ -448,6 +452,7 @@ struct xilinx_dma_chan {
	int (*stop_transfer)(struct xilinx_dma_chan *chan);
	u16 tdest;
	bool has_vflip;
	u8 irq_delay;
};

/**
@@ -493,6 +498,7 @@ struct xilinx_dma_config {
 * @s2mm_chan_id: DMA s2mm channel identifier
 * @mm2s_chan_id: DMA mm2s channel identifier
 * @max_buffer_len: Max buffer length
 * @has_axistream_connected: AXI DMA connected to AXI Stream IP
 */
struct xilinx_dma_device {
	void __iomem *regs;
@@ -511,6 +517,7 @@ struct xilinx_dma_device {
	u32 s2mm_chan_id;
	u32 mm2s_chan_id;
	u32 max_buffer_len;
	bool has_axistream_connected;
};

/* Macros */
@@ -623,6 +630,29 @@ static inline void xilinx_aximcdma_buf(struct xilinx_dma_chan *chan,
	}
}

/**
 * xilinx_dma_get_metadata_ptr - Populate metadata pointer and payload length
 * @tx: async transaction descriptor
 * @payload_len: metadata payload length
 * @max_len: metadata max length
 * Return: The app field pointer.
 */
static void *xilinx_dma_get_metadata_ptr(struct dma_async_tx_descriptor *tx,
					 size_t *payload_len, size_t *max_len)
{
	struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
	struct xilinx_axidma_tx_segment *seg;

	*max_len = *payload_len = sizeof(u32) * XILINX_DMA_NUM_APP_WORDS;
	seg = list_first_entry(&desc->segments,
			       struct xilinx_axidma_tx_segment, node);
	return seg->hw.app;
}

static struct dma_descriptor_metadata_ops xilinx_dma_metadata_ops = {
	.get_ptr = xilinx_dma_get_metadata_ptr,
};

/* -----------------------------------------------------------------------------
 * Descriptors and segments alloc and free
 */
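The new metadata_ops hook exposes the five AXI4-Stream APP words through the core descriptor-metadata API, so a client driver can read the control words without touching DMA internals. A hedged sketch of the client side follows; the function name is a placeholder and the descriptor is assumed to come from dmaengine_prep_slave_sg() on an AXI DMA channel with xlnx,axistream-connected set.

#include <linux/dmaengine.h>
#include <linux/err.h>

static void foo_dump_app_words(struct dma_async_tx_descriptor *desc)
{
	size_t payload_len, max_len;
	u32 *app;

	/* Valid only for descriptors whose channel advertises metadata ops. */
	app = dmaengine_desc_get_metadata_ptr(desc, &payload_len, &max_len);
	if (IS_ERR(app))
		return;

	/* For AXI DMA, payload_len is XILINX_DMA_NUM_APP_WORDS * sizeof(u32). */
	pr_info("APP0-4: %08x %08x %08x %08x %08x\n",
		app[0], app[1], app[2], app[3], app[4]);
}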
@@ -1535,6 +1565,9 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
	if (chan->has_sg)
		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
			     head_desc->async_tx.phys);
	reg &= ~XILINX_DMA_CR_DELAY_MAX;
	reg |= chan->irq_delay << XILINX_DMA_CR_DELAY_SHIFT;
	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);

	xilinx_dma_start(chan);

@@ -1683,6 +1716,14 @@ static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
		return;

	list_for_each_entry_safe(desc, next, &chan->active_list, node) {
		if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
			struct xilinx_axidma_tx_segment *seg;

			seg = list_last_entry(&desc->segments,
					      struct xilinx_axidma_tx_segment, node);
			if (!(seg->hw.status & XILINX_DMA_BD_COMP_MASK) && chan->has_sg)
				break;
		}
		if (chan->has_sg && chan->xdev->dma_config->dmatype !=
		    XDMA_TYPE_VDMA)
			desc->residue = xilinx_dma_get_residue(chan, desc);
@@ -1816,7 +1857,7 @@ static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data)
		spin_unlock(&chan->lock);
	}

	tasklet_schedule(&chan->tasklet);
	tasklet_hi_schedule(&chan->tasklet);
	return IRQ_HANDLED;
}

@@ -1864,15 +1905,8 @@ static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
		}
	}

	if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
		/*
		 * Device takes too long to do the transfer when user requires
		 * responsiveness.
		 */
		dev_dbg(chan->dev, "Inter-packet latency too long\n");
	}

	if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
	if (status & (XILINX_DMA_DMASR_FRM_CNT_IRQ |
		      XILINX_DMA_DMASR_DLY_CNT_IRQ)) {
		spin_lock(&chan->lock);
		xilinx_dma_complete_descriptor(chan);
		chan->idle = true;
@@ -2221,6 +2255,9 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
			segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	if (chan->xdev->has_axistream_connected)
		desc->async_tx.metadata_ops = &xilinx_dma_metadata_ops;

	return &desc->async_tx;

error:
@@ -2796,6 +2833,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
	/* Retrieve the channel properties from the device tree */
	has_dre = of_property_read_bool(node, "xlnx,include-dre");

	of_property_read_u8(node, "xlnx,irq-delay", &chan->irq_delay);

	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");

	err = of_property_read_u32(node, "xlnx,datawidth", &value);
@@ -3067,6 +3106,11 @@ static int xilinx_dma_probe(struct platform_device *pdev)
		}
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		xdev->has_axistream_connected =
			of_property_read_bool(node, "xlnx,axistream-connected");
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		err = of_property_read_u32(node, "xlnx,num-fstores",
					   &num_frames);
@@ -3092,6 +3136,10 @@ static int xilinx_dma_probe(struct platform_device *pdev)
	else
		xdev->ext_addr = false;

	/* Set metadata mode */
	if (xdev->has_axistream_connected)
		xdev->common.desc_metadata_modes = DESC_METADATA_ENGINE;

	/* Set the dma mask bits */
	err = dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
	if (err < 0) {
@@ -11,8 +11,9 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>