dmaengine updates for v6.8

New support:
   - Loongson LS2X APB DMA controller
   - sf-pdma: mpfs-pdma support
   - Qualcomm X1E80100 GPI dma controller support
 
  Updates:
   - Xilinx XDMA updates to support interleaved DMA transfers
   - TI PSIL threads for AM62P and J722S, and cfg register region descriptions
   - axi-dmac improvements to the cyclic DMA transfers
   - Tegra support for the dma-channel-mask property
   - Remaining conversions of platform remove callbacks to return void
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEE+vs47OPLdNbVcHzyfBQHDyUjg0cFAmWpXioACgkQfBQHDyUj
 g0cSqg//T2VTh1q48hRfxa/pqpZOs4GF9Vin2oFljHnAfq6G+g/lhoExchzwZgke
 pM0VRWtdqWgtF+/Nny50ij/5e4duapgQSwZdVRAkWz6HNVsVaXG+0WJGAoSPiBeN
 F/BFGF2BBPRorjo/ZGrY07jseIqG8CNdZ7v6XrHnfyGLzcadJ3kjphy4B6wNxxTJ
 JtlKKJRi6p0xRmbo40RAQnH6cqWEWHORNFsKIrsZtUV4EJq5d9Z8XwZMD09tOMBy
 F1SwkojleJECzpt4ci1Gv+TWpTG0rpt1x5dN4+jw7BwKi2JAybLs2K3a5NQqlo0D
 j0IKANxOYMHKJs5jZyfsgHbvSxqVQrLyXSRFcQsykhHWemnoqGCASWAFd4j0UBgn
 qMYvci64BSB5OmaZ0FRczCEIpfW62ASnyUpy8/pBU7idd+DFe0nhO8x3/0EaEhoB
 rhtP8eQxReoA4CIUQaSL0KiyqvNJ/UPmIQzELxlfiXfe/puSDG4Bxe4XrugUNhZt
 K89lL2IlpokkPj1e2CaKPoAn9Yz08DXne1eG17eBCs1ch8GeMeGTtKzlz2xYCqvl
 GN8pOmAHhtfrcJTBMn38kZ0Td2O7WgqpFxBros4/n5BtfuDtp2xhrABOrTkZTJfC
 AoMB545rqVBCDiuI2hPdtEhv5RO7k567tq52v5jPfatLi2XrdIQ=
 =+MiK
 -----END PGP SIGNATURE-----

Merge tag 'dmaengine-6.8-rc1' into fixes

dmaengine updates for v6.8

 New support:
  - Loongson LS2X APB DMA controller
  - sf-pdma: mpfs-pdma support
  - Qualcomm X1E80100 GPI dma controller support

 Updates:
  - Xilinx XDMA updates to support interleaved DMA transfers
  - TI PSIL threads for AM62P and J722S, and cfg register region descriptions
  - axi-dmac improvements to the cyclic DMA transfers
  - Tegra support for the dma-channel-mask property
  - Remaining conversions of platform remove callbacks to return void
Vinod Koul 2024-01-19 17:03:06 +05:30
commit b93216d3be
39 changed files with 1870 additions and 285 deletions


@ -19,19 +19,4 @@ properties:
additionalProperties: true
examples:
- |
dma: dma-controller@48000000 {
compatible = "ti,omap-sdma";
reg = <0x48000000 0x1000>;
interrupts = <0 12 0x4>,
<0 13 0x4>,
<0 14 0x4>,
<0 15 0x4>;
#dma-cells = <1>;
dma-channels = <32>;
dma-requests = <127>;
dma-channel-mask = <0xfffe>;
};
...


@ -40,15 +40,4 @@ required:
additionalProperties: true
examples:
- |
sdma_xbar: dma-router@4a002b78 {
compatible = "ti,dra7-dma-crossbar";
reg = <0x4a002b78 0xfc>;
#dma-cells = <1>;
dma-requests = <205>;
ti,dma-safe-map = <0>;
dma-masters = <&sdma>;
};
...


@ -0,0 +1,62 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/loongson,ls2x-apbdma.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Loongson LS2X APB DMA controller
description:
The Loongson LS2X APB DMA controller is used for transferring data
between system memory and the peripherals on the APB bus.
maintainers:
- Binbin Zhou <zhoubinbin@loongson.cn>
allOf:
- $ref: dma-controller.yaml#
properties:
compatible:
oneOf:
- const: loongson,ls2k1000-apbdma
- items:
- const: loongson,ls2k0500-apbdma
- const: loongson,ls2k1000-apbdma
reg:
maxItems: 1
interrupts:
maxItems: 1
clocks:
maxItems: 1
'#dma-cells':
const: 1
required:
- compatible
- reg
- interrupts
- clocks
- '#dma-cells'
additionalProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/clock/loongson,ls2k-clk.h>
dma-controller@1fe00c00 {
compatible = "loongson,ls2k1000-apbdma";
reg = <0x1fe00c00 0x8>;
interrupt-parent = <&liointc1>;
interrupts = <12 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk LOONGSON2_APB_CLK>;
#dma-cells = <1>;
};
...


@ -53,6 +53,9 @@ properties:
ADMA_CHn_CTRL register.
const: 1
dma-channel-mask:
maxItems: 1
required:
- compatible
- reg


@ -32,6 +32,8 @@ properties:
- qcom,sm8350-gpi-dma
- qcom,sm8450-gpi-dma
- qcom,sm8550-gpi-dma
- qcom,sm8650-gpi-dma
- qcom,x1e80100-gpi-dma
- const: qcom,sm6350-gpi-dma
- items:
- enum:


@ -16,7 +16,7 @@ properties:
compatible:
items:
- enum:
- renesas,r9a07g043-dmac # RZ/G2UL
- renesas,r9a07g043-dmac # RZ/G2UL and RZ/Five
- renesas,r9a07g044-dmac # RZ/G2{L,LC}
- renesas,r9a07g054-dmac # RZ/V2L
- const: renesas,rz-dmac


@ -29,6 +29,7 @@ properties:
compatible:
items:
- enum:
- microchip,mpfs-pdma
- sifive,fu540-c000-pdma
- const: sifive,pdma0
description:


@ -37,11 +37,11 @@ properties:
reg:
minItems: 3
maxItems: 5
maxItems: 9
reg-names:
minItems: 3
maxItems: 5
maxItems: 9
"#dma-cells":
const: 3
@ -141,7 +141,10 @@ allOf:
ti,sci-rm-range-tchan: false
reg:
maxItems: 3
items:
- description: BCDMA Control /Status Registers region
- description: RX Channel Realtime Registers region
- description: Ring Realtime Registers region
reg-names:
items:
@ -161,14 +164,29 @@ allOf:
properties:
reg:
minItems: 5
items:
- description: BCDMA Control /Status Registers region
- description: Block Copy Channel Realtime Registers region
- description: RX Channel Realtime Registers region
- description: TX Channel Realtime Registers region
- description: Ring Realtime Registers region
- description: Ring Configuration Registers region
- description: TX Channel Configuration Registers region
- description: RX Channel Configuration Registers region
- description: Block Copy Channel Configuration Registers region
reg-names:
minItems: 5
items:
- const: gcfg
- const: bchanrt
- const: rchanrt
- const: tchanrt
- const: ringrt
- const: ring
- const: tchan
- const: rchan
- const: bchan
required:
- ti,sci-rm-range-bchan
@ -184,7 +202,11 @@ allOf:
ti,sci-rm-range-bchan: false
reg:
maxItems: 4
items:
- description: BCDMA Control /Status Registers region
- description: RX Channel Realtime Registers region
- description: TX Channel Realtime Registers region
- description: Ring Realtime Registers region
reg-names:
items:
@ -220,8 +242,13 @@ examples:
<0x0 0x4c000000 0x0 0x20000>,
<0x0 0x4a820000 0x0 0x20000>,
<0x0 0x4aa40000 0x0 0x20000>,
<0x0 0x4bc00000 0x0 0x100000>;
reg-names = "gcfg", "bchanrt", "rchanrt", "tchanrt", "ringrt";
<0x0 0x4bc00000 0x0 0x100000>,
<0x0 0x48600000 0x0 0x8000>,
<0x0 0x484a4000 0x0 0x2000>,
<0x0 0x484c2000 0x0 0x2000>,
<0x0 0x48420000 0x0 0x2000>;
reg-names = "gcfg", "bchanrt", "rchanrt", "tchanrt", "ringrt",
"ring", "tchan", "rchan", "bchan";
msi-parent = <&inta_main_dmss>;
#dma-cells = <3>;


@ -45,14 +45,28 @@ properties:
The second cell is the ASEL value for the channel
reg:
maxItems: 4
minItems: 4
items:
- description: Packet DMA Control /Status Registers region
- description: RX Channel Realtime Registers region
- description: TX Channel Realtime Registers region
- description: Ring Realtime Registers region
- description: Ring Configuration Registers region
- description: TX Configuration Registers region
- description: RX Configuration Registers region
- description: RX Flow Configuration Registers region
reg-names:
minItems: 4
items:
- const: gcfg
- const: rchanrt
- const: tchanrt
- const: ringrt
- const: ring
- const: tchan
- const: rchan
- const: rflow
msi-parent: true
@ -136,8 +150,14 @@ examples:
reg = <0x0 0x485c0000 0x0 0x100>,
<0x0 0x4a800000 0x0 0x20000>,
<0x0 0x4aa00000 0x0 0x40000>,
<0x0 0x4b800000 0x0 0x400000>;
reg-names = "gcfg", "rchanrt", "tchanrt", "ringrt";
<0x0 0x4b800000 0x0 0x400000>,
<0x0 0x485e0000 0x0 0x20000>,
<0x0 0x484a0000 0x0 0x4000>,
<0x0 0x484c0000 0x0 0x2000>,
<0x0 0x48430000 0x0 0x4000>;
reg-names = "gcfg", "rchanrt", "tchanrt", "ringrt",
"ring", "tchan", "rchan", "rflow";
msi-parent = <&inta_main_dmss>;
#dma-cells = <2>;


@ -69,13 +69,24 @@ properties:
- ti,j721e-navss-mcu-udmap
reg:
maxItems: 3
minItems: 3
items:
- description: UDMA-P Control /Status Registers region
- description: RX Channel Realtime Registers region
- description: TX Channel Realtime Registers region
- description: TX Configuration Registers region
- description: RX Configuration Registers region
- description: RX Flow Configuration Registers region
reg-names:
minItems: 3
items:
- const: gcfg
- const: rchanrt
- const: tchanrt
- const: tchan
- const: rchan
- const: rflow
msi-parent: true
@ -158,8 +169,11 @@ examples:
compatible = "ti,am654-navss-main-udmap";
reg = <0x0 0x31150000 0x0 0x100>,
<0x0 0x34000000 0x0 0x100000>,
<0x0 0x35000000 0x0 0x100000>;
reg-names = "gcfg", "rchanrt", "tchanrt";
<0x0 0x35000000 0x0 0x100000>,
<0x0 0x30b00000 0x0 0x20000>,
<0x0 0x30c00000 0x0 0x8000>,
<0x0 0x30d00000 0x0 0x4000>;
reg-names = "gcfg", "rchanrt", "tchanrt", "tchan", "rchan", "rflow";
#dma-cells = <1>;
ti,ringacc = <&ringacc>;


@ -12507,6 +12507,13 @@ S: Maintained
F: Documentation/devicetree/bindings/gpio/loongson,ls-gpio.yaml
F: drivers/gpio/gpio-loongson-64bit.c
LOONGSON LS2X APB DMA DRIVER
M: Binbin Zhou <zhoubinbin@loongson.cn>
L: dmaengine@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/dma/loongson,ls2x-apbdma.yaml
F: drivers/dma/ls2x-apb-dma.c
LOONGSON LS2X I2C DRIVER
M: Binbin Zhou <zhoubinbin@loongson.cn>
L: linux-i2c@vger.kernel.org


@ -378,6 +378,20 @@ config LPC18XX_DMAMUX
Enable support for DMA on NXP LPC18xx/43xx platforms
with PL080 and multiplexed DMA request lines.
config LS2X_APB_DMA
tristate "Loongson LS2X APB DMA support"
depends on LOONGARCH || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
Support for the Loongson LS2X APB DMA controller driver. The
DMA controller has a single channel, which can be configured
for peripherals on the APB bus such as audio, NAND and SDIO.
It transfers data between memory and a peripheral FIFO; it
does not support memory-to-memory transfers.
config MCF_EDMA
tristate "Freescale eDMA engine support, ColdFire mcf5441x SoCs"
depends on M5441x || COMPILE_TEST


@ -48,6 +48,7 @@ obj-$(CONFIG_INTEL_IOATDMA) += ioat/
obj-y += idxd/
obj-$(CONFIG_K3_DMA) += k3dma.o
obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o
obj-$(CONFIG_LS2X_APB_DMA) += ls2x-apb-dma.o
obj-$(CONFIG_MILBEAUT_HDMAC) += milbeaut-hdmac.o
obj-$(CONFIG_MILBEAUT_XDMAC) += milbeaut-xdmac.o
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o


@ -57,6 +57,8 @@
#define REG_BUS_WIDTH(ch) (0x8040 + (ch) * 0x200)
#define BUS_WIDTH_WORD_SIZE GENMASK(3, 0)
#define BUS_WIDTH_FRAME_SIZE GENMASK(7, 4)
#define BUS_WIDTH_8BIT 0x00
#define BUS_WIDTH_16BIT 0x01
#define BUS_WIDTH_32BIT 0x02
@ -740,7 +742,8 @@ static int admac_device_config(struct dma_chan *chan,
struct admac_data *ad = adchan->host;
bool is_tx = admac_chan_direction(adchan->no) == DMA_MEM_TO_DEV;
int wordsize = 0;
u32 bus_width = 0;
u32 bus_width = readl_relaxed(ad->base + REG_BUS_WIDTH(adchan->no)) &
~(BUS_WIDTH_WORD_SIZE | BUS_WIDTH_FRAME_SIZE);
switch (is_tx ? config->dst_addr_width : config->src_addr_width) {
case DMA_SLAVE_BUSWIDTH_1_BYTE:


@ -81,9 +81,13 @@
#define AXI_DMAC_REG_CURRENT_DEST_ADDR 0x438
#define AXI_DMAC_REG_PARTIAL_XFER_LEN 0x44c
#define AXI_DMAC_REG_PARTIAL_XFER_ID 0x450
#define AXI_DMAC_REG_CURRENT_SG_ID 0x454
#define AXI_DMAC_REG_SG_ADDRESS 0x47c
#define AXI_DMAC_REG_SG_ADDRESS_HIGH 0x4bc
#define AXI_DMAC_CTRL_ENABLE BIT(0)
#define AXI_DMAC_CTRL_PAUSE BIT(1)
#define AXI_DMAC_CTRL_ENABLE_SG BIT(2)
#define AXI_DMAC_IRQ_SOT BIT(0)
#define AXI_DMAC_IRQ_EOT BIT(1)
@ -97,20 +101,35 @@
/* The maximum ID allocated by the hardware is 31 */
#define AXI_DMAC_SG_UNUSED 32U
/* Flags for axi_dmac_hw_desc.flags */
#define AXI_DMAC_HW_FLAG_LAST BIT(0)
#define AXI_DMAC_HW_FLAG_IRQ BIT(1)
struct axi_dmac_hw_desc {
u32 flags;
u32 id;
u64 dest_addr;
u64 src_addr;
u64 next_sg_addr;
u32 y_len;
u32 x_len;
u32 src_stride;
u32 dst_stride;
u64 __pad[2];
};
struct axi_dmac_sg {
dma_addr_t src_addr;
dma_addr_t dest_addr;
unsigned int x_len;
unsigned int y_len;
unsigned int dest_stride;
unsigned int src_stride;
unsigned int id;
unsigned int partial_len;
bool schedule_when_free;
struct axi_dmac_hw_desc *hw;
dma_addr_t hw_phys;
};
struct axi_dmac_desc {
struct virt_dma_desc vdesc;
struct axi_dmac_chan *chan;
bool cyclic;
bool have_partial_xfer;
@ -139,6 +158,7 @@ struct axi_dmac_chan {
bool hw_partial_xfer;
bool hw_cyclic;
bool hw_2d;
bool hw_sg;
};
struct axi_dmac {
@ -213,9 +233,11 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
unsigned int flags = 0;
unsigned int val;
val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
if (val) /* Queue is full, wait for the next SOT IRQ */
return;
if (!chan->hw_sg) {
val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
if (val) /* Queue is full, wait for the next SOT IRQ */
return;
}
desc = chan->next_desc;
@ -229,14 +251,15 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
sg = &desc->sg[desc->num_submitted];
/* Already queued in cyclic mode. Wait for it to finish */
if (sg->id != AXI_DMAC_SG_UNUSED) {
if (sg->hw->id != AXI_DMAC_SG_UNUSED) {
sg->schedule_when_free = true;
return;
}
desc->num_submitted++;
if (desc->num_submitted == desc->num_sgs ||
desc->have_partial_xfer) {
if (chan->hw_sg) {
chan->next_desc = NULL;
} else if (++desc->num_submitted == desc->num_sgs ||
desc->have_partial_xfer) {
if (desc->cyclic)
desc->num_submitted = 0; /* Start again */
else
@ -246,32 +269,42 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
chan->next_desc = desc;
}
sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);
sg->hw->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);
if (axi_dmac_dest_is_mem(chan)) {
axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
}
if (!chan->hw_sg) {
if (axi_dmac_dest_is_mem(chan)) {
axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->hw->dest_addr);
axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->hw->dst_stride);
}
if (axi_dmac_src_is_mem(chan)) {
axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
if (axi_dmac_src_is_mem(chan)) {
axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->hw->src_addr);
axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->hw->src_stride);
}
}
/*
* If the hardware supports cyclic transfers and there is no callback to
* call and only a single segment, enable hw cyclic mode to avoid
* unnecessary interrupts.
* call, enable hw cyclic mode to avoid unnecessary interrupts.
*/
if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback &&
desc->num_sgs == 1)
flags |= AXI_DMAC_FLAG_CYCLIC;
if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback) {
if (chan->hw_sg)
desc->sg[desc->num_sgs - 1].hw->flags &= ~AXI_DMAC_HW_FLAG_IRQ;
else if (desc->num_sgs == 1)
flags |= AXI_DMAC_FLAG_CYCLIC;
}
if (chan->hw_partial_xfer)
flags |= AXI_DMAC_FLAG_PARTIAL_REPORT;
axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
if (chan->hw_sg) {
axi_dmac_write(dmac, AXI_DMAC_REG_SG_ADDRESS, (u32)sg->hw_phys);
axi_dmac_write(dmac, AXI_DMAC_REG_SG_ADDRESS_HIGH,
(u64)sg->hw_phys >> 32);
} else {
axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->hw->x_len);
axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->hw->y_len);
}
axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
}
@ -286,9 +319,9 @@ static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan,
struct axi_dmac_sg *sg)
{
if (chan->hw_2d)
return sg->x_len * sg->y_len;
return (sg->hw->x_len + 1) * (sg->hw->y_len + 1);
else
return sg->x_len;
return (sg->hw->x_len + 1);
}
static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
@ -307,9 +340,9 @@ static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
list_for_each_entry(desc, &chan->active_descs, vdesc.node) {
for (i = 0; i < desc->num_sgs; i++) {
sg = &desc->sg[i];
if (sg->id == AXI_DMAC_SG_UNUSED)
if (sg->hw->id == AXI_DMAC_SG_UNUSED)
continue;
if (sg->id == id) {
if (sg->hw->id == id) {
desc->have_partial_xfer = true;
sg->partial_len = len;
found_sg = true;
@ -348,6 +381,9 @@ static void axi_dmac_compute_residue(struct axi_dmac_chan *chan,
rslt->result = DMA_TRANS_NOERROR;
rslt->residue = 0;
if (chan->hw_sg)
return;
/*
* We get here if the last completed segment is partial, which
* means we can compute the residue from that segment onwards
@ -374,36 +410,47 @@ static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
(completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE))
axi_dmac_dequeue_partial_xfers(chan);
do {
sg = &active->sg[active->num_completed];
if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
break;
if (!(BIT(sg->id) & completed_transfers))
break;
active->num_completed++;
sg->id = AXI_DMAC_SG_UNUSED;
if (sg->schedule_when_free) {
sg->schedule_when_free = false;
start_next = true;
}
if (sg->partial_len)
axi_dmac_compute_residue(chan, active);
if (active->cyclic)
if (chan->hw_sg) {
if (active->cyclic) {
vchan_cyclic_callback(&active->vdesc);
if (active->num_completed == active->num_sgs ||
sg->partial_len) {
if (active->cyclic) {
active->num_completed = 0; /* wrap around */
} else {
list_del(&active->vdesc.node);
vchan_cookie_complete(&active->vdesc);
active = axi_dmac_active_desc(chan);
}
} else {
list_del(&active->vdesc.node);
vchan_cookie_complete(&active->vdesc);
active = axi_dmac_active_desc(chan);
start_next = !!active;
}
} while (active);
} else {
do {
sg = &active->sg[active->num_completed];
if (sg->hw->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
break;
if (!(BIT(sg->hw->id) & completed_transfers))
break;
active->num_completed++;
sg->hw->id = AXI_DMAC_SG_UNUSED;
if (sg->schedule_when_free) {
sg->schedule_when_free = false;
start_next = true;
}
if (sg->partial_len)
axi_dmac_compute_residue(chan, active);
if (active->cyclic)
vchan_cyclic_callback(&active->vdesc);
if (active->num_completed == active->num_sgs ||
sg->partial_len) {
if (active->cyclic) {
active->num_completed = 0; /* wrap around */
} else {
list_del(&active->vdesc.node);
vchan_cookie_complete(&active->vdesc);
active = axi_dmac_active_desc(chan);
}
}
} while (active);
}
return start_next;
}
@ -467,8 +514,12 @@ static void axi_dmac_issue_pending(struct dma_chan *c)
struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
struct axi_dmac *dmac = chan_to_axi_dmac(chan);
unsigned long flags;
u32 ctrl = AXI_DMAC_CTRL_ENABLE;
axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);
if (chan->hw_sg)
ctrl |= AXI_DMAC_CTRL_ENABLE_SG;
axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, ctrl);
spin_lock_irqsave(&chan->vchan.lock, flags);
if (vchan_issue_pending(&chan->vchan))
@ -476,22 +527,58 @@ static void axi_dmac_issue_pending(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
static struct axi_dmac_desc *
axi_dmac_alloc_desc(struct axi_dmac_chan *chan, unsigned int num_sgs)
{
struct axi_dmac *dmac = chan_to_axi_dmac(chan);
struct device *dev = dmac->dma_dev.dev;
struct axi_dmac_hw_desc *hws;
struct axi_dmac_desc *desc;
dma_addr_t hw_phys;
unsigned int i;
desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
if (!desc)
return NULL;
desc->num_sgs = num_sgs;
desc->chan = chan;
for (i = 0; i < num_sgs; i++)
desc->sg[i].id = AXI_DMAC_SG_UNUSED;
hws = dma_alloc_coherent(dev, PAGE_ALIGN(num_sgs * sizeof(*hws)),
&hw_phys, GFP_ATOMIC);
if (!hws) {
kfree(desc);
return NULL;
}
for (i = 0; i < num_sgs; i++) {
desc->sg[i].hw = &hws[i];
desc->sg[i].hw_phys = hw_phys + i * sizeof(*hws);
hws[i].id = AXI_DMAC_SG_UNUSED;
hws[i].flags = 0;
/* Link hardware descriptors */
hws[i].next_sg_addr = hw_phys + (i + 1) * sizeof(*hws);
}
/* The last hardware descriptor will trigger an interrupt */
desc->sg[num_sgs - 1].hw->flags = AXI_DMAC_HW_FLAG_LAST | AXI_DMAC_HW_FLAG_IRQ;
return desc;
}
static void axi_dmac_free_desc(struct axi_dmac_desc *desc)
{
struct axi_dmac *dmac = chan_to_axi_dmac(desc->chan);
struct device *dev = dmac->dma_dev.dev;
struct axi_dmac_hw_desc *hw = desc->sg[0].hw;
dma_addr_t hw_phys = desc->sg[0].hw_phys;
dma_free_coherent(dev, PAGE_ALIGN(desc->num_sgs * sizeof(*hw)),
hw, hw_phys);
kfree(desc);
}
static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
enum dma_transfer_direction direction, dma_addr_t addr,
unsigned int num_periods, unsigned int period_len,
@ -508,26 +595,24 @@ static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
segment_size = ((segment_size - 1) | chan->length_align_mask) + 1;
for (i = 0; i < num_periods; i++) {
len = period_len;
while (len > segment_size) {
for (len = period_len; len > segment_size; sg++) {
if (direction == DMA_DEV_TO_MEM)
sg->dest_addr = addr;
sg->hw->dest_addr = addr;
else
sg->src_addr = addr;
sg->x_len = segment_size;
sg->y_len = 1;
sg++;
sg->hw->src_addr = addr;
sg->hw->x_len = segment_size - 1;
sg->hw->y_len = 0;
sg->hw->flags = 0;
addr += segment_size;
len -= segment_size;
}
if (direction == DMA_DEV_TO_MEM)
sg->dest_addr = addr;
sg->hw->dest_addr = addr;
else
sg->src_addr = addr;
sg->x_len = len;
sg->y_len = 1;
sg->hw->src_addr = addr;
sg->hw->x_len = len - 1;
sg->hw->y_len = 0;
sg++;
addr += len;
}
@ -554,7 +639,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
for_each_sg(sgl, sg, sg_len, i)
num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);
desc = axi_dmac_alloc_desc(num_sgs);
desc = axi_dmac_alloc_desc(chan, num_sgs);
if (!desc)
return NULL;
@ -563,7 +648,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
for_each_sg(sgl, sg, sg_len, i) {
if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
!axi_dmac_check_len(chan, sg_dma_len(sg))) {
kfree(desc);
axi_dmac_free_desc(desc);
return NULL;
}
@ -583,7 +668,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
{
struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
struct axi_dmac_desc *desc;
unsigned int num_periods, num_segments;
unsigned int num_periods, num_segments, num_sgs;
if (direction != chan->direction)
return NULL;
@ -597,11 +682,16 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
num_periods = buf_len / period_len;
num_segments = DIV_ROUND_UP(period_len, chan->max_length);
num_sgs = num_periods * num_segments;
desc = axi_dmac_alloc_desc(num_periods * num_segments);
desc = axi_dmac_alloc_desc(chan, num_sgs);
if (!desc)
return NULL;
/* Chain the last descriptor to the first, and remove its "last" flag */
desc->sg[num_sgs - 1].hw->next_sg_addr = desc->sg[0].hw_phys;
desc->sg[num_sgs - 1].hw->flags &= ~AXI_DMAC_HW_FLAG_LAST;
axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
period_len, desc->sg);
@ -653,26 +743,26 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
return NULL;
}
desc = axi_dmac_alloc_desc(1);
desc = axi_dmac_alloc_desc(chan, 1);
if (!desc)
return NULL;
if (axi_dmac_src_is_mem(chan)) {
desc->sg[0].src_addr = xt->src_start;
desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
desc->sg[0].hw->src_addr = xt->src_start;
desc->sg[0].hw->src_stride = xt->sgl[0].size + src_icg;
}
if (axi_dmac_dest_is_mem(chan)) {
desc->sg[0].dest_addr = xt->dst_start;
desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
desc->sg[0].hw->dest_addr = xt->dst_start;
desc->sg[0].hw->dst_stride = xt->sgl[0].size + dst_icg;
}
if (chan->hw_2d) {
desc->sg[0].x_len = xt->sgl[0].size;
desc->sg[0].y_len = xt->numf;
desc->sg[0].hw->x_len = xt->sgl[0].size - 1;
desc->sg[0].hw->y_len = xt->numf - 1;
} else {
desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
desc->sg[0].y_len = 1;
desc->sg[0].hw->x_len = xt->sgl[0].size * xt->numf - 1;
desc->sg[0].hw->y_len = 0;
}
if (flags & DMA_CYCLIC)
@ -688,7 +778,7 @@ static void axi_dmac_free_chan_resources(struct dma_chan *c)
static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
{
kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
axi_dmac_free_desc(to_axi_dmac_desc(vdesc));
}
static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
@ -714,6 +804,9 @@ static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
case AXI_DMAC_REG_CURRENT_DEST_ADDR:
case AXI_DMAC_REG_PARTIAL_XFER_LEN:
case AXI_DMAC_REG_PARTIAL_XFER_ID:
case AXI_DMAC_REG_CURRENT_SG_ID:
case AXI_DMAC_REG_SG_ADDRESS:
case AXI_DMAC_REG_SG_ADDRESS_HIGH:
return true;
default:
return false;
@ -866,6 +959,10 @@ static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version)
if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
chan->hw_cyclic = true;
axi_dmac_write(dmac, AXI_DMAC_REG_SG_ADDRESS, 0xffffffff);
if (axi_dmac_read(dmac, AXI_DMAC_REG_SG_ADDRESS))
chan->hw_sg = true;
axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
chan->hw_2d = true;
@ -911,6 +1008,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
struct axi_dmac *dmac;
struct regmap *regmap;
unsigned int version;
u32 irq_mask = 0;
int ret;
dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
@ -966,6 +1064,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
dma_dev->directions = BIT(dmac->chan.direction);
dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
dma_dev->max_sg_burst = 31; /* 31 SGs maximum in one burst */
INIT_LIST_HEAD(&dma_dev->channels);
dmac->chan.vchan.desc_free = axi_dmac_desc_free;
@ -977,7 +1076,10 @@ static int axi_dmac_probe(struct platform_device *pdev)
dma_dev->copy_align = (dmac->chan.address_align_mask + 1);
axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);
if (dmac->chan.hw_sg)
irq_mask |= AXI_DMAC_IRQ_SOT;
axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, irq_mask);
if (of_dma_is_coherent(pdev->dev.of_node)) {
ret = axi_dmac_read(dmac, AXI_DMAC_REG_COHERENCY_DESC);

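The axi-dmac changes above move the per-segment transfer state into hardware scatter-gather descriptors (struct axi_dmac_hw_desc) that the controller walks on its own, so the driver no longer has to reprogram registers and take an interrupt for every segment. From the consumer side this is transparent: client drivers keep preparing cyclic transfers through the generic dmaengine API. The following is only a hedged sketch of that consumer pattern (the period callback and error handling are illustrative assumptions, not code from this series):

#include <linux/dmaengine.h>

/* Hypothetical consumer: stream a DMA_DEV_TO_MEM ring buffer in fixed periods. */
static int example_start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf,
				   size_t buf_len, size_t period_len,
				   dma_async_tx_callback period_cb, void *cb_arg)
{
	struct dma_async_tx_descriptor *desc;

	/* One callback per completed period; the engine wraps around by itself. */
	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = period_cb;
	desc->callback_param = cb_arg;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}

When the consumer requests no callback at all, the new code clears AXI_DMAC_HW_FLAG_IRQ on the last chained descriptor (or sets AXI_DMAC_FLAG_CYCLIC in the non-SG path), so such a stream can run without any per-period interrupts.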

@ -1103,6 +1103,9 @@ EXPORT_SYMBOL_GPL(dma_async_device_channel_register);
static void __dma_async_device_channel_unregister(struct dma_device *device,
struct dma_chan *chan)
{
if (chan->local == NULL)
return;
WARN_ONCE(!device->device_release && chan->client_count,
"%s called while %d clients hold a reference\n",
__func__, chan->client_count);


@ -21,6 +21,10 @@
#include <linux/slab.h>
#include <linux/wait.h>
static bool nobounce;
module_param(nobounce, bool, 0644);
MODULE_PARM_DESC(nobounce, "Prevent using swiotlb buffer (default: use swiotlb buffer)");
static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, 0644);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
@ -90,6 +94,7 @@ MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts");
/**
* struct dmatest_params - test parameters.
* @nobounce: prevent using swiotlb buffer
* @buf_size: size of the memcpy test buffer
* @channel: bus ID of the channel to test
* @device: bus ID of the DMA Engine to test
@ -106,6 +111,7 @@ MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts");
* @polled: use polling for completion instead of interrupts
*/
struct dmatest_params {
bool nobounce;
unsigned int buf_size;
char channel[20];
char device[32];
@ -215,6 +221,7 @@ struct dmatest_done {
struct dmatest_data {
u8 **raw;
u8 **aligned;
gfp_t gfp_flags;
unsigned int cnt;
unsigned int off;
};
@ -533,7 +540,7 @@ static int dmatest_alloc_test_data(struct dmatest_data *d,
goto err;
for (i = 0; i < d->cnt; i++) {
d->raw[i] = kmalloc(buf_size + align, GFP_KERNEL);
d->raw[i] = kmalloc(buf_size + align, d->gfp_flags);
if (!d->raw[i])
goto err;
@ -655,6 +662,13 @@ static int dmatest_func(void *data)
goto err_free_coefs;
}
src->gfp_flags = GFP_KERNEL;
dst->gfp_flags = GFP_KERNEL;
if (params->nobounce) {
src->gfp_flags = GFP_DMA;
dst->gfp_flags = GFP_DMA;
}
if (dmatest_alloc_test_data(src, buf_size, align) < 0)
goto err_free_coefs;
@ -1093,6 +1107,7 @@ static void add_threaded_test(struct dmatest_info *info)
struct dmatest_params *params = &info->params;
/* Copy test parameters */
params->nobounce = nobounce;
params->buf_size = test_buf_size;
strscpy(params->channel, strim(test_channel), sizeof(params->channel));
strscpy(params->device, strim(test_device), sizeof(params->device));


@ -9,6 +9,7 @@
* Vybrid and Layerscape SoCs.
*/
#include <dt-bindings/dma/fsl-edma.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
@ -21,10 +22,6 @@
#include "fsl-edma-common.h"
#define ARGS_RX BIT(0)
#define ARGS_REMOTE BIT(1)
#define ARGS_MULTI_FIFO BIT(2)
static void fsl_edma_synchronize(struct dma_chan *chan)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
@ -153,9 +150,15 @@ static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec,
i = fsl_chan - fsl_edma->chans;
fsl_chan->priority = dma_spec->args[1];
fsl_chan->is_rxchan = dma_spec->args[2] & ARGS_RX;
fsl_chan->is_remote = dma_spec->args[2] & ARGS_REMOTE;
fsl_chan->is_multi_fifo = dma_spec->args[2] & ARGS_MULTI_FIFO;
fsl_chan->is_rxchan = dma_spec->args[2] & FSL_EDMA_RX;
fsl_chan->is_remote = dma_spec->args[2] & FSL_EDMA_REMOTE;
fsl_chan->is_multi_fifo = dma_spec->args[2] & FSL_EDMA_MULTI_FIFO;
if ((dma_spec->args[2] & FSL_EDMA_EVEN_CH) && (i & 0x1))
continue;
if ((dma_spec->args[2] & FSL_EDMA_ODD_CH) && !(i & 0x1))
continue;
if (!b_chmux && i == dma_spec->args[0]) {
chan = dma_get_slave_channel(chan);


@ -165,7 +165,7 @@ static void idxd_cdev_dev_release(struct device *dev)
struct idxd_wq *wq = idxd_cdev->wq;
cdev_ctx = &ictx[wq->idxd->data->type];
ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
ida_free(&cdev_ctx->minor_ida, idxd_cdev->minor);
kfree(idxd_cdev);
}
@ -463,7 +463,7 @@ int idxd_wq_add_cdev(struct idxd_wq *wq)
cdev = &idxd_cdev->cdev;
dev = cdev_dev(idxd_cdev);
cdev_ctx = &ictx[wq->idxd->data->type];
minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
minor = ida_alloc_max(&cdev_ctx->minor_ida, MINORMASK, GFP_KERNEL);
if (minor < 0) {
kfree(idxd_cdev);
return minor;


@ -802,6 +802,9 @@ err_bmap:
static void idxd_device_evl_free(struct idxd_device *idxd)
{
void *evl_log;
unsigned int evl_log_size;
dma_addr_t evl_dma;
union gencfg_reg gencfg;
union genctrl_reg genctrl;
struct device *dev = &idxd->pdev->dev;
@ -822,11 +825,15 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET);
iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET + 8);
dma_free_coherent(dev, evl->log_size, evl->log, evl->dma);
bitmap_free(evl->bmap);
evl_log = evl->log;
evl_log_size = evl->log_size;
evl_dma = evl->dma;
evl->log = NULL;
evl->size = IDXD_EVL_SIZE_MIN;
spin_unlock(&evl->lock);
dma_free_coherent(dev, evl_log_size, evl_log, evl_dma);
}
static void idxd_group_config_write(struct idxd_group *group)

drivers/dma/ls2x-apb-dma.c (new file, 705 lines)

@ -0,0 +1,705 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for the Loongson LS2X APB DMA Controller
*
* Copyright (C) 2017-2023 Loongson Corporation
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "dmaengine.h"
#include "virt-dma.h"
/* Global Configuration Register */
#define LDMA_ORDER_ERG 0x0
/* Bitfield definitions */
/* Bitfields in Global Configuration Register */
#define LDMA_64BIT_EN BIT(0) /* 1: 64 bit support */
#define LDMA_UNCOHERENT_EN BIT(1) /* 0: cache, 1: uncache */
#define LDMA_ASK_VALID BIT(2)
#define LDMA_START BIT(3) /* DMA start operation */
#define LDMA_STOP BIT(4) /* DMA stop operation */
#define LDMA_CONFIG_MASK GENMASK(4, 0) /* DMA controller config bits mask */
/* Bitfields in ndesc_addr field of HW descriptor */
#define LDMA_DESC_EN BIT(0) /* 1: The next descriptor is valid */
#define LDMA_DESC_ADDR_LOW GENMASK(31, 1)
/* Bitfields in cmd field of HW descriptor */
#define LDMA_INT BIT(1) /* Enable DMA interrupts */
#define LDMA_DATA_DIRECTION BIT(12) /* 1: write to device, 0: read from device */
#define LDMA_SLAVE_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
#define LDMA_MAX_TRANS_LEN U32_MAX
/*-- descriptors -----------------------------------------------------*/
/*
* struct ls2x_dma_hw_desc - DMA HW descriptor
* @ndesc_addr: the next descriptor low address.
* @mem_addr: memory low address.
* @apb_addr: device buffer address.
* @len: length of a piece of carried content, in words.
* @step_len: length between two moved memory data blocks.
* @step_times: number of blocks to be carried in a single DMA operation.
* @cmd: descriptor command or state.
* @stats: DMA status.
* @high_ndesc_addr: the next descriptor high address.
* @high_mem_addr: memory high address.
* @reserved: reserved
*/
struct ls2x_dma_hw_desc {
u32 ndesc_addr;
u32 mem_addr;
u32 apb_addr;
u32 len;
u32 step_len;
u32 step_times;
u32 cmd;
u32 stats;
u32 high_ndesc_addr;
u32 high_mem_addr;
u32 reserved[2];
} __packed;
/*
* struct ls2x_dma_sg - ls2x dma scatter gather entry
* @hw: the pointer to DMA HW descriptor.
* @llp: physical address of the DMA HW descriptor.
* @phys: destination or source address(mem).
* @len: number of Bytes to read.
*/
struct ls2x_dma_sg {
struct ls2x_dma_hw_desc *hw;
dma_addr_t llp;
dma_addr_t phys;
u32 len;
};
/*
* struct ls2x_dma_desc - software descriptor
* @vdesc: pointer to the virtual dma descriptor.
* @cyclic: flag to dma cyclic
* @burst_size: burst size of transaction, in words.
* @desc_num: number of sg entries.
* @direction: transfer direction, to or from device.
* @status: dma controller status.
* @sg: array of sgs.
*/
struct ls2x_dma_desc {
struct virt_dma_desc vdesc;
bool cyclic;
size_t burst_size;
u32 desc_num;
enum dma_transfer_direction direction;
enum dma_status status;
struct ls2x_dma_sg sg[] __counted_by(desc_num);
};
/*-- Channels --------------------------------------------------------*/
/*
* struct ls2x_dma_chan - internal representation of an LS2X APB DMA channel
* @vchan: virtual dma channel entry.
* @desc: pointer to the ls2x sw dma descriptor.
* @pool: hw desc table
* @irq: irq line
* @sconfig: configuration for slave transfers, passed via .device_config
*/
struct ls2x_dma_chan {
struct virt_dma_chan vchan;
struct ls2x_dma_desc *desc;
void *pool;
int irq;
struct dma_slave_config sconfig;
};
/*-- Controller ------------------------------------------------------*/
/*
* struct ls2x_dma_priv - LS2X APB DMAC specific information
* @ddev: dmaengine dma_device object members
* @dma_clk: DMAC clock source
* @regs: memory mapped register base
* @lchan: channel to store ls2x_dma_chan structures
*/
struct ls2x_dma_priv {
struct dma_device ddev;
struct clk *dma_clk;
void __iomem *regs;
struct ls2x_dma_chan lchan;
};
/*-- Helper functions ------------------------------------------------*/
static inline struct ls2x_dma_desc *to_ldma_desc(struct virt_dma_desc *vdesc)
{
return container_of(vdesc, struct ls2x_dma_desc, vdesc);
}
static inline struct ls2x_dma_chan *to_ldma_chan(struct dma_chan *chan)
{
return container_of(chan, struct ls2x_dma_chan, vchan.chan);
}
static inline struct ls2x_dma_priv *to_ldma_priv(struct dma_device *ddev)
{
return container_of(ddev, struct ls2x_dma_priv, ddev);
}
static struct device *chan2dev(struct dma_chan *chan)
{
return &chan->dev->device;
}
static void ls2x_dma_desc_free(struct virt_dma_desc *vdesc)
{
struct ls2x_dma_chan *lchan = to_ldma_chan(vdesc->tx.chan);
struct ls2x_dma_desc *desc = to_ldma_desc(vdesc);
int i;
for (i = 0; i < desc->desc_num; i++) {
if (desc->sg[i].hw)
dma_pool_free(lchan->pool, desc->sg[i].hw,
desc->sg[i].llp);
}
kfree(desc);
}
static void ls2x_dma_write_cmd(struct ls2x_dma_chan *lchan, bool cmd)
{
struct ls2x_dma_priv *priv = to_ldma_priv(lchan->vchan.chan.device);
u64 val;
val = lo_hi_readq(priv->regs + LDMA_ORDER_ERG) & ~LDMA_CONFIG_MASK;
val |= LDMA_64BIT_EN | cmd;
lo_hi_writeq(val, priv->regs + LDMA_ORDER_ERG);
}
static void ls2x_dma_start_transfer(struct ls2x_dma_chan *lchan)
{
struct ls2x_dma_priv *priv = to_ldma_priv(lchan->vchan.chan.device);
struct ls2x_dma_sg *ldma_sg;
struct virt_dma_desc *vdesc;
u64 val;
/* Get the next descriptor */
vdesc = vchan_next_desc(&lchan->vchan);
if (!vdesc) {
lchan->desc = NULL;
return;
}
list_del(&vdesc->node);
lchan->desc = to_ldma_desc(vdesc);
ldma_sg = &lchan->desc->sg[0];
/* Start DMA */
lo_hi_writeq(0, priv->regs + LDMA_ORDER_ERG);
val = (ldma_sg->llp & ~LDMA_CONFIG_MASK) | LDMA_64BIT_EN | LDMA_START;
lo_hi_writeq(val, priv->regs + LDMA_ORDER_ERG);
}
static size_t ls2x_dmac_detect_burst(struct ls2x_dma_chan *lchan)
{
u32 maxburst, buswidth;
/* Reject definitely invalid configurations */
if ((lchan->sconfig.src_addr_width & LDMA_SLAVE_BUSWIDTHS) &&
(lchan->sconfig.dst_addr_width & LDMA_SLAVE_BUSWIDTHS))
return 0;
if (lchan->sconfig.direction == DMA_MEM_TO_DEV) {
maxburst = lchan->sconfig.dst_maxburst;
buswidth = lchan->sconfig.dst_addr_width;
} else {
maxburst = lchan->sconfig.src_maxburst;
buswidth = lchan->sconfig.src_addr_width;
}
/* If maxburst is zero, fallback to LDMA_MAX_TRANS_LEN */
return maxburst ? (maxburst * buswidth) >> 2 : LDMA_MAX_TRANS_LEN;
}
static void ls2x_dma_fill_desc(struct ls2x_dma_chan *lchan, u32 sg_index,
struct ls2x_dma_desc *desc)
{
struct ls2x_dma_sg *ldma_sg = &desc->sg[sg_index];
u32 num_segments, segment_size;
if (desc->direction == DMA_MEM_TO_DEV) {
ldma_sg->hw->cmd = LDMA_INT | LDMA_DATA_DIRECTION;
ldma_sg->hw->apb_addr = lchan->sconfig.dst_addr;
} else {
ldma_sg->hw->cmd = LDMA_INT;
ldma_sg->hw->apb_addr = lchan->sconfig.src_addr;
}
ldma_sg->hw->mem_addr = lower_32_bits(ldma_sg->phys);
ldma_sg->hw->high_mem_addr = upper_32_bits(ldma_sg->phys);
/* Split into multiple equally sized segments if necessary */
num_segments = DIV_ROUND_UP((ldma_sg->len + 3) >> 2, desc->burst_size);
segment_size = DIV_ROUND_UP((ldma_sg->len + 3) >> 2, num_segments);
/* Word count register takes input in words */
ldma_sg->hw->len = segment_size;
ldma_sg->hw->step_times = num_segments;
ldma_sg->hw->step_len = 0;
/* let's make a linked list */
if (sg_index) {
desc->sg[sg_index - 1].hw->ndesc_addr = ldma_sg->llp | LDMA_DESC_EN;
desc->sg[sg_index - 1].hw->high_ndesc_addr = upper_32_bits(ldma_sg->llp);
}
}
/*-- DMA Engine API --------------------------------------------------*/
/*
* ls2x_dma_alloc_chan_resources - allocate resources for DMA channel
* @chan: allocate descriptor resources for this channel
*
* return - the number of allocated descriptors
*/
static int ls2x_dma_alloc_chan_resources(struct dma_chan *chan)
{
struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
/* Create a pool of consistent memory blocks for hardware descriptors */
lchan->pool = dma_pool_create(dev_name(chan2dev(chan)),
chan->device->dev, PAGE_SIZE,
__alignof__(struct ls2x_dma_hw_desc), 0);
if (!lchan->pool) {
dev_err(chan2dev(chan), "No memory for descriptors\n");
return -ENOMEM;
}
return 1;
}
/*
* ls2x_dma_free_chan_resources - free all channel resources
* @chan: DMA channel
*/
static void ls2x_dma_free_chan_resources(struct dma_chan *chan)
{
struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
vchan_free_chan_resources(to_virt_chan(chan));
dma_pool_destroy(lchan->pool);
lchan->pool = NULL;
}
/*
* ls2x_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
* @chan: DMA channel
* @sgl: scatterlist to transfer to/from
* @sg_len: number of entries in @scatterlist
* @direction: DMA direction
* @flags: tx descriptor status flags
* @context: transaction context (ignored)
*
* Return: Async transaction descriptor on success and NULL on failure
*/
static struct dma_async_tx_descriptor *
ls2x_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
u32 sg_len, enum dma_transfer_direction direction,
unsigned long flags, void *context)
{
struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
struct ls2x_dma_desc *desc;
struct scatterlist *sg;
size_t burst_size;
int i;
if (unlikely(!sg_len || !is_slave_direction(direction)))
return NULL;
burst_size = ls2x_dmac_detect_burst(lchan);
if (!burst_size)
return NULL;
desc = kzalloc(struct_size(desc, sg, sg_len), GFP_NOWAIT);
if (!desc)
return NULL;
desc->desc_num = sg_len;
desc->direction = direction;
desc->burst_size = burst_size;
for_each_sg(sgl, sg, sg_len, i) {
struct ls2x_dma_sg *ldma_sg = &desc->sg[i];
/* Allocate DMA capable memory for hardware descriptor */
ldma_sg->hw = dma_pool_alloc(lchan->pool, GFP_NOWAIT, &ldma_sg->llp);
if (!ldma_sg->hw) {
desc->desc_num = i;
ls2x_dma_desc_free(&desc->vdesc);
return NULL;
}
ldma_sg->phys = sg_dma_address(sg);
ldma_sg->len = sg_dma_len(sg);
ls2x_dma_fill_desc(lchan, i, desc);
}
/* Setting the last descriptor enable bit */
desc->sg[sg_len - 1].hw->ndesc_addr &= ~LDMA_DESC_EN;
desc->status = DMA_IN_PROGRESS;
return vchan_tx_prep(&lchan->vchan, &desc->vdesc, flags);
}
/*
* ls2x_dma_prep_dma_cyclic - prepare the cyclic DMA transfer
* @chan: the DMA channel to prepare
* @buf_addr: physical DMA address where the buffer starts
* @buf_len: total number of bytes for the entire buffer
* @period_len: number of bytes for each period
* @direction: transfer direction, to or from device
* @flags: tx descriptor status flags
*
* Return: Async transaction descriptor on success and NULL on failure
*/
static struct dma_async_tx_descriptor *
ls2x_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
struct ls2x_dma_desc *desc;
size_t burst_size;
u32 num_periods;
int i;
if (unlikely(!buf_len || !period_len))
return NULL;
if (unlikely(!is_slave_direction(direction)))
return NULL;
burst_size = ls2x_dmac_detect_burst(lchan);
if (!burst_size)
return NULL;
num_periods = buf_len / period_len;
desc = kzalloc(struct_size(desc, sg, num_periods), GFP_NOWAIT);
if (!desc)
return NULL;
desc->desc_num = num_periods;
desc->direction = direction;
desc->burst_size = burst_size;
/* Build cyclic linked list */
for (i = 0; i < num_periods; i++) {
struct ls2x_dma_sg *ldma_sg = &desc->sg[i];
/* Allocate DMA capable memory for hardware descriptor */
ldma_sg->hw = dma_pool_alloc(lchan->pool, GFP_NOWAIT, &ldma_sg->llp);
if (!ldma_sg->hw) {
desc->desc_num = i;
ls2x_dma_desc_free(&desc->vdesc);
return NULL;
}
ldma_sg->phys = buf_addr + period_len * i;
ldma_sg->len = period_len;
ls2x_dma_fill_desc(lchan, i, desc);
}
/* Let's make a cyclic list */
desc->sg[num_periods - 1].hw->ndesc_addr = desc->sg[0].llp | LDMA_DESC_EN;
desc->sg[num_periods - 1].hw->high_ndesc_addr = upper_32_bits(desc->sg[0].llp);
desc->cyclic = true;
desc->status = DMA_IN_PROGRESS;
return vchan_tx_prep(&lchan->vchan, &desc->vdesc, flags);
}
/*
* ls2x_slave_config - set slave configuration for channel
* @chan: dma channel
* @cfg: slave configuration
*
* Sets slave configuration for channel
*/
static int ls2x_dma_slave_config(struct dma_chan *chan,
struct dma_slave_config *config)
{
struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
memcpy(&lchan->sconfig, config, sizeof(*config));
return 0;
}
/*
* ls2x_dma_issue_pending - push pending transactions to the hardware
* @chan: channel
*
* When this function is called, all pending transactions are pushed to the
* hardware and executed.
*/
static void ls2x_dma_issue_pending(struct dma_chan *chan)
{
struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
unsigned long flags;
spin_lock_irqsave(&lchan->vchan.lock, flags);
if (vchan_issue_pending(&lchan->vchan) && !lchan->desc)
ls2x_dma_start_transfer(lchan);
spin_unlock_irqrestore(&lchan->vchan.lock, flags);
}
/*
* ls2x_dma_terminate_all - terminate all transactions
* @chan: channel
*
* Stops all DMA transactions.
*/
static int ls2x_dma_terminate_all(struct dma_chan *chan)
{
struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
unsigned long flags;
LIST_HEAD(head);
spin_lock_irqsave(&lchan->vchan.lock, flags);
/* Setting stop cmd */
ls2x_dma_write_cmd(lchan, LDMA_STOP);
if (lchan->desc) {
vchan_terminate_vdesc(&lchan->desc->vdesc);
lchan->desc = NULL;
}
vchan_get_all_descriptors(&lchan->vchan, &head);
spin_unlock_irqrestore(&lchan->vchan.lock, flags);
vchan_dma_desc_free_list(&lchan->vchan, &head);
return 0;
}
/*
* ls2x_dma_synchronize - Synchronizes the termination of transfers to the
* current context.
* @chan: channel
*/
static void ls2x_dma_synchronize(struct dma_chan *chan)
{
struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
vchan_synchronize(&lchan->vchan);
}
static int ls2x_dma_pause(struct dma_chan *chan)
{
struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
unsigned long flags;
spin_lock_irqsave(&lchan->vchan.lock, flags);
if (lchan->desc && lchan->desc->status == DMA_IN_PROGRESS) {
ls2x_dma_write_cmd(lchan, LDMA_STOP);
lchan->desc->status = DMA_PAUSED;
}
spin_unlock_irqrestore(&lchan->vchan.lock, flags);
return 0;
}
static int ls2x_dma_resume(struct dma_chan *chan)
{
struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
unsigned long flags;
spin_lock_irqsave(&lchan->vchan.lock, flags);
if (lchan->desc && lchan->desc->status == DMA_PAUSED) {
lchan->desc->status = DMA_IN_PROGRESS;
ls2x_dma_write_cmd(lchan, LDMA_START);
}
spin_unlock_irqrestore(&lchan->vchan.lock, flags);
return 0;
}
/*
* ls2x_dma_isr - LS2X DMA Interrupt handler
* @irq: IRQ number
* @dev_id: Pointer to ls2x_dma_chan
*
* Return: IRQ_HANDLED/IRQ_NONE
*/
static irqreturn_t ls2x_dma_isr(int irq, void *dev_id)
{
struct ls2x_dma_chan *lchan = dev_id;
struct ls2x_dma_desc *desc;
spin_lock(&lchan->vchan.lock);
desc = lchan->desc;
if (desc) {
if (desc->cyclic) {
vchan_cyclic_callback(&desc->vdesc);
} else {
desc->status = DMA_COMPLETE;
vchan_cookie_complete(&desc->vdesc);
ls2x_dma_start_transfer(lchan);
}
/* ls2x_dma_start_transfer() updates lchan->desc */
if (!lchan->desc)
ls2x_dma_write_cmd(lchan, LDMA_STOP);
}
spin_unlock(&lchan->vchan.lock);
return IRQ_HANDLED;
}
static int ls2x_dma_chan_init(struct platform_device *pdev,
struct ls2x_dma_priv *priv)
{
struct ls2x_dma_chan *lchan = &priv->lchan;
struct device *dev = &pdev->dev;
int ret;
lchan->irq = platform_get_irq(pdev, 0);
if (lchan->irq < 0)
return lchan->irq;
ret = devm_request_irq(dev, lchan->irq, ls2x_dma_isr, IRQF_TRIGGER_RISING,
dev_name(&pdev->dev), lchan);
if (ret)
return ret;
/* Initialize channels related values */
INIT_LIST_HEAD(&priv->ddev.channels);
lchan->vchan.desc_free = ls2x_dma_desc_free;
vchan_init(&lchan->vchan, &priv->ddev);
return 0;
}
/*
* ls2x_dma_probe - Driver probe function
* @pdev: Pointer to the platform_device structure
*
* Return: '0' on success and failure value on error
*/
static int ls2x_dma_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ls2x_dma_priv *priv;
struct dma_device *ddev;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs))
return dev_err_probe(dev, PTR_ERR(priv->regs),
"devm_platform_ioremap_resource failed.\n");
priv->dma_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->dma_clk))
return dev_err_probe(dev, PTR_ERR(priv->dma_clk), "devm_clk_get failed.\n");
ret = clk_prepare_enable(priv->dma_clk);
if (ret)
return dev_err_probe(dev, ret, "clk_prepare_enable failed.\n");
ret = ls2x_dma_chan_init(pdev, priv);
if (ret)
goto disable_clk;
ddev = &priv->ddev;
ddev->dev = dev;
dma_cap_zero(ddev->cap_mask);
dma_cap_set(DMA_SLAVE, ddev->cap_mask);
dma_cap_set(DMA_CYCLIC, ddev->cap_mask);
ddev->device_alloc_chan_resources = ls2x_dma_alloc_chan_resources;
ddev->device_free_chan_resources = ls2x_dma_free_chan_resources;
ddev->device_tx_status = dma_cookie_status;
ddev->device_issue_pending = ls2x_dma_issue_pending;
ddev->device_prep_slave_sg = ls2x_dma_prep_slave_sg;
ddev->device_prep_dma_cyclic = ls2x_dma_prep_dma_cyclic;
ddev->device_config = ls2x_dma_slave_config;
ddev->device_terminate_all = ls2x_dma_terminate_all;
ddev->device_synchronize = ls2x_dma_synchronize;
ddev->device_pause = ls2x_dma_pause;
ddev->device_resume = ls2x_dma_resume;
ddev->src_addr_widths = LDMA_SLAVE_BUSWIDTHS;
ddev->dst_addr_widths = LDMA_SLAVE_BUSWIDTHS;
ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
ret = dma_async_device_register(&priv->ddev);
if (ret < 0)
goto disable_clk;
ret = of_dma_controller_register(dev->of_node, of_dma_xlate_by_chan_id, priv);
if (ret < 0)
goto unregister_dmac;
platform_set_drvdata(pdev, priv);
dev_info(dev, "Loongson LS2X APB DMA driver registered successfully.\n");
return 0;
unregister_dmac:
dma_async_device_unregister(&priv->ddev);
disable_clk:
clk_disable_unprepare(priv->dma_clk);
return ret;
}
/*
* ls2x_dma_remove - Driver remove function
* @pdev: Pointer to the platform_device structure
*/
static void ls2x_dma_remove(struct platform_device *pdev)
{
struct ls2x_dma_priv *priv = platform_get_drvdata(pdev);
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&priv->ddev);
clk_disable_unprepare(priv->dma_clk);
}
static const struct of_device_id ls2x_dma_of_match_table[] = {
{ .compatible = "loongson,ls2k1000-apbdma" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ls2x_dma_of_match_table);
static struct platform_driver ls2x_dmac_driver = {
.probe = ls2x_dma_probe,
.remove_new = ls2x_dma_remove,
.driver = {
.name = "ls2x-apbdma",
.of_match_table = ls2x_dma_of_match_table,
},
};
module_platform_driver(ls2x_dmac_driver);
MODULE_DESCRIPTION("Loongson LS2X APB DMA Controller driver");
MODULE_AUTHOR("Loongson Technology Corporation Limited");
MODULE_LICENSE("GPL");

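The new drivers/dma/ls2x-apb-dma.c above implements only the provider side: it registers slave scatter-gather and cyclic preparation, pause/resume and terminate callbacks, and exposes its single channel via of_dma_xlate_by_chan_id. For context, a peripheral driver on the APB bus would consume it through the generic dmaengine slave API; the sketch below is an assumption-based illustration (the "tx" channel name, FIFO address and one-shot buffer handling are hypothetical, not taken from this series):

#include <linux/dmaengine.h>
#include <linux/err.h>

/* Hypothetical APB peripheral pushing one DMA-mapped buffer to its TX FIFO. */
static int example_apb_tx_one(struct device *dev, dma_addr_t buf, size_t len)
{
	struct dma_slave_config cfg = {
		/* The ls2x driver picks dst/src settings based on sconfig.direction. */
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = 0x1fe2e010,		/* assumed peripheral FIFO address */
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 4,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	int ret;

	chan = dma_request_chan(dev, "tx");	/* resolved via the consumer's dmas/dma-names */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto out_release;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -ENOMEM;
		goto out_release;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;	/* release the channel once the completion callback has run */

out_release:
	dma_release_channel(chan);
	return ret;
}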

@ -531,7 +531,7 @@ disable_clk:
return ret;
}
static int milbeaut_hdmac_remove(struct platform_device *pdev)
static void milbeaut_hdmac_remove(struct platform_device *pdev)
{
struct milbeaut_hdmac_device *mdev = platform_get_drvdata(pdev);
struct dma_chan *chan;
@ -546,16 +546,21 @@ static int milbeaut_hdmac_remove(struct platform_device *pdev)
*/
list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
ret = dmaengine_terminate_sync(chan);
if (ret)
return ret;
if (ret) {
/*
* This results in resource leakage and maybe also
* use-after-free errors as e.g. *mdev is kfreed.
*/
dev_alert(&pdev->dev, "Failed to terminate channel %d (%pe)\n",
chan->chan_id, ERR_PTR(ret));
return;
}
milbeaut_hdmac_free_chan_resources(chan);
}
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&mdev->ddev);
clk_disable_unprepare(mdev->clk);
return 0;
}
static const struct of_device_id milbeaut_hdmac_match[] = {
@ -566,7 +571,7 @@ MODULE_DEVICE_TABLE(of, milbeaut_hdmac_match);
static struct platform_driver milbeaut_hdmac_driver = {
.probe = milbeaut_hdmac_probe,
.remove = milbeaut_hdmac_remove,
.remove_new = milbeaut_hdmac_remove,
.driver = {
.name = "milbeaut-m10v-hdmac",
.of_match_table = milbeaut_hdmac_match,


@ -368,7 +368,7 @@ disable_xdmac:
return ret;
}
static int milbeaut_xdmac_remove(struct platform_device *pdev)
static void milbeaut_xdmac_remove(struct platform_device *pdev)
{
struct milbeaut_xdmac_device *mdev = platform_get_drvdata(pdev);
struct dma_chan *chan;
@ -383,8 +383,15 @@ static int milbeaut_xdmac_remove(struct platform_device *pdev)
*/
list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
ret = dmaengine_terminate_sync(chan);
if (ret)
return ret;
if (ret) {
/*
* This results in resource leakage and maybe also
* use-after-free errors as e.g. *mdev is kfreed.
*/
dev_alert(&pdev->dev, "Failed to terminate channel %d (%pe)\n",
chan->chan_id, ERR_PTR(ret));
return;
}
milbeaut_xdmac_free_chan_resources(chan);
}
@ -392,8 +399,6 @@ static int milbeaut_xdmac_remove(struct platform_device *pdev)
dma_async_device_unregister(&mdev->ddev);
disable_xdmac(mdev);
return 0;
}
static const struct of_device_id milbeaut_xdmac_match[] = {
@ -404,7 +409,7 @@ MODULE_DEVICE_TABLE(of, milbeaut_xdmac_match);
static struct platform_driver milbeaut_xdmac_driver = {
.probe = milbeaut_xdmac_probe,
.remove = milbeaut_xdmac_remove,
.remove_new = milbeaut_xdmac_remove,
.driver = {
.name = "milbeaut-m10v-xdmac",
.of_match_table = milbeaut_xdmac_match,


@ -1053,6 +1053,9 @@ static bool _trigger(struct pl330_thread *thrd)
thrd->req_running = idx;
if (desc->rqtype == DMA_MEM_TO_DEV || desc->rqtype == DMA_DEV_TO_MEM)
UNTIL(thrd, PL330_STATE_WFP);
return true;
}


@ -20,10 +20,13 @@
#include <linux/mod_devicetable.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/slab.h>
#include "sf-pdma.h"
#define PDMA_QUIRK_NO_STRICT_ORDERING BIT(0)
#ifndef readq
static inline unsigned long long readq(void __iomem *addr)
{
@ -65,7 +68,7 @@ static struct sf_pdma_desc *sf_pdma_alloc_desc(struct sf_pdma_chan *chan)
static void sf_pdma_fill_desc(struct sf_pdma_desc *desc,
u64 dst, u64 src, u64 size)
{
desc->xfer_type = PDMA_FULL_SPEED;
desc->xfer_type = desc->chan->pdma->transfer_type;
desc->xfer_size = size;
desc->dst_addr = dst;
desc->src_addr = src;
@ -492,6 +495,7 @@ static void sf_pdma_setup_chans(struct sf_pdma *pdma)
static int sf_pdma_probe(struct platform_device *pdev)
{
const struct sf_pdma_driver_platdata *ddata;
struct sf_pdma *pdma;
int ret, n_chans;
const enum dma_slave_buswidth widths =
@ -517,6 +521,14 @@ static int sf_pdma_probe(struct platform_device *pdev)
pdma->n_chans = n_chans;
pdma->transfer_type = PDMA_FULL_SPEED | PDMA_STRICT_ORDERING;
ddata = device_get_match_data(&pdev->dev);
if (ddata) {
if (ddata->quirks & PDMA_QUIRK_NO_STRICT_ORDERING)
pdma->transfer_type &= ~PDMA_STRICT_ORDERING;
}
pdma->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdma->membase))
return PTR_ERR(pdma->membase);
@ -563,7 +575,20 @@ static int sf_pdma_probe(struct platform_device *pdev)
return ret;
}
ret = of_dma_controller_register(pdev->dev.of_node,
of_dma_xlate_by_chan_id, pdma);
if (ret < 0) {
dev_err(&pdev->dev,
"Can't register SiFive Platform OF_DMA. (%d)\n", ret);
goto err_unregister;
}
return 0;
err_unregister:
dma_async_device_unregister(&pdma->dma_dev);
return ret;
}
static void sf_pdma_remove(struct platform_device *pdev)
@ -583,12 +608,25 @@ static void sf_pdma_remove(struct platform_device *pdev)
tasklet_kill(&ch->err_tasklet);
}
if (pdev->dev.of_node)
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&pdma->dma_dev);
}
static const struct sf_pdma_driver_platdata mpfs_pdma = {
.quirks = PDMA_QUIRK_NO_STRICT_ORDERING,
};
static const struct of_device_id sf_pdma_dt_ids[] = {
{ .compatible = "sifive,fu540-c000-pdma" },
{ .compatible = "sifive,pdma0" },
{
.compatible = "sifive,fu540-c000-pdma",
}, {
.compatible = "sifive,pdma0",
}, {
.compatible = "microchip,mpfs-pdma",
.data = &mpfs_pdma,
},
{},
};
MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids);


@ -48,7 +48,8 @@
#define PDMA_ERR_STATUS_MASK GENMASK(31, 31)
/* Transfer Type */
#define PDMA_FULL_SPEED 0xFF000008
#define PDMA_FULL_SPEED 0xFF000000
#define PDMA_STRICT_ORDERING BIT(3)
/* Error Recovery */
#define MAX_RETRY 1
@ -112,8 +113,13 @@ struct sf_pdma {
struct dma_device dma_dev;
void __iomem *membase;
void __iomem *mappedbase;
u32 transfer_type;
u32 n_chans;
struct sf_pdma_chan chans[] __counted_by(n_chans);
};
struct sf_pdma_driver_platdata {
u32 quirks;
};
#endif /* _SF_PDMA_H */


@ -31,13 +31,11 @@
/**
* struct stedma40_platform_data - Configuration struct for the dma device.
*
* @dev_tx: mapping between destination event line and io address
* @dev_rx: mapping between source event line and io address
* @disabled_channels: A vector, ending with -1, that marks physical channels
* that are for different reasons not available for the driver.
* @soft_lli_chans: A vector, that marks physical channels will use LLI by SW
* which avoids HW bug that exists in some versions of the controller.
* SoftLLI introduces relink overhead that could impact performace for
* SoftLLI introduces relink overhead that could impact performance for
* certain use cases.
* @num_of_soft_lli_chans: The number of channels that needs to be configured
* to use SoftLLI.
@ -184,7 +182,7 @@ static __maybe_unused u32 d40_backup_regs[] = {
/*
* since 9540 and 8540 has the same HW revision
* use v4a for 9540 or ealier
* use v4a for 9540 or earlier
* use v4b for 8540 or later
* HW revision:
* DB8500ed has revision 0
@ -411,7 +409,7 @@ struct d40_desc {
*
* @base: The virtual address of LCLA. 18 bit aligned.
* @dma_addr: DMA address, if mapped
* @base_unaligned: The orignal kmalloc pointer, if kmalloc is used.
* @base_unaligned: The original kmalloc pointer, if kmalloc is used.
* This pointer is only there for clean-up on error.
* @pages: The number of pages needed for all physical channels.
* Only used later for clean-up on error
@ -1655,7 +1653,7 @@ static void dma_tasklet(struct tasklet_struct *t)
return;
check_pending_tx:
/* Rescue manouver if receiving double interrupts */
/* Rescue maneuver if receiving double interrupts */
if (d40c->pending_tx > 0)
d40c->pending_tx--;
spin_unlock_irqrestore(&d40c->lock, flags);
@ -3412,7 +3410,7 @@ static int __init d40_lcla_allocate(struct d40_base *base)
base->lcla_pool.base = (void *)page_list[i];
} else {
/*
* After many attempts and no succees with finding the correct
* After many attempts and no success with finding the correct
* alignment, try with allocating a big buffer.
*/
dev_warn(base->dev,

View File

@ -153,6 +153,7 @@ struct tegra_adma {
void __iomem *base_addr;
struct clk *ahub_clk;
unsigned int nr_channels;
unsigned long *dma_chan_mask;
unsigned long rx_requests_reserved;
unsigned long tx_requests_reserved;
@ -741,6 +742,10 @@ static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev)
for (i = 0; i < tdma->nr_channels; i++) {
tdc = &tdma->channels[i];
/* skip for reserved channels */
if (!tdc->tdma)
continue;
ch_reg = &tdc->ch_regs;
ch_reg->cmd = tdma_ch_read(tdc, ADMA_CH_CMD);
/* skip if channel is not active */
@ -779,6 +784,9 @@ static int __maybe_unused tegra_adma_runtime_resume(struct device *dev)
for (i = 0; i < tdma->nr_channels; i++) {
tdc = &tdma->channels[i];
/* skip for reserved channels */
if (!tdc->tdma)
continue;
ch_reg = &tdc->ch_regs;
/* skip if channel was not active earlier */
if (!ch_reg->cmd)
@ -867,10 +875,31 @@ static int tegra_adma_probe(struct platform_device *pdev)
return PTR_ERR(tdma->ahub_clk);
}
tdma->dma_chan_mask = devm_kzalloc(&pdev->dev,
BITS_TO_LONGS(tdma->nr_channels) * sizeof(unsigned long),
GFP_KERNEL);
if (!tdma->dma_chan_mask)
return -ENOMEM;
/* Enable all channels by default */
bitmap_fill(tdma->dma_chan_mask, tdma->nr_channels);
ret = of_property_read_u32_array(pdev->dev.of_node, "dma-channel-mask",
(u32 *)tdma->dma_chan_mask,
BITS_TO_U32(tdma->nr_channels));
if (ret < 0 && (ret != -EINVAL)) {
dev_err(&pdev->dev, "dma-channel-mask is not complete.\n");
return ret;
}
INIT_LIST_HEAD(&tdma->dma_dev.channels);
for (i = 0; i < tdma->nr_channels; i++) {
struct tegra_adma_chan *tdc = &tdma->channels[i];
/* skip for reserved channels */
if (!test_bit(i, tdma->dma_chan_mask))
continue;
tdc->chan_addr = tdma->base_addr + cdata->ch_base_offset
+ (cdata->ch_reg_size * i);
@ -957,8 +986,10 @@ static void tegra_adma_remove(struct platform_device *pdev)
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&tdma->dma_dev);
for (i = 0; i < tdma->nr_channels; ++i)
irq_dispose_mapping(tdma->channels[i].irq);
for (i = 0; i < tdma->nr_channels; ++i) {
if (tdma->channels[i].irq)
irq_dispose_mapping(tdma->channels[i].irq);
}
pm_runtime_disable(&pdev->dev);
}
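
The probe hunk above reads the optional dma-channel-mask property (BITS_TO_U32(nr_channels) words) into a per-controller bitmap, keeps the all-channels-enabled default when the property is absent (-EINVAL is tolerated), and the reserved channels are then skipped in probe, remove and the runtime-PM paths. The helper below is only a sketch of the mask semantics assumed here, not driver code: bit N set means ADMA channel N may be used.

#include <linux/bitmap.h>
#include <linux/bitops.h>

/*
 * Sketch: with "dma-channel-mask" = <0xfffffffe> on a 32-channel ADMA,
 * channel 0 stays reserved (e.g. for a firmware owner) and 1..31 are usable.
 */
static bool adma_chan_usable(const unsigned long *chan_mask,
                             unsigned int chan, unsigned int nr_channels)
{
        return chan < nr_channels && test_bit(chan, chan_mask);
}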

View File

@ -12,6 +12,7 @@ k3-psil-lib-objs := k3-psil.o \
k3-psil-j721s2.o \
k3-psil-am62.o \
k3-psil-am62a.o \
k3-psil-j784s4.o
k3-psil-j784s4.o \
k3-psil-am62p.o
obj-$(CONFIG_TI_K3_PSIL) += k3-psil-lib.o
obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o

View File

@ -0,0 +1,325 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com
*/
#include <linux/kernel.h>
#include "k3-psil-priv.h"
#define PSIL_PDMA_XY_TR(x) \
{ \
.thread_id = x, \
.ep_config = { \
.ep_type = PSIL_EP_PDMA_XY, \
.mapped_channel_id = -1, \
.default_flow_id = -1, \
}, \
}
#define PSIL_PDMA_XY_PKT(x) \
{ \
.thread_id = x, \
.ep_config = { \
.ep_type = PSIL_EP_PDMA_XY, \
.mapped_channel_id = -1, \
.default_flow_id = -1, \
.pkt_mode = 1, \
}, \
}
#define PSIL_ETHERNET(x, ch, flow_base, flow_cnt) \
{ \
.thread_id = x, \
.ep_config = { \
.ep_type = PSIL_EP_NATIVE, \
.pkt_mode = 1, \
.needs_epib = 1, \
.psd_size = 16, \
.mapped_channel_id = ch, \
.flow_start = flow_base, \
.flow_num = flow_cnt, \
.default_flow_id = flow_base, \
}, \
}
#define PSIL_SAUL(x, ch, flow_base, flow_cnt, default_flow, tx) \
{ \
.thread_id = x, \
.ep_config = { \
.ep_type = PSIL_EP_NATIVE, \
.pkt_mode = 1, \
.needs_epib = 1, \
.psd_size = 64, \
.mapped_channel_id = ch, \
.flow_start = flow_base, \
.flow_num = flow_cnt, \
.default_flow_id = default_flow, \
.notdpkt = tx, \
}, \
}
#define PSIL_PDMA_MCASP(x) \
{ \
.thread_id = x, \
.ep_config = { \
.ep_type = PSIL_EP_PDMA_XY, \
.pdma_acc32 = 1, \
.pdma_burst = 1, \
}, \
}
#define PSIL_CSI2RX(x) \
{ \
.thread_id = x, \
.ep_config = { \
.ep_type = PSIL_EP_NATIVE, \
}, \
}
/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
static struct psil_ep am62p_src_ep_map[] = {
/* SAUL */
PSIL_SAUL(0x7504, 20, 35, 8, 35, 0),
PSIL_SAUL(0x7505, 21, 35, 8, 36, 0),
PSIL_SAUL(0x7506, 22, 43, 8, 43, 0),
PSIL_SAUL(0x7507, 23, 43, 8, 44, 0),
/* PDMA_MAIN0 - SPI0-2 */
PSIL_PDMA_XY_PKT(0x4300),
PSIL_PDMA_XY_PKT(0x4301),
PSIL_PDMA_XY_PKT(0x4302),
PSIL_PDMA_XY_PKT(0x4303),
PSIL_PDMA_XY_PKT(0x4304),
PSIL_PDMA_XY_PKT(0x4305),
PSIL_PDMA_XY_PKT(0x4306),
PSIL_PDMA_XY_PKT(0x4307),
PSIL_PDMA_XY_PKT(0x4308),
PSIL_PDMA_XY_PKT(0x4309),
PSIL_PDMA_XY_PKT(0x430a),
PSIL_PDMA_XY_PKT(0x430b),
/* PDMA_MAIN1 - UART0-6 */
PSIL_PDMA_XY_PKT(0x4400),
PSIL_PDMA_XY_PKT(0x4401),
PSIL_PDMA_XY_PKT(0x4402),
PSIL_PDMA_XY_PKT(0x4403),
PSIL_PDMA_XY_PKT(0x4404),
PSIL_PDMA_XY_PKT(0x4405),
PSIL_PDMA_XY_PKT(0x4406),
/* PDMA_MAIN2 - MCASP0-2 */
PSIL_PDMA_MCASP(0x4500),
PSIL_PDMA_MCASP(0x4501),
PSIL_PDMA_MCASP(0x4502),
/* CPSW3G */
PSIL_ETHERNET(0x4600, 19, 19, 16),
/* CSI2RX */
PSIL_CSI2RX(0x5000),
PSIL_CSI2RX(0x5001),
PSIL_CSI2RX(0x5002),
PSIL_CSI2RX(0x5003),
PSIL_CSI2RX(0x5004),
PSIL_CSI2RX(0x5005),
PSIL_CSI2RX(0x5006),
PSIL_CSI2RX(0x5007),
PSIL_CSI2RX(0x5008),
PSIL_CSI2RX(0x5009),
PSIL_CSI2RX(0x500a),
PSIL_CSI2RX(0x500b),
PSIL_CSI2RX(0x500c),
PSIL_CSI2RX(0x500d),
PSIL_CSI2RX(0x500e),
PSIL_CSI2RX(0x500f),
PSIL_CSI2RX(0x5010),
PSIL_CSI2RX(0x5011),
PSIL_CSI2RX(0x5012),
PSIL_CSI2RX(0x5013),
PSIL_CSI2RX(0x5014),
PSIL_CSI2RX(0x5015),
PSIL_CSI2RX(0x5016),
PSIL_CSI2RX(0x5017),
PSIL_CSI2RX(0x5018),
PSIL_CSI2RX(0x5019),
PSIL_CSI2RX(0x501a),
PSIL_CSI2RX(0x501b),
PSIL_CSI2RX(0x501c),
PSIL_CSI2RX(0x501d),
PSIL_CSI2RX(0x501e),
PSIL_CSI2RX(0x501f),
PSIL_CSI2RX(0x5000),
PSIL_CSI2RX(0x5001),
PSIL_CSI2RX(0x5002),
PSIL_CSI2RX(0x5003),
PSIL_CSI2RX(0x5004),
PSIL_CSI2RX(0x5005),
PSIL_CSI2RX(0x5006),
PSIL_CSI2RX(0x5007),
PSIL_CSI2RX(0x5008),
PSIL_CSI2RX(0x5009),
PSIL_CSI2RX(0x500a),
PSIL_CSI2RX(0x500b),
PSIL_CSI2RX(0x500c),
PSIL_CSI2RX(0x500d),
PSIL_CSI2RX(0x500e),
PSIL_CSI2RX(0x500f),
PSIL_CSI2RX(0x5010),
PSIL_CSI2RX(0x5011),
PSIL_CSI2RX(0x5012),
PSIL_CSI2RX(0x5013),
PSIL_CSI2RX(0x5014),
PSIL_CSI2RX(0x5015),
PSIL_CSI2RX(0x5016),
PSIL_CSI2RX(0x5017),
PSIL_CSI2RX(0x5018),
PSIL_CSI2RX(0x5019),
PSIL_CSI2RX(0x501a),
PSIL_CSI2RX(0x501b),
PSIL_CSI2RX(0x501c),
PSIL_CSI2RX(0x501d),
PSIL_CSI2RX(0x501e),
PSIL_CSI2RX(0x501f),
/* CSIRX 1-3 (only for J722S) */
PSIL_CSI2RX(0x5100),
PSIL_CSI2RX(0x5101),
PSIL_CSI2RX(0x5102),
PSIL_CSI2RX(0x5103),
PSIL_CSI2RX(0x5104),
PSIL_CSI2RX(0x5105),
PSIL_CSI2RX(0x5106),
PSIL_CSI2RX(0x5107),
PSIL_CSI2RX(0x5108),
PSIL_CSI2RX(0x5109),
PSIL_CSI2RX(0x510a),
PSIL_CSI2RX(0x510b),
PSIL_CSI2RX(0x510c),
PSIL_CSI2RX(0x510d),
PSIL_CSI2RX(0x510e),
PSIL_CSI2RX(0x510f),
PSIL_CSI2RX(0x5110),
PSIL_CSI2RX(0x5111),
PSIL_CSI2RX(0x5112),
PSIL_CSI2RX(0x5113),
PSIL_CSI2RX(0x5114),
PSIL_CSI2RX(0x5115),
PSIL_CSI2RX(0x5116),
PSIL_CSI2RX(0x5117),
PSIL_CSI2RX(0x5118),
PSIL_CSI2RX(0x5119),
PSIL_CSI2RX(0x511a),
PSIL_CSI2RX(0x511b),
PSIL_CSI2RX(0x511c),
PSIL_CSI2RX(0x511d),
PSIL_CSI2RX(0x511e),
PSIL_CSI2RX(0x511f),
PSIL_CSI2RX(0x5200),
PSIL_CSI2RX(0x5201),
PSIL_CSI2RX(0x5202),
PSIL_CSI2RX(0x5203),
PSIL_CSI2RX(0x5204),
PSIL_CSI2RX(0x5205),
PSIL_CSI2RX(0x5206),
PSIL_CSI2RX(0x5207),
PSIL_CSI2RX(0x5208),
PSIL_CSI2RX(0x5209),
PSIL_CSI2RX(0x520a),
PSIL_CSI2RX(0x520b),
PSIL_CSI2RX(0x520c),
PSIL_CSI2RX(0x520d),
PSIL_CSI2RX(0x520e),
PSIL_CSI2RX(0x520f),
PSIL_CSI2RX(0x5210),
PSIL_CSI2RX(0x5211),
PSIL_CSI2RX(0x5212),
PSIL_CSI2RX(0x5213),
PSIL_CSI2RX(0x5214),
PSIL_CSI2RX(0x5215),
PSIL_CSI2RX(0x5216),
PSIL_CSI2RX(0x5217),
PSIL_CSI2RX(0x5218),
PSIL_CSI2RX(0x5219),
PSIL_CSI2RX(0x521a),
PSIL_CSI2RX(0x521b),
PSIL_CSI2RX(0x521c),
PSIL_CSI2RX(0x521d),
PSIL_CSI2RX(0x521e),
PSIL_CSI2RX(0x521f),
PSIL_CSI2RX(0x5300),
PSIL_CSI2RX(0x5301),
PSIL_CSI2RX(0x5302),
PSIL_CSI2RX(0x5303),
PSIL_CSI2RX(0x5304),
PSIL_CSI2RX(0x5305),
PSIL_CSI2RX(0x5306),
PSIL_CSI2RX(0x5307),
PSIL_CSI2RX(0x5308),
PSIL_CSI2RX(0x5309),
PSIL_CSI2RX(0x530a),
PSIL_CSI2RX(0x530b),
PSIL_CSI2RX(0x530c),
PSIL_CSI2RX(0x530d),
PSIL_CSI2RX(0x530e),
PSIL_CSI2RX(0x530f),
PSIL_CSI2RX(0x5310),
PSIL_CSI2RX(0x5311),
PSIL_CSI2RX(0x5312),
PSIL_CSI2RX(0x5313),
PSIL_CSI2RX(0x5314),
PSIL_CSI2RX(0x5315),
PSIL_CSI2RX(0x5316),
PSIL_CSI2RX(0x5317),
PSIL_CSI2RX(0x5318),
PSIL_CSI2RX(0x5319),
PSIL_CSI2RX(0x531a),
PSIL_CSI2RX(0x531b),
PSIL_CSI2RX(0x531c),
PSIL_CSI2RX(0x531d),
PSIL_CSI2RX(0x531e),
PSIL_CSI2RX(0x531f),
};
/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
static struct psil_ep am62p_dst_ep_map[] = {
/* SAUL */
PSIL_SAUL(0xf500, 27, 83, 8, 83, 1),
PSIL_SAUL(0xf501, 28, 91, 8, 91, 1),
/* PDMA_MAIN0 - SPI0-2 */
PSIL_PDMA_XY_PKT(0xc300),
PSIL_PDMA_XY_PKT(0xc301),
PSIL_PDMA_XY_PKT(0xc302),
PSIL_PDMA_XY_PKT(0xc303),
PSIL_PDMA_XY_PKT(0xc304),
PSIL_PDMA_XY_PKT(0xc305),
PSIL_PDMA_XY_PKT(0xc306),
PSIL_PDMA_XY_PKT(0xc307),
PSIL_PDMA_XY_PKT(0xc308),
PSIL_PDMA_XY_PKT(0xc309),
PSIL_PDMA_XY_PKT(0xc30a),
PSIL_PDMA_XY_PKT(0xc30b),
/* PDMA_MAIN1 - UART0-6 */
PSIL_PDMA_XY_PKT(0xc400),
PSIL_PDMA_XY_PKT(0xc401),
PSIL_PDMA_XY_PKT(0xc402),
PSIL_PDMA_XY_PKT(0xc403),
PSIL_PDMA_XY_PKT(0xc404),
PSIL_PDMA_XY_PKT(0xc405),
PSIL_PDMA_XY_PKT(0xc406),
/* PDMA_MAIN2 - MCASP0-2 */
PSIL_PDMA_MCASP(0xc500),
PSIL_PDMA_MCASP(0xc501),
PSIL_PDMA_MCASP(0xc502),
/* CPSW3G */
PSIL_ETHERNET(0xc600, 19, 19, 8),
PSIL_ETHERNET(0xc601, 20, 27, 8),
PSIL_ETHERNET(0xc602, 21, 35, 8),
PSIL_ETHERNET(0xc603, 22, 43, 8),
PSIL_ETHERNET(0xc604, 23, 51, 8),
PSIL_ETHERNET(0xc605, 24, 59, 8),
PSIL_ETHERNET(0xc606, 25, 67, 8),
PSIL_ETHERNET(0xc607, 26, 75, 8),
};
struct psil_ep_map am62p_ep_map = {
.name = "am62p",
.src = am62p_src_ep_map,
.src_count = ARRAY_SIZE(am62p_src_ep_map),
.dst = am62p_dst_ep_map,
.dst_count = ARRAY_SIZE(am62p_dst_ep_map),
};

View File

@ -45,5 +45,6 @@ extern struct psil_ep_map j721s2_ep_map;
extern struct psil_ep_map am62_ep_map;
extern struct psil_ep_map am62a_ep_map;
extern struct psil_ep_map j784s4_ep_map;
extern struct psil_ep_map am62p_ep_map;
#endif /* K3_PSIL_PRIV_H_ */

View File

@ -26,6 +26,8 @@ static const struct soc_device_attribute k3_soc_devices[] = {
{ .family = "AM62X", .data = &am62_ep_map },
{ .family = "AM62AX", .data = &am62a_ep_map },
{ .family = "J784S4", .data = &j784s4_ep_map },
{ .family = "AM62PX", .data = &am62p_ep_map },
{ .family = "J722S", .data = &am62p_ep_map },
{ /* sentinel */ }
};

View File

@ -4441,6 +4441,8 @@ static const struct soc_device_attribute k3_soc_devices[] = {
{ .family = "AM62X", .data = &am64_soc_data },
{ .family = "AM62AX", .data = &am64_soc_data },
{ .family = "J784S4", .data = &j721e_soc_data },
{ .family = "AM62PX", .data = &am64_soc_data },
{ .family = "J722S", .data = &am64_soc_data },
{ /* sentinel */ }
};

View File

@ -453,7 +453,7 @@ disable_clk:
return ret;
}
static int uniphier_mdmac_remove(struct platform_device *pdev)
static void uniphier_mdmac_remove(struct platform_device *pdev)
{
struct uniphier_mdmac_device *mdev = platform_get_drvdata(pdev);
struct dma_chan *chan;
@ -468,16 +468,21 @@ static int uniphier_mdmac_remove(struct platform_device *pdev)
*/
list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
ret = dmaengine_terminate_sync(chan);
if (ret)
return ret;
if (ret) {
/*
* This results in resource leakage and maybe also
* use-after-free errors as e.g. *mdev is kfreed.
*/
dev_alert(&pdev->dev, "Failed to terminate channel %d (%pe)\n",
chan->chan_id, ERR_PTR(ret));
return;
}
uniphier_mdmac_free_chan_resources(chan);
}
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&mdev->ddev);
clk_disable_unprepare(mdev->clk);
return 0;
}
static const struct of_device_id uniphier_mdmac_match[] = {
@ -488,7 +493,7 @@ MODULE_DEVICE_TABLE(of, uniphier_mdmac_match);
static struct platform_driver uniphier_mdmac_driver = {
.probe = uniphier_mdmac_probe,
.remove = uniphier_mdmac_remove,
.remove_new = uniphier_mdmac_remove,
.driver = {
.name = "uniphier-mio-dmac",
.of_match_table = uniphier_mdmac_match,
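
The UniPhier MIO DMAC hunk above (and the XDMAC one that follows) applies the tree-wide "remove callback returning void" conversion: the callback loses its return value, failures can only be logged since the device goes away regardless, and the driver registers it through .remove_new. A generic sketch of the resulting shape is shown below; the foo_* names are placeholders, not code from this series.

#include <linux/platform_device.h>

static int foo_dmac_probe(struct platform_device *pdev)
{
        return 0;       /* probe is unchanged by the conversion */
}

static void foo_dmac_remove(struct platform_device *pdev)
{
        /* Errors can only be reported; the device is removed either way. */
        dev_warn(&pdev->dev, "nothing to tear down\n");
}

static struct platform_driver foo_dmac_driver = {
        .probe          = foo_dmac_probe,
        .remove_new     = foo_dmac_remove,      /* was .remove, returning int */
        .driver         = {
                .name   = "foo-dmac",
        },
};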

View File

@ -563,7 +563,7 @@ out_unregister_dmac:
return ret;
}
static int uniphier_xdmac_remove(struct platform_device *pdev)
static void uniphier_xdmac_remove(struct platform_device *pdev)
{
struct uniphier_xdmac_device *xdev = platform_get_drvdata(pdev);
struct dma_device *ddev = &xdev->ddev;
@ -579,15 +579,20 @@ static int uniphier_xdmac_remove(struct platform_device *pdev)
*/
list_for_each_entry(chan, &ddev->channels, device_node) {
ret = dmaengine_terminate_sync(chan);
if (ret)
return ret;
if (ret) {
/*
* This results in resource leakage and maybe also
* use-after-free errors as e.g. *xdev is kfreed.
*/
dev_alert(&pdev->dev, "Failed to terminate channel %d (%pe)\n",
chan->chan_id, ERR_PTR(ret));
return;
}
uniphier_xdmac_free_chan_resources(chan);
}
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(ddev);
return 0;
}
static const struct of_device_id uniphier_xdmac_match[] = {
@ -598,7 +603,7 @@ MODULE_DEVICE_TABLE(of, uniphier_xdmac_match);
static struct platform_driver uniphier_xdmac_driver = {
.probe = uniphier_xdmac_probe,
.remove = uniphier_xdmac_remove,
.remove_new = uniphier_xdmac_remove,
.driver = {
.name = "uniphier-xdmac",
.of_match_table = uniphier_xdmac_match,

View File

@ -64,9 +64,10 @@ struct xdma_hw_desc {
__le64 next_desc;
};
#define XDMA_DESC_SIZE sizeof(struct xdma_hw_desc)
#define XDMA_DESC_BLOCK_SIZE (XDMA_DESC_SIZE * XDMA_DESC_ADJACENT)
#define XDMA_DESC_BLOCK_ALIGN 4096
#define XDMA_DESC_SIZE sizeof(struct xdma_hw_desc)
#define XDMA_DESC_BLOCK_SIZE (XDMA_DESC_SIZE * XDMA_DESC_ADJACENT)
#define XDMA_DESC_BLOCK_ALIGN 32
#define XDMA_DESC_BLOCK_BOUNDARY 4096
/*
* Channel registers
@ -76,6 +77,7 @@ struct xdma_hw_desc {
#define XDMA_CHAN_CONTROL_W1S 0x8
#define XDMA_CHAN_CONTROL_W1C 0xc
#define XDMA_CHAN_STATUS 0x40
#define XDMA_CHAN_STATUS_RC 0x44
#define XDMA_CHAN_COMPLETED_DESC 0x48
#define XDMA_CHAN_ALIGNMENTS 0x4c
#define XDMA_CHAN_INTR_ENABLE 0x90
@ -101,6 +103,7 @@ struct xdma_hw_desc {
#define CHAN_CTRL_IE_MAGIC_STOPPED BIT(4)
#define CHAN_CTRL_IE_IDLE_STOPPED BIT(6)
#define CHAN_CTRL_IE_READ_ERROR GENMASK(13, 9)
#define CHAN_CTRL_IE_WRITE_ERROR GENMASK(18, 14)
#define CHAN_CTRL_IE_DESC_ERROR GENMASK(23, 19)
#define CHAN_CTRL_NON_INCR_ADDR BIT(25)
#define CHAN_CTRL_POLL_MODE_WB BIT(26)
@ -111,8 +114,17 @@ struct xdma_hw_desc {
CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \
CHAN_CTRL_IE_MAGIC_STOPPED | \
CHAN_CTRL_IE_READ_ERROR | \
CHAN_CTRL_IE_WRITE_ERROR | \
CHAN_CTRL_IE_DESC_ERROR)
#define XDMA_CHAN_STATUS_MASK CHAN_CTRL_START
#define XDMA_CHAN_ERROR_MASK (CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \
CHAN_CTRL_IE_MAGIC_STOPPED | \
CHAN_CTRL_IE_READ_ERROR | \
CHAN_CTRL_IE_WRITE_ERROR | \
CHAN_CTRL_IE_DESC_ERROR)
/* bits of the channel interrupt enable mask */
#define CHAN_IM_DESC_ERROR BIT(19)
#define CHAN_IM_READ_ERROR BIT(9)
@ -134,18 +146,6 @@ struct xdma_hw_desc {
#define XDMA_SGDMA_DESC_ADJ 0x4088
#define XDMA_SGDMA_DESC_CREDIT 0x408c
/* bits of the SG DMA control register */
#define XDMA_CTRL_RUN_STOP BIT(0)
#define XDMA_CTRL_IE_DESC_STOPPED BIT(1)
#define XDMA_CTRL_IE_DESC_COMPLETED BIT(2)
#define XDMA_CTRL_IE_DESC_ALIGN_MISMATCH BIT(3)
#define XDMA_CTRL_IE_MAGIC_STOPPED BIT(4)
#define XDMA_CTRL_IE_IDLE_STOPPED BIT(6)
#define XDMA_CTRL_IE_READ_ERROR GENMASK(13, 9)
#define XDMA_CTRL_IE_DESC_ERROR GENMASK(23, 19)
#define XDMA_CTRL_NON_INCR_ADDR BIT(25)
#define XDMA_CTRL_POLL_MODE_WB BIT(26)
/*
* interrupt registers
*/

View File

@ -78,27 +78,31 @@ struct xdma_chan {
* @vdesc: Virtual DMA descriptor
* @chan: DMA channel pointer
* @dir: Transferring direction of the request
* @dev_addr: Physical address on DMA device side
* @desc_blocks: Hardware descriptor blocks
* @dblk_num: Number of hardware descriptor blocks
* @desc_num: Number of hardware descriptors
* @completed_desc_num: Completed hardware descriptors
* @cyclic: Cyclic transfer vs. scatter-gather
* @interleaved_dma: Interleaved DMA transfer
* @periods: Number of periods in the cyclic transfer
* @period_size: Size of a period in bytes in cyclic transfers
* @frames_left: Number of frames left in interleaved DMA transfer
* @error: tx error flag
*/
struct xdma_desc {
struct virt_dma_desc vdesc;
struct xdma_chan *chan;
enum dma_transfer_direction dir;
u64 dev_addr;
struct xdma_desc_block *desc_blocks;
u32 dblk_num;
u32 desc_num;
u32 completed_desc_num;
bool cyclic;
bool interleaved_dma;
u32 periods;
u32 period_size;
u32 frames_left;
bool error;
};
#define XDMA_DEV_STATUS_REG_DMA BIT(0)
@ -276,6 +280,7 @@ xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num, bool cyclic)
sw_desc->chan = chan;
sw_desc->desc_num = desc_num;
sw_desc->cyclic = cyclic;
sw_desc->error = false;
dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT);
sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks),
GFP_NOWAIT);
@ -371,6 +376,31 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
return ret;
xchan->busy = true;
return 0;
}
/**
* xdma_xfer_stop - Stop DMA transfer
* @xchan: DMA channel pointer
*/
static int xdma_xfer_stop(struct xdma_chan *xchan)
{
int ret;
u32 val;
struct xdma_device *xdev = xchan->xdev_hdl;
/* clear run stop bit to prevent any further auto-triggering */
ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
CHAN_CTRL_RUN_STOP);
if (ret)
return ret;
/* Clear the channel status register */
ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &val);
if (ret)
return ret;
return 0;
}
@ -475,6 +505,84 @@ static void xdma_issue_pending(struct dma_chan *chan)
spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
}
/**
* xdma_terminate_all - Terminate all transactions
* @chan: DMA channel pointer
*/
static int xdma_terminate_all(struct dma_chan *chan)
{
struct xdma_chan *xdma_chan = to_xdma_chan(chan);
struct virt_dma_desc *vd;
unsigned long flags;
LIST_HEAD(head);
xdma_xfer_stop(xdma_chan);
spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
xdma_chan->busy = false;
vd = vchan_next_desc(&xdma_chan->vchan);
if (vd) {
list_del(&vd->node);
dma_cookie_complete(&vd->tx);
vchan_terminate_vdesc(vd);
}
vchan_get_all_descriptors(&xdma_chan->vchan, &head);
list_splice_tail(&head, &xdma_chan->vchan.desc_terminated);
spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
return 0;
}
/**
* xdma_synchronize - Synchronize terminated transactions
* @chan: DMA channel pointer
*/
static void xdma_synchronize(struct dma_chan *chan)
{
struct xdma_chan *xdma_chan = to_xdma_chan(chan);
vchan_synchronize(&xdma_chan->vchan);
}
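
With device_terminate_all and device_synchronize wired up, a consumer can use the standard dmaengine teardown sequence against this driver. A hedged client-side sketch (channel acquisition and descriptor setup omitted):

#include <linux/dmaengine.h>

static void stop_xdma_channel(struct dma_chan *chan)
{
        /* Ends up in xdma_terminate_all(): engine stopped, descriptors parked. */
        dmaengine_terminate_async(chan);

        /* Ends up in xdma_synchronize(); buffers may be reclaimed afterwards. */
        dmaengine_synchronize(chan);
}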
/**
* xdma_fill_descs - Fill hardware descriptors with contiguous memory block addresses
* @sw_desc: tx descriptor state container
* @src_addr: Value for the ->src_addr field of the first descriptor
* @dst_addr: Value for the ->dst_addr field of the first descriptor
* @size: Total size of a contiguous memory block
* @filled_descs_num: Number of hardware descriptors already filled for this sw_desc
*/
static inline u32 xdma_fill_descs(struct xdma_desc *sw_desc, u64 src_addr,
u64 dst_addr, u32 size, u32 filled_descs_num)
{
u32 left = size, len, desc_num = filled_descs_num;
struct xdma_desc_block *dblk;
struct xdma_hw_desc *desc;
dblk = sw_desc->desc_blocks + (desc_num / XDMA_DESC_ADJACENT);
desc = dblk->virt_addr;
desc += desc_num & XDMA_DESC_ADJACENT_MASK;
do {
len = min_t(u32, left, XDMA_DESC_BLEN_MAX);
/* set hardware descriptor */
desc->bytes = cpu_to_le32(len);
desc->src_addr = cpu_to_le64(src_addr);
desc->dst_addr = cpu_to_le64(dst_addr);
if (!(++desc_num & XDMA_DESC_ADJACENT_MASK))
desc = (++dblk)->virt_addr;
else
desc++;
src_addr += len;
dst_addr += len;
left -= len;
} while (left);
return desc_num - filled_descs_num;
}
/**
* xdma_prep_device_sg - prepare a descriptor for a DMA transaction
* @chan: DMA channel pointer
@ -491,13 +599,10 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
{
struct xdma_chan *xdma_chan = to_xdma_chan(chan);
struct dma_async_tx_descriptor *tx_desc;
u32 desc_num = 0, i, len, rest;
struct xdma_desc_block *dblk;
struct xdma_hw_desc *desc;
struct xdma_desc *sw_desc;
u64 dev_addr, *src, *dst;
u32 desc_num = 0, i;
u64 addr, dev_addr, *src, *dst;
struct scatterlist *sg;
u64 addr;
for_each_sg(sgl, sg, sg_len, i)
desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX);
@ -506,6 +611,8 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (!sw_desc)
return NULL;
sw_desc->dir = dir;
sw_desc->cyclic = false;
sw_desc->interleaved_dma = false;
if (dir == DMA_MEM_TO_DEV) {
dev_addr = xdma_chan->cfg.dst_addr;
@ -517,32 +624,11 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
dst = &addr;
}
dblk = sw_desc->desc_blocks;
desc = dblk->virt_addr;
desc_num = 1;
desc_num = 0;
for_each_sg(sgl, sg, sg_len, i) {
addr = sg_dma_address(sg);
rest = sg_dma_len(sg);
do {
len = min_t(u32, rest, XDMA_DESC_BLEN_MAX);
/* set hardware descriptor */
desc->bytes = cpu_to_le32(len);
desc->src_addr = cpu_to_le64(*src);
desc->dst_addr = cpu_to_le64(*dst);
if (!(desc_num & XDMA_DESC_ADJACENT_MASK)) {
dblk++;
desc = dblk->virt_addr;
} else {
desc++;
}
desc_num++;
dev_addr += len;
addr += len;
rest -= len;
} while (rest);
desc_num += xdma_fill_descs(sw_desc, *src, *dst, sg_dma_len(sg), desc_num);
dev_addr += sg_dma_len(sg);
}
tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
@ -576,9 +662,9 @@ xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
struct xdma_device *xdev = xdma_chan->xdev_hdl;
unsigned int periods = size / period_size;
struct dma_async_tx_descriptor *tx_desc;
struct xdma_desc_block *dblk;
struct xdma_hw_desc *desc;
struct xdma_desc *sw_desc;
u64 addr, dev_addr, *src, *dst;
u32 desc_num;
unsigned int i;
/*
@ -602,22 +688,23 @@ xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
sw_desc->periods = periods;
sw_desc->period_size = period_size;
sw_desc->dir = dir;
sw_desc->interleaved_dma = false;
dblk = sw_desc->desc_blocks;
desc = dblk->virt_addr;
addr = address;
if (dir == DMA_MEM_TO_DEV) {
dev_addr = xdma_chan->cfg.dst_addr;
src = &addr;
dst = &dev_addr;
} else {
dev_addr = xdma_chan->cfg.src_addr;
src = &dev_addr;
dst = &addr;
}
/* fill hardware descriptor */
desc_num = 0;
for (i = 0; i < periods; i++) {
desc->bytes = cpu_to_le32(period_size);
if (dir == DMA_MEM_TO_DEV) {
desc->src_addr = cpu_to_le64(address + i * period_size);
desc->dst_addr = cpu_to_le64(xdma_chan->cfg.dst_addr);
} else {
desc->src_addr = cpu_to_le64(xdma_chan->cfg.src_addr);
desc->dst_addr = cpu_to_le64(address + i * period_size);
}
desc++;
desc_num += xdma_fill_descs(sw_desc, *src, *dst, period_size, desc_num);
addr += i * period_size;
}
tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
@ -632,6 +719,57 @@ failed:
return NULL;
}
/**
* xdma_prep_interleaved_dma - Prepare virtual descriptor for interleaved DMA transfers
* @chan: DMA channel
* @xt: DMA transfer template
* @flags: tx flags
*/
static struct dma_async_tx_descriptor *
xdma_prep_interleaved_dma(struct dma_chan *chan,
struct dma_interleaved_template *xt,
unsigned long flags)
{
int i;
u32 desc_num = 0, period_size = 0;
struct dma_async_tx_descriptor *tx_desc;
struct xdma_chan *xchan = to_xdma_chan(chan);
struct xdma_desc *sw_desc;
u64 src_addr, dst_addr;
for (i = 0; i < xt->frame_size; ++i)
desc_num += DIV_ROUND_UP(xt->sgl[i].size, XDMA_DESC_BLEN_MAX);
sw_desc = xdma_alloc_desc(xchan, desc_num, false);
if (!sw_desc)
return NULL;
sw_desc->dir = xt->dir;
sw_desc->interleaved_dma = true;
sw_desc->cyclic = flags & DMA_PREP_REPEAT;
sw_desc->frames_left = xt->numf;
sw_desc->periods = xt->numf;
desc_num = 0;
src_addr = xt->src_start;
dst_addr = xt->dst_start;
for (i = 0; i < xt->frame_size; ++i) {
desc_num += xdma_fill_descs(sw_desc, src_addr, dst_addr, xt->sgl[i].size, desc_num);
src_addr += dmaengine_get_src_icg(xt, &xt->sgl[i]) + xt->src_inc ?
xt->sgl[i].size : 0;
dst_addr += dmaengine_get_dst_icg(xt, &xt->sgl[i]) + xt->dst_inc ?
xt->sgl[i].size : 0;
period_size += xt->sgl[i].size;
}
sw_desc->period_size = period_size;
tx_desc = vchan_tx_prep(&xchan->vchan, &sw_desc->vdesc, flags);
if (tx_desc)
return tx_desc;
xdma_free_desc(&sw_desc->vdesc);
return NULL;
}
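
A consumer view of the new prep callback: DMA_PREP_REPEAT asks the interrupt handler to restart the frame set each time it completes, until a descriptor carrying DMA_PREP_LOAD_EOT is queued behind it. The sketch below uses made-up sizes and omits most error handling; it illustrates the dmaengine client API, it is not code from this series.

#include <linux/dmaengine.h>
#include <linux/slab.h>

static int submit_repeating_frames(struct dma_chan *chan,
                                   dma_addr_t src, dma_addr_t dst)
{
        struct dma_interleaved_template *xt;
        struct dma_async_tx_descriptor *tx;

        xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
        if (!xt)
                return -ENOMEM;

        xt->dir = DMA_MEM_TO_DEV;
        xt->src_start = src;
        xt->dst_start = dst;
        xt->src_inc = true;             /* walk through memory ... */
        xt->dst_inc = false;            /* ... into a fixed device window */
        xt->numf = 8;                   /* eight frames per transfer */
        xt->frame_size = 1;             /* one chunk per frame */
        xt->sgl[0].size = 4096;
        xt->sgl[0].icg = 0;

        tx = dmaengine_prep_interleaved_dma(chan, xt,
                                            DMA_PREP_REPEAT | DMA_PREP_INTERRUPT);
        kfree(xt);      /* safe here: the xdma prep above copies what it needs */
        if (!tx)
                return -ENOMEM;

        dmaengine_submit(tx);
        dma_async_issue_pending(chan);
        return 0;
}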
/**
* xdma_device_config - Configure the DMA channel
* @chan: DMA channel
@ -677,9 +815,8 @@ static int xdma_alloc_chan_resources(struct dma_chan *chan)
return -EINVAL;
}
xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan),
dev, XDMA_DESC_BLOCK_SIZE,
XDMA_DESC_BLOCK_ALIGN, 0);
xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan), dev, XDMA_DESC_BLOCK_SIZE,
XDMA_DESC_BLOCK_ALIGN, XDMA_DESC_BLOCK_BOUNDARY);
if (!xdma_chan->desc_pool) {
xdma_err(xdev, "unable to allocate descriptor pool");
return -ENOMEM;
@ -706,20 +843,20 @@ static enum dma_status xdma_tx_status(struct dma_chan *chan, dma_cookie_t cookie
spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
vd = vchan_find_desc(&xdma_chan->vchan, cookie);
if (vd)
desc = to_xdma_desc(vd);
if (!desc || !desc->cyclic) {
spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
return ret;
if (!vd)
goto out;
desc = to_xdma_desc(vd);
if (desc->error) {
ret = DMA_ERROR;
} else if (desc->cyclic) {
period_idx = desc->completed_desc_num % desc->periods;
residue = (desc->periods - period_idx) * desc->period_size;
dma_set_residue(state, residue);
}
period_idx = desc->completed_desc_num % desc->periods;
residue = (desc->periods - period_idx) * desc->period_size;
out:
spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
dma_set_residue(state, residue);
return ret;
}
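
The reworked status callback now reports DMA_ERROR once the interrupt handler has marked a descriptor as failed, and computes a residue only for cyclic descriptors. A small consumer-side sketch (names hypothetical):

#include <linux/dmaengine.h>

static u32 cyclic_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
        struct dma_tx_state state;

        if (dmaengine_tx_status(chan, cookie, &state) == DMA_ERROR)
                return 0;       /* desc->error was set by the channel ISR */

        return state.residue;   /* remaining bytes in the current cycle */
}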
@ -732,11 +869,12 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
{
struct xdma_chan *xchan = dev_id;
u32 complete_desc_num = 0;
struct xdma_device *xdev;
struct virt_dma_desc *vd;
struct xdma_device *xdev = xchan->xdev_hdl;
struct virt_dma_desc *vd, *next_vd;
struct xdma_desc *desc;
int ret;
u32 st;
bool repeat_tx;
spin_lock(&xchan->vchan.lock);
@ -745,45 +883,75 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
if (!vd)
goto out;
xchan->busy = false;
desc = to_xdma_desc(vd);
xdev = xchan->xdev_hdl;
/* Clear-on-read the status register */
ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &st);
if (ret)
goto out;
st &= XDMA_CHAN_STATUS_MASK;
if ((st & XDMA_CHAN_ERROR_MASK) ||
!(st & (CHAN_CTRL_IE_DESC_COMPLETED | CHAN_CTRL_IE_DESC_STOPPED))) {
desc->error = true;
xdma_err(xdev, "channel error, status register value: 0x%x", st);
goto out;
}
ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_COMPLETED_DESC,
&complete_desc_num);
if (ret)
goto out;
desc->completed_desc_num += complete_desc_num;
desc = to_xdma_desc(vd);
if (desc->interleaved_dma) {
xchan->busy = false;
desc->completed_desc_num += complete_desc_num;
if (complete_desc_num == XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT) {
xdma_xfer_start(xchan);
goto out;
}
if (desc->cyclic) {
ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS,
&st);
if (ret)
/* last desc of any frame */
desc->frames_left--;
if (desc->frames_left)
goto out;
regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_STATUS, st);
/* last desc of the last frame */
repeat_tx = vd->tx.flags & DMA_PREP_REPEAT;
next_vd = list_first_entry_or_null(&vd->node, struct virt_dma_desc, node);
if (next_vd)
repeat_tx = repeat_tx && !(next_vd->tx.flags & DMA_PREP_LOAD_EOT);
if (repeat_tx) {
desc->frames_left = desc->periods;
desc->completed_desc_num = 0;
vchan_cyclic_callback(vd);
} else {
list_del(&vd->node);
vchan_cookie_complete(vd);
}
/* start (or continue) the tx of a first desc on the vc.desc_issued list, if any */
xdma_xfer_start(xchan);
} else if (!desc->cyclic) {
xchan->busy = false;
desc->completed_desc_num += complete_desc_num;
/* if all data blocks are transferred, remove and complete the request */
if (desc->completed_desc_num == desc->desc_num) {
list_del(&vd->node);
vchan_cookie_complete(vd);
goto out;
}
if (desc->completed_desc_num > desc->desc_num ||
complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT)
goto out;
/* transfer the rest of data */
xdma_xfer_start(xchan);
} else {
desc->completed_desc_num = complete_desc_num;
vchan_cyclic_callback(vd);
goto out;
}
/*
* if all data blocks are transferred, remove and complete the request
*/
if (desc->completed_desc_num == desc->desc_num) {
list_del(&vd->node);
vchan_cookie_complete(vd);
goto out;
}
if (desc->completed_desc_num > desc->desc_num ||
complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT)
goto out;
/* transfer the rest of data (SG only) */
xdma_xfer_start(xchan);
out:
spin_unlock(&xchan->vchan.lock);
return IRQ_HANDLED;
@ -1080,6 +1248,9 @@ static int xdma_probe(struct platform_device *pdev)
dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask);
dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask);
dma_cap_set(DMA_CYCLIC, xdev->dma_dev.cap_mask);
dma_cap_set(DMA_INTERLEAVE, xdev->dma_dev.cap_mask);
dma_cap_set(DMA_REPEAT, xdev->dma_dev.cap_mask);
dma_cap_set(DMA_LOAD_EOT, xdev->dma_dev.cap_mask);
xdev->dma_dev.dev = &pdev->dev;
xdev->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
@ -1089,10 +1260,13 @@ static int xdma_probe(struct platform_device *pdev)
xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg;
xdev->dma_dev.device_config = xdma_device_config;
xdev->dma_dev.device_issue_pending = xdma_issue_pending;
xdev->dma_dev.device_terminate_all = xdma_terminate_all;
xdev->dma_dev.device_synchronize = xdma_synchronize;
xdev->dma_dev.filter.map = pdata->device_map;
xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
xdev->dma_dev.filter.fn = xdma_filter_fn;
xdev->dma_dev.device_prep_dma_cyclic = xdma_prep_dma_cyclic;
xdev->dma_dev.device_prep_interleaved_dma = xdma_prep_interleaved_dma;
ret = dma_async_device_register(&xdev->dma_dev);
if (ret) {

View File

@ -309,7 +309,7 @@ static ssize_t xilinx_dpdma_debugfs_desc_done_irq_read(char *buf)
out_str_len = strlen(XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR);
out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE,
out_str_len);
out_str_len + 1);
snprintf(buf, out_str_len, "%d",
dpdma_debugfs.xilinx_dpdma_irq_done_count);

View File

@ -0,0 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
#ifndef _FSL_EDMA_DT_BINDING_H_
#define _FSL_EDMA_DT_BINDING_H_
/* Receive Channel */
#define FSL_EDMA_RX 0x1
/* iMX8 audio remote DMA */
#define FSL_EDMA_REMOTE 0x2
/* FIFO is a contiguous memory region */
#define FSL_EDMA_MULTI_FIFO 0x4
/* Channel must be an even-numbered channel */
#define FSL_EDMA_EVEN_CH 0x8
/* Channel must be an odd-numbered channel */
#define FSL_EDMA_ODD_CH 0x10
#endif
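
These constants are shared between device trees and the fsl-edma driver and are meant to be ORed together into a channel's feature word. Below is a hedged sketch of how such a word could be decoded on the driver side; the function and the include path are illustrative only, and the fsl-edma binding defines where the word actually sits in the dmas specifier.

#include <linux/printk.h>
#include <dt-bindings/dma/fsl-edma.h>   /* path assumed for the header added above */

static void fsl_edma_describe_features(u32 features)
{
        if (features & FSL_EDMA_RX)
                pr_info("receive channel\n");
        if (features & FSL_EDMA_REMOTE)
                pr_info("i.MX8 audio remote DMA\n");
        if (features & FSL_EDMA_MULTI_FIFO)
                pr_info("contiguous multi-FIFO region\n");
        if (features & FSL_EDMA_EVEN_CH)
                pr_info("must use an even-numbered channel\n");
        if (features & FSL_EDMA_ODD_CH)
                pr_info("must use an odd-numbered channel\n");
}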