dmaengine fixes for v6.9
Driver fixes for:

 - Revert pl330 issue_pending waits until WFP state due to regression
   reported in Bluetooth loading

 - Xilinx driver fixes for synchronization, buffer offsets, locking and
   kdoc

 - idxd fixes for spinlock and preventing the migration of the perf
   context to an invalid target

 - idma driver fix for interrupt handling when powered off

 - Tegra driver residual calculation fix

 - Owl driver register access fix

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEE+vs47OPLdNbVcHzyfBQHDyUjg0cFAmYsjEcACgkQfBQHDyUj
g0dOXA//QjGv2paJuKSQXwY0Am6ovjYVztYKKJmX42eiRPf2elt3/eRyfwrvnnNx
6bCedPlLjt230VxQ+DQWA+U6XRSEZ55xTqu+NE8Owoqy3WuS7Q7l5p1Lx3nPMg9O
2W0GkJ7q++i4XBnDBPSGQZqOf3sFAaM4fEv2Yq7s4qHUipyGVKUH6IKBBQFIf//q
slw32tL77/pcViYkNLJW6fDgRY8ZnrBNuLPoWpp7DRraYxRjiPk4WAy7mA2/UnzE
kPrPZUxJXlXVBeRcaTEjse3TkRGp6QK4XOW8XQlBIUv9IVsN1WNPQaYcTcrywBwY
18mJt6JNLZVR/31Va/QQfYG6y1mw5Fja9fiLhT9lKqD3iBwLBzxCmrs+NUZgBneg
sJyR/JYxnUR7d8enNfLCUwZDvCiqwHdHXjeakWNg/7gec7OG6WjpWeFd8SEbpxfU
XGaXJTSAFpgHFbLOZvIXWn1nWarpEBBqCNz3dOHN34eCCnh5j+bx5za4k4u/jjSz
9sZjYLO+gn8dqhPNAtIs0wkyEIECoJ8iIhd6sTTkdTKebw8REXbIbey3hY83TTb1
PRuevuoYOBACCs4QS1Gs+8hM5gjU9qw8MVeFk5WuNxY7y2Gxeh+4NmXow/Nerd24
+NoWibWv/pzlnWCPOfHmbEPG8QRljnLa07nXQMkLrbCbKu0DALI=
=Q5fC
-----END PGP SIGNATURE-----

Merge tag 'dmaengine-fix-6.9' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine fixes from Vinod Koul:

 - Revert pl330 issue_pending waits until WFP state due to regression
   reported in Bluetooth loading

 - Xilinx driver fixes for synchronization, buffer offsets, locking and
   kdoc

 - idxd fixes for spinlock and preventing the migration of the perf
   context to an invalid target

 - idma driver fix for interrupt handling when powered off

 - Tegra driver residual calculation fix

 - Owl driver register access fix

* tag 'dmaengine-fix-6.9' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine:
  dmaengine: idxd: Fix oops during rmmod on single-CPU platforms
  dmaengine: xilinx: xdma: Clarify kdoc in XDMA driver
  dmaengine: xilinx: xdma: Fix synchronization issue
  dmaengine: xilinx: xdma: Fix wrong offsets in the buffers addresses in dma descriptor
  dma: xilinx_dpdma: Fix locking
  dmaengine: idxd: Convert spinlock to mutex to lock evl workqueue
  idma64: Don't try to serve interrupts when device is powered off
  dmaengine: tegra186: Fix residual calculation
  dmaengine: owl: fix register access functions
  dmaengine: Revert "dmaengine: pl330: issue_pending waits until WFP state"
commit 6fba14a7b5
@@ -171,6 +171,10 @@ static irqreturn_t idma64_irq(int irq, void *dev)
         u32 status_err;
         unsigned short i;
 
+        /* Since IRQ may be shared, check if DMA controller is powered on */
+        if (status == GENMASK(31, 0))
+                return IRQ_NONE;
+
         dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);
 
         /* Check if we have any interrupt from the DMA controller */
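Note (not part of the patch): the hunk above is the idma64 shared-IRQ fix. A read from a powered-off block typically returns all-ones on the bus, which is exactly GENMASK(31, 0) for a 32-bit register, so the handler bails out with IRQ_NONE instead of touching the hardware. A minimal user-space sketch of that guard, where read_status() and GENMASK32() are illustrative stand-ins rather than the kernel API:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's GENMASK(31, 0): all 32 bits set. */
#define GENMASK32(h, l) ((0xffffffffu >> (31 - (h))) & (0xffffffffu << (l)))

/* Hypothetical MMIO status read; a powered-off block reads back all-ones. */
static uint32_t read_status(int powered_on)
{
        return powered_on ? 0x00000002u : 0xffffffffu;
}

int main(void)
{
        uint32_t status = read_status(0);

        /* Same shape as the guard added to idma64_irq(): all-ones means the
         * controller is off, so the shared interrupt is not ours. */
        if (status == GENMASK32(31, 0)) {
                printf("IRQ_NONE: controller powered off, not our interrupt\n");
                return 0;
        }

        printf("handle status 0x%08x\n", status);
        return 0;
}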
@@ -342,7 +342,7 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
         if (!evl)
                 return;
 
-        spin_lock(&evl->lock);
+        mutex_lock(&evl->lock);
         status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
         t = status.tail;
         h = status.head;
@@ -354,9 +354,8 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
                 set_bit(h, evl->bmap);
                 h = (h + 1) % size;
         }
-        spin_unlock(&evl->lock);
-
         drain_workqueue(wq->wq);
+        mutex_unlock(&evl->lock);
 }
 
 static int idxd_cdev_release(struct inode *node, struct file *filep)
@@ -66,7 +66,7 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
         if (!evl || !evl->log)
                 return 0;
 
-        spin_lock(&evl->lock);
+        mutex_lock(&evl->lock);
 
         evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
         t = evl_status.tail;
@@ -87,7 +87,7 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
                 dump_event_entry(idxd, s, i, &count, processed);
         }
 
-        spin_unlock(&evl->lock);
+        mutex_unlock(&evl->lock);
         return 0;
 }
 
@@ -775,7 +775,7 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
                 goto err_alloc;
         }
 
-        spin_lock(&evl->lock);
+        mutex_lock(&evl->lock);
         evl->log = addr;
         evl->dma = dma_addr;
         evl->log_size = size;
@@ -796,7 +796,7 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
         gencfg.evl_en = 1;
         iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
 
-        spin_unlock(&evl->lock);
+        mutex_unlock(&evl->lock);
         return 0;
 
 err_alloc:
@@ -819,7 +819,7 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
         if (!gencfg.evl_en)
                 return;
 
-        spin_lock(&evl->lock);
+        mutex_lock(&evl->lock);
         gencfg.evl_en = 0;
         iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
 
@@ -836,7 +836,7 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
         evl_dma = evl->dma;
         evl->log = NULL;
         evl->size = IDXD_EVL_SIZE_MIN;
-        spin_unlock(&evl->lock);
+        mutex_unlock(&evl->lock);
 
         dma_free_coherent(dev, evl_log_size, evl_log, evl_dma);
 }
@@ -293,7 +293,7 @@ struct idxd_driver_data {
 
 struct idxd_evl {
         /* Lock to protect event log access. */
-        spinlock_t lock;
+        struct mutex lock;
         void *log;
         dma_addr_t dma;
         /* Total size of event log = number of entries * entry size. */
@@ -354,7 +354,7 @@ static int idxd_init_evl(struct idxd_device *idxd)
         if (!evl)
                 return -ENOMEM;
 
-        spin_lock_init(&evl->lock);
+        mutex_init(&evl->lock);
         evl->size = IDXD_EVL_SIZE_MIN;
 
         idxd_name = dev_name(idxd_confdev(idxd));
@@ -363,7 +363,7 @@ static void process_evl_entries(struct idxd_device *idxd)
         evl_status.bits = 0;
         evl_status.int_pending = 1;
 
-        spin_lock(&evl->lock);
+        mutex_lock(&evl->lock);
         /* Clear interrupt pending bit */
         iowrite32(evl_status.bits_upper32,
                   idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32));
@@ -380,7 +380,7 @@ static void process_evl_entries(struct idxd_device *idxd)
 
         evl_status.head = h;
         iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
-        spin_unlock(&evl->lock);
+        mutex_unlock(&evl->lock);
 }
 
 irqreturn_t idxd_misc_thread(int vec, void *data)
@@ -528,14 +528,11 @@ static int perf_event_cpu_offline(unsigned int cpu, struct hlist_node *node)
                 return 0;
 
         target = cpumask_any_but(cpu_online_mask, cpu);
-
         /* migrate events if there is a valid target */
-        if (target < nr_cpu_ids)
+        if (target < nr_cpu_ids) {
                 cpumask_set_cpu(target, &perfmon_dsa_cpu_mask);
-        else
-                target = -1;
-
-        perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
+                perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
+        }
 
         return 0;
 }
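Note (not part of the patch): in the idxd perfmon hunk above, cpumask_any_but() cannot find another online CPU on a single-CPU platform and returns a value >= nr_cpu_ids; the old code then forced the target to -1 and still called perf_pmu_migrate_context(), which is what oopsed during rmmod. A user-space model of the fixed control flow, where NR_CPU_IDS, any_cpu_but() and migrate_context() are illustrative stand-ins:

#include <stdio.h>

/* Stand-in for nr_cpu_ids on a single-CPU platform. */
#define NR_CPU_IDS 1

/* Models cpumask_any_but(): with only CPU 0 online there is no other CPU
 * to pick, so the helper reports "no valid target" as a value >= NR_CPU_IDS. */
static unsigned int any_cpu_but(unsigned int cpu)
{
        (void)cpu;
        return NR_CPU_IDS;
}

static void migrate_context(unsigned int from, int to)
{
        /* Calling the real migrate helper with an invalid target CPU is
         * what triggered the oops this fix avoids. */
        printf("migrate events from CPU %u to CPU %d\n", from, to);
}

int main(void)
{
        unsigned int cpu = 0;
        unsigned int target = any_cpu_but(cpu);

        /* Fixed logic: only migrate when a valid target exists. */
        if (target < NR_CPU_IDS)
                migrate_context(cpu, (int)target);
        else
                printf("no other online CPU, skip migration\n");

        return 0;
}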
@@ -250,7 +250,7 @@ static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
         else
                 regval &= ~val;
 
-        writel(val, pchan->base + reg);
+        writel(regval, pchan->base + reg);
 }
 
 static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
@@ -274,7 +274,7 @@ static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
         else
                 regval &= ~val;
 
-        writel(val, od->base + reg);
+        writel(regval, od->base + reg);
 }
 
 static void dma_writel(struct owl_dma *od, u32 reg, u32 data)
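Note (not part of the patch): the two owl-dma hunks above fix a plain read-modify-write bug. The helpers computed regval from the current register contents but then wrote back the unmodified val, clobbering every other bit in the register. A minimal user-space model of the difference, with reg standing in for the hardware register:

#include <stdint.h>
#include <stdio.h>

static uint32_t reg = 0xf0; /* pretend other bits are already set in hardware */

static void update(uint32_t val, int state, int buggy)
{
        uint32_t regval = reg;          /* readl() */

        if (state)
                regval |= val;
        else
                regval &= ~val;

        /* The buggy version wrote 'val', losing the bits kept in 'regval'. */
        reg = buggy ? val : regval;     /* writel() */
}

int main(void)
{
        update(0x1, 1, 1);
        printf("buggy: reg = 0x%02x\n", reg);   /* 0x01: other bits lost */

        reg = 0xf0;
        update(0x1, 1, 0);
        printf("fixed: reg = 0x%02x\n", reg);   /* 0xf1: other bits preserved */
        return 0;
}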
@@ -1053,9 +1053,6 @@ static bool _trigger(struct pl330_thread *thrd)
 
         thrd->req_running = idx;
 
-        if (desc->rqtype == DMA_MEM_TO_DEV || desc->rqtype == DMA_DEV_TO_MEM)
-                UNTIL(thrd, PL330_STATE_WFP);
-
         return true;
 }
 
@@ -746,6 +746,9 @@ static int tegra_dma_get_residual(struct tegra_dma_channel *tdc)
                 bytes_xfer = dma_desc->bytes_xfer +
                              sg_req[dma_desc->sg_idx].len - (wcount * 4);
 
+        if (dma_desc->bytes_req == bytes_xfer)
+                return 0;
+
         residual = dma_desc->bytes_req - (bytes_xfer % dma_desc->bytes_req);
 
         return residual;
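Note (not part of the patch): the tegra186 hunk above handles the fully-transferred case explicitly. With the old formula alone, a completed descriptor reported a full buffer of residue instead of zero, because bytes_xfer % bytes_req wraps to 0. A worked example in plain C, with made-up values for illustration:

#include <stdio.h>

int main(void)
{
        unsigned int bytes_req = 1024;  /* requested transfer size */
        unsigned int bytes_xfer = 1024; /* everything has been transferred */

        /* Old calculation: 1024 - (1024 % 1024) = 1024, a full buffer of
         * bogus residue for a finished transfer. */
        unsigned int old_residual = bytes_req - (bytes_xfer % bytes_req);

        /* Fixed flow: report 0 when the request is complete, otherwise fall
         * back to the modulo formula for a wrapped cyclic transfer. */
        unsigned int new_residual =
                (bytes_req == bytes_xfer) ? 0 : bytes_req - (bytes_xfer % bytes_req);

        printf("old: %u, new: %u\n", old_residual, new_residual);
        return 0;
}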
@@ -117,6 +117,9 @@ struct xdma_hw_desc {
                          CHAN_CTRL_IE_WRITE_ERROR |            \
                          CHAN_CTRL_IE_DESC_ERROR)
 
+/* bits of the channel status register */
+#define XDMA_CHAN_STATUS_BUSY                   BIT(0)
+
 #define XDMA_CHAN_STATUS_MASK CHAN_CTRL_START
 
 #define XDMA_CHAN_ERROR_MASK (CHAN_CTRL_IE_DESC_ALIGN_MISMATCH |       \
@@ -71,6 +71,8 @@ struct xdma_chan {
         enum dma_transfer_direction dir;
         struct dma_slave_config cfg;
         u32 irq;
+        struct completion last_interrupt;
+        bool stop_requested;
 };
 
 /**
@@ -376,6 +378,8 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
                 return ret;
 
         xchan->busy = true;
+        xchan->stop_requested = false;
+        reinit_completion(&xchan->last_interrupt);
 
         return 0;
 }
@@ -387,7 +391,6 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
 static int xdma_xfer_stop(struct xdma_chan *xchan)
 {
         int ret;
-        u32 val;
         struct xdma_device *xdev = xchan->xdev_hdl;
 
         /* clear run stop bit to prevent any further auto-triggering */
@@ -395,13 +398,7 @@ static int xdma_xfer_stop(struct xdma_chan *xchan)
                            CHAN_CTRL_RUN_STOP);
         if (ret)
                 return ret;
-
-        /* Clear the channel status register */
-        ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &val);
-        if (ret)
-                return ret;
-
-        return 0;
+        return ret;
 }
 
 /**
@@ -474,6 +471,8 @@ static int xdma_alloc_channels(struct xdma_device *xdev,
                 xchan->xdev_hdl = xdev;
                 xchan->base = base + i * XDMA_CHAN_STRIDE;
                 xchan->dir = dir;
+                xchan->stop_requested = false;
+                init_completion(&xchan->last_interrupt);
 
                 ret = xdma_channel_init(xchan);
                 if (ret)
@@ -521,6 +520,7 @@ static int xdma_terminate_all(struct dma_chan *chan)
         spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
 
         xdma_chan->busy = false;
+        xdma_chan->stop_requested = true;
         vd = vchan_next_desc(&xdma_chan->vchan);
         if (vd) {
                 list_del(&vd->node);
@@ -542,17 +542,26 @@ static int xdma_terminate_all(struct dma_chan *chan)
 static void xdma_synchronize(struct dma_chan *chan)
 {
         struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+        struct xdma_device *xdev = xdma_chan->xdev_hdl;
+        int st = 0;
+
+        /* If the engine continues running, wait for the last interrupt */
+        regmap_read(xdev->rmap, xdma_chan->base + XDMA_CHAN_STATUS, &st);
+        if (st & XDMA_CHAN_STATUS_BUSY)
+                wait_for_completion_timeout(&xdma_chan->last_interrupt, msecs_to_jiffies(1000));
 
         vchan_synchronize(&xdma_chan->vchan);
 }
 
 /**
- * xdma_fill_descs - Fill hardware descriptors with contiguous memory block addresses
- * @sw_desc: tx descriptor state container
- * @src_addr: Value for a ->src_addr field of a first descriptor
- * @dst_addr: Value for a ->dst_addr field of a first descriptor
- * @size: Total size of a contiguous memory block
- * @filled_descs_num: Number of filled hardware descriptors for corresponding sw_desc
+ * xdma_fill_descs() - Fill hardware descriptors for one contiguous memory chunk.
+ * More than one descriptor will be used if the size is bigger
+ * than XDMA_DESC_BLEN_MAX.
+ * @sw_desc: Descriptor container
+ * @src_addr: First value for the ->src_addr field
+ * @dst_addr: First value for the ->dst_addr field
+ * @size: Size of the contiguous memory block
+ * @filled_descs_num: Index of the first descriptor to take care of in @sw_desc
  */
 static inline u32 xdma_fill_descs(struct xdma_desc *sw_desc, u64 src_addr,
                                   u64 dst_addr, u32 size, u32 filled_descs_num)
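Note (not part of the patch): the hunk above is the heart of the xdma synchronization fix. terminate_all() sets stop_requested, the interrupt handler completes last_interrupt when it sees that flag, and xdma_synchronize() waits (with a timeout in the real driver) for that completion while the hardware still reports busy, so descriptors are not freed under a still-running engine. A rough POSIX-threads analogy of that handshake, with names and timing purely illustrative and the timeout omitted for brevity:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t last_interrupt = PTHREAD_COND_INITIALIZER;
static int stop_requested;
static int irq_done;

/* Plays the role of the channel ISR: signal the waiter once a stop has
 * been requested and the final interrupt has been handled. */
static void *isr_thread(void *arg)
{
        (void)arg;
        usleep(100 * 1000);             /* pretend the last transfer finishes */
        pthread_mutex_lock(&lock);
        if (stop_requested) {
                irq_done = 1;
                pthread_cond_signal(&last_interrupt);
        }
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t isr;

        pthread_mutex_lock(&lock);
        stop_requested = 1;             /* terminate_all() */
        pthread_mutex_unlock(&lock);

        pthread_create(&isr, NULL, isr_thread, NULL);

        /* synchronize(): wait for the last interrupt before tearing down. */
        pthread_mutex_lock(&lock);
        while (!irq_done)
                pthread_cond_wait(&last_interrupt, &lock);
        pthread_mutex_unlock(&lock);

        pthread_join(isr, NULL);
        printf("safe to free descriptors now\n");
        return 0;
}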
@@ -704,7 +713,7 @@ xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
         desc_num = 0;
         for (i = 0; i < periods; i++) {
                 desc_num += xdma_fill_descs(sw_desc, *src, *dst, period_size, desc_num);
-                addr += i * period_size;
+                addr += period_size;
         }
 
         tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
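Note (not part of the patch): the one-line change above fixes the per-period buffer offsets in the cyclic path. Advancing the address by i * period_size inside the loop moves it by 0, 1, 2, 3... periods per step instead of exactly one, so later periods point at the wrong offsets. A quick check of the arithmetic in plain C:

#include <stdio.h>

int main(void)
{
        unsigned long period_size = 0x1000;
        unsigned long old_addr = 0, new_addr = 0;
        unsigned int i;

        for (i = 0; i < 4; i++) {
                /* offsets handed to the descriptors at iteration i */
                printf("period %u: old 0x%05lx, fixed 0x%05lx\n",
                       i, old_addr, new_addr);
                old_addr += i * period_size;    /* buggy: 0, 0, 1, 3 periods */
                new_addr += period_size;        /* fixed: 0, 1, 2, 3 periods */
        }
        return 0;
}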
@@ -876,6 +885,9 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
         u32 st;
         bool repeat_tx;
 
+        if (xchan->stop_requested)
+                complete(&xchan->last_interrupt);
+
         spin_lock(&xchan->vchan.lock);
 
         /* get submitted request */
@@ -214,7 +214,8 @@ struct xilinx_dpdma_tx_desc {
  * @running: true if the channel is running
  * @first_frame: flag for the first frame of stream
  * @video_group: flag if multi-channel operation is needed for video channels
- * @lock: lock to access struct xilinx_dpdma_chan
+ * @lock: lock to access struct xilinx_dpdma_chan. Must be taken before
+ *        @vchan.lock, if both are to be held.
  * @desc_pool: descriptor allocation pool
  * @err_task: error IRQ bottom half handler
  * @desc: References to descriptors being processed
@@ -1097,12 +1098,14 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
          * Complete the active descriptor, if any, promote the pending
          * descriptor to active, and queue the next transfer, if any.
          */
+        spin_lock(&chan->vchan.lock);
         if (chan->desc.active)
                 vchan_cookie_complete(&chan->desc.active->vdesc);
         chan->desc.active = pending;
         chan->desc.pending = NULL;
 
         xilinx_dpdma_chan_queue_transfer(chan);
+        spin_unlock(&chan->vchan.lock);
 
 out:
         spin_unlock_irqrestore(&chan->lock, flags);
@@ -1264,10 +1267,12 @@ static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
         struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
         unsigned long flags;
 
-        spin_lock_irqsave(&chan->vchan.lock, flags);
+        spin_lock_irqsave(&chan->lock, flags);
+        spin_lock(&chan->vchan.lock);
         if (vchan_issue_pending(&chan->vchan))
                 xilinx_dpdma_chan_queue_transfer(chan);
-        spin_unlock_irqrestore(&chan->vchan.lock, flags);
+        spin_unlock(&chan->vchan.lock);
+        spin_unlock_irqrestore(&chan->lock, flags);
 }
 
 static int xilinx_dpdma_config(struct dma_chan *dchan,
@@ -1495,7 +1500,9 @@ static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
                                XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);
 
         spin_lock_irqsave(&chan->lock, flags);
+        spin_lock(&chan->vchan.lock);
         xilinx_dpdma_chan_queue_transfer(chan);
+        spin_unlock(&chan->vchan.lock);
         spin_unlock_irqrestore(&chan->lock, flags);
 }
 
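Note (not part of the patch): the xilinx_dpdma hunks above make the locking order explicit. chan->lock is always taken first and chan->vchan.lock is nested inside it wherever a transfer can be queued, which is exactly what the updated kdoc documents. A small POSIX-threads sketch of the same discipline; the two mutexes stand in for chan->lock and vchan.lock, and acquiring them in one fixed order on every path is what prevents deadlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER;  /* chan->lock */
static pthread_mutex_t vchan_lock = PTHREAD_MUTEX_INITIALIZER; /* vchan.lock */

/* Every path that needs both locks takes chan_lock first, vchan_lock second. */
static void issue_pending(void)
{
        pthread_mutex_lock(&chan_lock);
        pthread_mutex_lock(&vchan_lock);
        printf("queue transfer with both locks held\n");
        pthread_mutex_unlock(&vchan_lock);
        pthread_mutex_unlock(&chan_lock);
}

static void err_task(void)
{
        pthread_mutex_lock(&chan_lock);
        pthread_mutex_lock(&vchan_lock);
        printf("re-queue transfer after an error, same lock order\n");
        pthread_mutex_unlock(&vchan_lock);
        pthread_mutex_unlock(&chan_lock);
}

int main(void)
{
        issue_pending();
        err_task();
        return 0;
}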