[PATCH 1/5] dmaengine: xilinx: xdma: Add transfer termination callbacks

From: Jan Kuliga
Date: Wed Nov 22 2023 - 17:19:43 EST


The xdma driver currently does not implement transfer termination
callbacks, so there is no way to gracefully stop an ongoing DMA
transaction. This matters in particular for cyclic DMA transfers, which
keep running until they are explicitly terminated. Implement the
device_terminate_all() and device_synchronize() callbacks.

Signed-off-by: Jan Kuliga <jankul@xxxxxxxxxxxxxxxx>
---
 drivers/dma/xilinx/xdma-regs.h |  1 +
 drivers/dma/xilinx/xdma.c      | 55 ++++++++++++++++++++++++++++++++++
 2 files changed, 56 insertions(+)
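
Not part of the patch itself -- just an illustrative sketch of how a client
driver would drive the new callbacks through the generic dmaengine API. The
channel request and the cyclic descriptor submission are assumed to have
happened elsewhere, and the helper name below is made up for the example:

#include <linux/dmaengine.h>

/* Hypothetical client-side helper that owns a cyclic transfer. */
static void client_stop_cyclic(struct dma_chan *chan)
{
	/*
	 * Ends up in xdma_terminate_all(): the RUN bit is cleared and
	 * all queued descriptors are moved to the terminated list.
	 */
	dmaengine_terminate_async(chan);

	/*
	 * Ends up in xdma_synchronize() -> vchan_synchronize(): waits
	 * until no descriptor callback can still be running and frees
	 * the terminated descriptors, after which the buffers backing
	 * the cyclic transfer can safely be reused or freed.
	 */
	dmaengine_synchronize(chan);
}

dmaengine_terminate_sync() combines both steps in a single call.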

diff --git a/drivers/dma/xilinx/xdma-regs.h b/drivers/dma/xilinx/xdma-regs.h
index e641a5083e14..1f17ce165f92 100644
--- a/drivers/dma/xilinx/xdma-regs.h
+++ b/drivers/dma/xilinx/xdma-regs.h
@@ -76,6 +76,7 @@ struct xdma_hw_desc {
 #define XDMA_CHAN_CONTROL_W1S		0x8
 #define XDMA_CHAN_CONTROL_W1C		0xc
 #define XDMA_CHAN_STATUS		0x40
+#define XDMA_CHAN_STATUS_RC		0x44
 #define XDMA_CHAN_COMPLETED_DESC	0x48
 #define XDMA_CHAN_ALIGNMENTS		0x4c
 #define XDMA_CHAN_INTR_ENABLE		0x90
diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
index 84a88029226f..58539a093de2 100644
--- a/drivers/dma/xilinx/xdma.c
+++ b/drivers/dma/xilinx/xdma.c
@@ -632,6 +632,59 @@ xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
 	return NULL;
 }
 
+/**
+ * xdma_terminate_all - Halt the DMA channel
+ * @chan: DMA channel
+ */
+static int xdma_terminate_all(struct dma_chan *chan)
+{
+	int ret;
+	u32 val;
+	unsigned long flags;
+	struct xdma_chan *xchan = to_xdma_chan(chan);
+	struct xdma_device *xdev = xchan->xdev_hdl;
+	struct virt_dma_desc *vd;
+	LIST_HEAD(head);
+
+	/* Clear the RUN bit to stop the transfer */
+	ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
+			   CHAN_CTRL_RUN_STOP);
+	if (ret)
+		return ret;
+
+	/* Clear the channel status register */
+	ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &val);
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&xchan->vchan.lock, flags);
+
+	/* Don't care if there were no descriptors issued */
+	vd = vchan_next_desc(&xchan->vchan);
+	if (vd) {
+		list_del(&vd->node);
+		vchan_terminate_vdesc(vd);
+	}
+	vchan_get_all_descriptors(&xchan->vchan, &head);
+	list_splice_tail_init(&head, &xchan->vchan.desc_terminated);
+
+	xchan->busy = false;
+	spin_unlock_irqrestore(&xchan->vchan.lock, flags);
+
+	return 0;
+}
+
+/**
+ * xdma_synchronize - Synchronize current execution context to the DMA channel
+ * @chan: DMA channel
+ */
+static void xdma_synchronize(struct dma_chan *chan)
+{
+	struct xdma_chan *xchan = to_xdma_chan(chan);
+
+	vchan_synchronize(&xchan->vchan);
+}
+
 /**
  * xdma_device_config - Configure the DMA channel
  * @chan: DMA channel
@@ -1093,6 +1146,8 @@ static int xdma_probe(struct platform_device *pdev)
 	xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
 	xdev->dma_dev.filter.fn = xdma_filter_fn;
 	xdev->dma_dev.device_prep_dma_cyclic = xdma_prep_dma_cyclic;
+	xdev->dma_dev.device_terminate_all = xdma_terminate_all;
+	xdev->dma_dev.device_synchronize = xdma_synchronize;
 
 	ret = dma_async_device_register(&xdev->dma_dev);
 	if (ret) {
--
2.34.1