[PATCH] dmaengine: dw: fix byte order of hw descriptor fields

From: Mans Rullgard
Date: Sun Dec 20 2015 - 11:54:21 EST


If the DMA controller uses a different byte order than the host CPU,
the hardware linked list descriptor fields need to be byte-swapped.

This patch makes the driver write these fields using the same byte
order it uses for MMIO accesses to the DMA engine. I do not know
whether this is guaranteed to always be correct.
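
For illustration only (not part of the patch), the accessor pattern
boils down to the standalone userspace sketch below. The demo_* names
are placeholders for the dw_lli_*() helpers added to regs.h, with
glibc's htobe32()/be32toh() standing in for the kernel's
cpu_to_be32()/be32_to_cpu(), and a big-endian controller on a
little-endian host assumed:

  #include <endian.h>
  #include <inttypes.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Stand-ins for the dw_u32 type and dw_lli_*() macros added to regs.h
   * below; the descriptor field is kept in the controller's (here
   * big-endian) byte order and swapped on every CPU access. */
  typedef uint32_t demo_u32;
  #define demo_lli_write(d, v) ((d) = htobe32(v)) /* CPU -> descriptor order */
  #define demo_lli_read(s)     be32toh(s)         /* descriptor -> CPU order */
  #define demo_lli_or(d, v)    demo_lli_write(d, demo_lli_read(d) | (v))

  int main(void)
  {
          demo_u32 ctllo;

          demo_lli_write(ctllo, 0x44);    /* arbitrary example bits */
          demo_lli_or(ctllo, 0x1);        /* read-modify-write of one bit */
          printf("stored: 0x%08" PRIx32 "  cpu view: 0x%08" PRIx32 "\n",
                 ctllo, demo_lli_read(ctllo));
          return 0;
  }

On a little-endian host the two printed values differ, showing that the
field is held in the controller's byte order while the CPU always
operates on the swapped-back value.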

Signed-off-by: Mans Rullgard <mans@xxxxxxxxx>
---
drivers/dma/dw/core.c | 84 +++++++++++++++++++++++++++------------------------
drivers/dma/dw/regs.h | 26 +++++++++++-----
2 files changed, 63 insertions(+), 47 deletions(-)

diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 7067b6d..b954904 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -209,12 +209,12 @@ static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
* Software emulation of LLP mode relies on interrupts to continue
* multi block transfer.
*/
- ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;
+ ctllo = dw_lli_read(desc->lli.ctllo) | DWC_CTLL_INT_EN;

- channel_writel(dwc, SAR, desc->lli.sar);
- channel_writel(dwc, DAR, desc->lli.dar);
+ channel_writel(dwc, SAR, dw_lli_read(desc->lli.sar));
+ channel_writel(dwc, DAR, dw_lli_read(desc->lli.dar));
channel_writel(dwc, CTL_LO, ctllo);
- channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
+ channel_writel(dwc, CTL_HI, dw_lli_read(desc->lli.ctlhi));
channel_set_bit(dw, CH_EN, dwc->mask);

/* Move pointer to next descriptor */
@@ -432,7 +432,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
}

/* Check first descriptors llp */
- if (desc->lli.llp == llp) {
+ if (dw_lli_read(desc->lli.llp) == llp) {
/* This one is currently in progress */
dwc->residue -= dwc_get_sent(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -441,7 +441,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)

dwc->residue -= desc->len;
list_for_each_entry(child, &desc->tx_list, desc_node) {
- if (child->lli.llp == llp) {
+ if (dw_lli_read(child->lli.llp) == llp) {
/* Currently in progress */
dwc->residue -= dwc_get_sent(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -730,16 +730,16 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
if (!desc)
goto err_desc_get;

- desc->lli.sar = src + offset;
- desc->lli.dar = dest + offset;
- desc->lli.ctllo = ctllo;
- desc->lli.ctlhi = xfer_count;
+ dw_lli_write(desc->lli.sar, src + offset);
+ dw_lli_write(desc->lli.dar, dest + offset);
+ dw_lli_write(desc->lli.ctllo, ctllo);
+ dw_lli_write(desc->lli.ctlhi, xfer_count);
desc->len = xfer_count << src_width;

if (!first) {
first = desc;
} else {
- prev->lli.llp = desc->txd.phys;
+ dw_lli_write(prev->lli.llp, desc->txd.phys);
list_add_tail(&desc->desc_node,
&first->tx_list);
}
@@ -748,7 +748,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,

if (flags & DMA_PREP_INTERRUPT)
/* Trigger interrupt after last block */
- prev->lli.ctllo |= DWC_CTLL_INT_EN;
+ dw_lli_or(prev->lli.ctllo, DWC_CTLL_INT_EN);

prev->lli.llp = 0;
first->txd.flags = flags;
@@ -818,9 +818,10 @@ slave_sg_todev_fill_desc:
if (!desc)
goto err_desc_get;

- desc->lli.sar = mem;
- desc->lli.dar = reg;
- desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
+ dw_lli_write(desc->lli.sar, mem);
+ dw_lli_write(desc->lli.dar, reg);
+ dw_lli_write(desc->lli.ctllo,
+ ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
if ((len >> mem_width) > dwc->block_size) {
dlen = dwc->block_size << mem_width;
mem += dlen;
@@ -830,13 +831,13 @@ slave_sg_todev_fill_desc:
len = 0;
}

- desc->lli.ctlhi = dlen >> mem_width;
+ dw_lli_write(desc->lli.ctlhi, dlen >> mem_width);
desc->len = dlen;

if (!first) {
first = desc;
} else {
- prev->lli.llp = desc->txd.phys;
+ dw_lli_write(prev->lli.llp, desc->txd.phys);
list_add_tail(&desc->desc_node,
&first->tx_list);
}
@@ -875,9 +876,10 @@ slave_sg_fromdev_fill_desc:
if (!desc)
goto err_desc_get;

- desc->lli.sar = reg;
- desc->lli.dar = mem;
- desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
+ dw_lli_write(desc->lli.sar, reg);
+ dw_lli_write(desc->lli.dar, mem);
+ dw_lli_write(desc->lli.ctllo,
+ ctllo | DWC_CTLL_DST_WIDTH(mem_width));
if ((len >> reg_width) > dwc->block_size) {
dlen = dwc->block_size << reg_width;
mem += dlen;
@@ -886,13 +888,13 @@ slave_sg_fromdev_fill_desc:
dlen = len;
len = 0;
}
- desc->lli.ctlhi = dlen >> reg_width;
+ dw_lli_write(desc->lli.ctlhi, dlen >> reg_width);
desc->len = dlen;

if (!first) {
first = desc;
} else {
- prev->lli.llp = desc->txd.phys;
+ dw_lli_write(prev->lli.llp, desc->txd.phys);
list_add_tail(&desc->desc_node,
&first->tx_list);
}
@@ -909,7 +911,7 @@ slave_sg_fromdev_fill_desc:

if (flags & DMA_PREP_INTERRUPT)
/* Trigger interrupt after last block */
- prev->lli.ctllo |= DWC_CTLL_INT_EN;
+ dw_lli_or(prev->lli.ctllo, DWC_CTLL_INT_EN);

prev->lli.llp = 0;
first->total_len = total_len;
@@ -1393,50 +1395,52 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,

switch (direction) {
case DMA_MEM_TO_DEV:
- desc->lli.dar = sconfig->dst_addr;
- desc->lli.sar = buf_addr + (period_len * i);
- desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
+ dw_lli_write(desc->lli.dar, sconfig->dst_addr);
+ dw_lli_write(desc->lli.sar,
+ buf_addr + (period_len * i));
+ dw_lli_write(desc->lli.ctllo, (DWC_DEFAULT_CTLLO(chan)
| DWC_CTLL_DST_WIDTH(reg_width)
| DWC_CTLL_SRC_WIDTH(reg_width)
| DWC_CTLL_DST_FIX
| DWC_CTLL_SRC_INC
- | DWC_CTLL_INT_EN);
+ | DWC_CTLL_INT_EN));

- desc->lli.ctllo |= sconfig->device_fc ?
- DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
- DWC_CTLL_FC(DW_DMA_FC_D_M2P);
+ dw_lli_or(desc->lli.ctllo, sconfig->device_fc ?
+ DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
+ DWC_CTLL_FC(DW_DMA_FC_D_M2P));

break;
case DMA_DEV_TO_MEM:
- desc->lli.dar = buf_addr + (period_len * i);
- desc->lli.sar = sconfig->src_addr;
- desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
+ dw_lli_write(desc->lli.dar,
+ buf_addr + (period_len * i));
+ dw_lli_write(desc->lli.sar, sconfig->src_addr);
+ dw_lli_write(desc->lli.ctllo, (DWC_DEFAULT_CTLLO(chan)
| DWC_CTLL_SRC_WIDTH(reg_width)
| DWC_CTLL_DST_WIDTH(reg_width)
| DWC_CTLL_DST_INC
| DWC_CTLL_SRC_FIX
- | DWC_CTLL_INT_EN);
+ | DWC_CTLL_INT_EN));

- desc->lli.ctllo |= sconfig->device_fc ?
- DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
- DWC_CTLL_FC(DW_DMA_FC_D_P2M);
+ dw_lli_or(desc->lli.ctllo, sconfig->device_fc ?
+ DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
+ DWC_CTLL_FC(DW_DMA_FC_D_P2M));

break;
default:
break;
}

- desc->lli.ctlhi = (period_len >> reg_width);
+ dw_lli_write(desc->lli.ctlhi, (period_len >> reg_width));
cdesc->desc[i] = desc;

if (last)
- last->lli.llp = desc->txd.phys;
+ dw_lli_write(last->lli.llp, desc->txd.phys);

last = desc;
}

/* Let's make a cyclic list */
- last->lli.llp = cdesc->desc[0]->txd.phys;
+ dw_lli_write(last->lli.llp, cdesc->desc[0]->txd.phys);

dev_dbg(chan2dev(&dwc->chan),
"cyclic prepared buf %pad len %zu period %zu periods %d\n",
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index 241ff2b..84f05de 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -308,20 +308,32 @@ static inline struct dw_dma *to_dw_dma(struct dma_device *ddev)
return container_of(ddev, struct dw_dma, dma);
}

+#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
+typedef __be32 dw_u32;
+#define dw_lli_read(s) be32_to_cpu(s)
+#define dw_lli_write(d, v) ((d) = cpu_to_be32(v))
+#else
+typedef __le32 dw_u32;
+#define dw_lli_read(s) le32_to_cpu(s)
+#define dw_lli_write(d, v) ((d) = cpu_to_le32(v))
+#endif
+
+#define dw_lli_or(d, v) dw_lli_write(d, dw_lli_read(d) | (v))
+
/* LLI == Linked List Item; a.k.a. DMA block descriptor */
struct dw_lli {
/* values that are not changed by hardware */
- u32 sar;
- u32 dar;
- u32 llp; /* chain to next lli */
- u32 ctllo;
+ dw_u32 sar;
+ dw_u32 dar;
+ dw_u32 llp; /* chain to next lli */
+ dw_u32 ctllo;
/* values that may get written back: */
- u32 ctlhi;
+ dw_u32 ctlhi;
/* sstat and dstat can snapshot peripheral register state.
* silicon config may discard either or both...
*/
- u32 sstat;
- u32 dstat;
+ dw_u32 sstat;
+ dw_u32 dstat;
};

struct dw_desc {
--
2.6.3

