[PATCH v4 19/24] dmaengine: dw-edma: Use non-atomic io-64 methods

From: Serge Semin
Date: Thu Jul 28 2022 - 10:31:04 EST


Instead of splitting 64-bit I/O accesses into two 32-bit ones, use the
already available non-atomic readq()/writeq() helpers implemented for
exactly this case. They are defined in the dedicated headers
io-64-nonatomic-lo-hi.h and io-64-nonatomic-hi-lo.h. If native 64-bit
readq()/writeq() are unavailable on a given platform, a driver can
include one of these headers instead of re-implementing the 64-bit
accessors locally, as long as the non-atomic nature of the fallbacks is
acceptable. Do that in the DW eDMA driver too. Note that this also lets
us drop the CONFIG_64BIT ifdefs from the code.

Signed-off-by: Serge Semin <Sergey.Semin@xxxxxxxxxxxxxxxxxxxx>
Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@xxxxxxxxxx>
Tested-by: Manivannan Sadhasivam <manivannan.sadhasivam@xxxxxxxxxx>
Acked-by: Vinod Koul <vkoul@xxxxxxxxxx>
---
drivers/dma/dw-edma/dw-edma-v0-core.c | 63 ++++++++-------------------
1 file changed, 18 insertions(+), 45 deletions(-)

diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
index bb017d37c9ad..51a34b43434c 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
@@ -8,6 +8,8 @@

#include <linux/bitfield.h>

+#include <linux/io-64-nonatomic-lo-hi.h>
+
#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
#include "dw-edma-v0-regs.h"
@@ -53,8 +55,6 @@ static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
SET_32(dw, rd_##name, value); \
} while (0)

-#ifdef CONFIG_64BIT
-
#define SET_64(dw, name, value) \
writeq(value, &(__dw_regs(dw)->name))

@@ -80,8 +80,6 @@ static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
SET_64(dw, rd_##name, value); \
} while (0)

-#endif /* CONFIG_64BIT */
-
#define SET_COMPAT(dw, name, value) \
writel(value, &(__dw_regs(dw)->type.unroll.name))

@@ -164,14 +162,13 @@ static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
#define SET_LL_32(ll, value) \
writel(value, ll)

-#ifdef CONFIG_64BIT
-
static inline void writeq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
u64 value, void __iomem *addr)
{
+ unsigned long flags;
+
if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
u32 viewport_sel;
- unsigned long flags;

raw_spin_lock_irqsave(&dw->lock, flags);

@@ -181,22 +178,22 @@ static inline void writeq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,

writel(viewport_sel,
&(__dw_regs(dw)->type.legacy.viewport_sel));
- writeq(value, addr);
+ }

+ writeq(value, addr);
+
+ if (dw->chip->mf == EDMA_MF_EDMA_LEGACY)
raw_spin_unlock_irqrestore(&dw->lock, flags);
- } else {
- writeq(value, addr);
- }
}

static inline u64 readq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
const void __iomem *addr)
{
- u32 value;
+ unsigned long flags;
+ u64 value;

if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
u32 viewport_sel;
- unsigned long flags;

raw_spin_lock_irqsave(&dw->lock, flags);

@@ -206,12 +203,12 @@ static inline u64 readq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,

writel(viewport_sel,
&(__dw_regs(dw)->type.legacy.viewport_sel));
- value = readq(addr);
+ }
+
+ value = readq(addr);

+ if (dw->chip->mf == EDMA_MF_EDMA_LEGACY)
raw_spin_unlock_irqrestore(&dw->lock, flags);
- } else {
- value = readq(addr);
- }

return value;
}
@@ -225,8 +222,6 @@ static inline u64 readq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
#define SET_LL_64(ll, value) \
writeq(value, ll)

-#endif /* CONFIG_64BIT */
-
/* eDMA management callbacks */
void dw_edma_v0_core_off(struct dw_edma *dw)
{
@@ -325,19 +320,10 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
/* Transfer size */
SET_LL_32(&lli[i].transfer_size, child->sz);
/* SAR */
- #ifdef CONFIG_64BIT
- SET_LL_64(&lli[i].sar.reg, child->sar);
- #else /* CONFIG_64BIT */
- SET_LL_32(&lli[i].sar.lsb, lower_32_bits(child->sar));
- SET_LL_32(&lli[i].sar.msb, upper_32_bits(child->sar));
- #endif /* CONFIG_64BIT */
+ SET_LL_64(&lli[i].sar.reg, child->sar);
/* DAR */
- #ifdef CONFIG_64BIT
- SET_LL_64(&lli[i].dar.reg, child->dar);
- #else /* CONFIG_64BIT */
- SET_LL_32(&lli[i].dar.lsb, lower_32_bits(child->dar));
- SET_LL_32(&lli[i].dar.msb, upper_32_bits(child->dar));
- #endif /* CONFIG_64BIT */
+ SET_LL_64(&lli[i].dar.reg, child->dar);
+
i++;
}

@@ -349,12 +335,7 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
/* Channel control */
SET_LL_32(&llp->control, control);
/* Linked list */
- #ifdef CONFIG_64BIT
- SET_LL_64(&llp->llp.reg, chunk->ll_region.paddr);
- #else /* CONFIG_64BIT */
- SET_LL_32(&llp->llp.lsb, lower_32_bits(chunk->ll_region.paddr));
- SET_LL_32(&llp->llp.msb, upper_32_bits(chunk->ll_region.paddr));
- #endif /* CONFIG_64BIT */
+ SET_LL_64(&llp->llp.reg, chunk->ll_region.paddr);
}

void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
@@ -417,19 +398,11 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
SET_CH_32(dw, chan->dir, chan->id, ch_control1,
(DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
/* Linked list */
-
- #ifdef CONFIG_64BIT
/* llp is not aligned on 64bit -> keep 32bit accesses */
SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
lower_32_bits(chunk->ll_region.paddr));
SET_CH_32(dw, chan->dir, chan->id, llp.msb,
upper_32_bits(chunk->ll_region.paddr));
- #else /* CONFIG_64BIT */
- SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
- lower_32_bits(chunk->ll_region.paddr));
- SET_CH_32(dw, chan->dir, chan->id, llp.msb,
- upper_32_bits(chunk->ll_region.paddr));
- #endif /* CONFIG_64BIT */
}
/* Doorbell */
SET_RW_32(dw, chan->dir, doorbell,
--
2.35.1