[tip:locking/arch] locking,arch,hexagon: Fold atomic_ops

From: tip-bot for Peter Zijlstra
Date: Thu Aug 14 2014 - 13:21:11 EST


Commit-ID: 50f853e38b0b90a5703ab14b70e20eb5a8ccd5de
Gitweb: http://git.kernel.org/tip/50f853e38b0b90a5703ab14b70e20eb5a8ccd5de
Author: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
AuthorDate: Sun, 23 Mar 2014 18:20:26 +0100
Committer: Ingo Molnar <mingo@xxxxxxxxxx>
CommitDate: Thu, 14 Aug 2014 12:48:06 +0200

locking,arch,hexagon: Fold atomic_ops

OK, no LoC saved in this case, because the !return variants were
already defined in terms of the return ops. Still do it, because this
also prepares for easy addition of new ops.
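
For illustration only (not part of the patch), the fold boils down to the
user-space sketch below: C11 atomics with a compare-and-swap retry loop
stand in for hexagon's memw_locked LL/SC pair. The my_* names and the
extra c_op operator argument are invented for this sketch; the kernel
macros take only the mnemonic, since the asm pastes it straight into the
instruction via "#op".

/* User-space sketch only: C11 atomics approximate the LL/SC loop. */
#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int counter; } my_atomic_t;

/* Generate the void-returning variant, like ATOMIC_OP(op). */
#define MY_ATOMIC_OP(op, c_op)						\
static inline void my_atomic_##op(int i, my_atomic_t *v)		\
{									\
	int old = atomic_load(&v->counter);				\
	/* The retry loop plays the role of "if !P3 jump 1b". */	\
	while (!atomic_compare_exchange_weak(&v->counter, &old,		\
					     old c_op i))		\
		;							\
}

/* Generate the value-returning variant, like ATOMIC_OP_RETURN(op). */
#define MY_ATOMIC_OP_RETURN(op, c_op)					\
static inline int my_atomic_##op##_return(int i, my_atomic_t *v)	\
{									\
	int old = atomic_load(&v->counter);				\
	while (!atomic_compare_exchange_weak(&v->counter, &old,		\
					     old c_op i))		\
		;							\
	return old c_op i;						\
}

#define MY_ATOMIC_OPS(op, c_op)	MY_ATOMIC_OP(op, c_op) MY_ATOMIC_OP_RETURN(op, c_op)

MY_ATOMIC_OPS(add, +)
MY_ATOMIC_OPS(sub, -)

#undef MY_ATOMIC_OPS
#undef MY_ATOMIC_OP_RETURN
#undef MY_ATOMIC_OP

int main(void)
{
	my_atomic_t v = { .counter = 0 };

	my_atomic_add(5, &v);
	printf("%d\n", my_atomic_sub_return(2, &v));	/* prints 3 */
	return 0;
}

With the fold in place, adding a new operation is one more
MY_ATOMIC_OPS() (resp. ATOMIC_OPS()) line, which is the point of the
exercise.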

Signed-off-by: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Acked-by: Richard Kuo <rkuo@xxxxxxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Paul E. McKenney <paulmck@xxxxxxxxxxxxxxxxxx>
Cc: Vineet Gupta <vgupta@xxxxxxxxxxxx>
Cc: linux-hexagon@xxxxxxxxxxxxxxx
Link: http://lkml.kernel.org/r/20140508135852.171567636@xxxxxxxxxxxxx
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
---
arch/hexagon/include/asm/atomic.h | 68 +++++++++++++++++++++------------------
1 file changed, 37 insertions(+), 31 deletions(-)

diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index de916b1..93d0702 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -94,41 +94,47 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return __oldval;
}

-static inline int atomic_add_return(int i, atomic_t *v)
-{
- int output;
-
- __asm__ __volatile__ (
- "1: %0 = memw_locked(%1);\n"
- " %0 = add(%0,%2);\n"
- " memw_locked(%1,P3)=%0;\n"
- " if !P3 jump 1b;\n"
- : "=&r" (output)
- : "r" (&v->counter), "r" (i)
- : "memory", "p3"
- );
- return output;
-
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ int output; \
+ \
+ __asm__ __volatile__ ( \
+ "1: %0 = memw_locked(%1);\n" \
+ " %0 = "#op "(%0,%2);\n" \
+ " memw_locked(%1,P3)=%0;\n" \
+ " if !P3 jump 1b;\n" \
+ : "=&r" (output) \
+ : "r" (&v->counter), "r" (i) \
+ : "memory", "p3" \
+ ); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ int output; \
+ \
+ __asm__ __volatile__ ( \
+ "1: %0 = memw_locked(%1);\n" \
+ " %0 = "#op "(%0,%2);\n" \
+ " memw_locked(%1,P3)=%0;\n" \
+ " if !P3 jump 1b;\n" \
+ : "=&r" (output) \
+ : "r" (&v->counter), "r" (i) \
+ : "memory", "p3" \
+ ); \
+ return output; \
}

-#define atomic_add(i, v) atomic_add_return(i, (v))
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)

-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- int output;
- __asm__ __volatile__ (
- "1: %0 = memw_locked(%1);\n"
- " %0 = sub(%0,%2);\n"
- " memw_locked(%1,P3)=%0\n"
- " if !P3 jump 1b;\n"
- : "=&r" (output)
- : "r" (&v->counter), "r" (i)
- : "memory", "p3"
- );
- return output;
-}
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)

-#define atomic_sub(i, v) atomic_sub_return(i, (v))
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP

/**
* __atomic_add_unless - add unless the number is a given value