path: root/arch/avr32
author     Peter Zijlstra <peterz@infradead.org>  2016-04-18 01:16:08 +0200
committer  Ingo Molnar <mingo@kernel.org>         2016-06-16 10:48:23 +0200
commit     1a6eafacd4811cdc1b138faee858527658eee4e1 (patch)
tree       0ffdf7f955afd37ce86b066741c0f4b328b9d63f /arch/avr32
parent     2efe95fe695270ae1a225805f016303505972d86 (diff)
download   linux-1a6eafacd4811cdc1b138faee858527658eee4e1.tar.gz
           linux-1a6eafacd4811cdc1b138faee858527658eee4e1.tar.xz
locking/atomic, arch/avr32: Implement atomic_fetch_{add,sub,and,or,xor}()
Implement FETCH-OP atomic primitives, these are very similar to the
existing OP-RETURN primitives we already have, except they return the
value of the atomic variable _before_ modification.

This is especially useful for irreversible operations -- such as
bitops (because it becomes impossible to reconstruct the state prior
to modification).

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Hans-Christian Noren Egtvedt <egtvedt@samfundet.no>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Haavard Skinnemoen <hskinnemoen@gmail.com>
Cc: Hans-Christian Egtvedt <egtvedt@samfundet.no>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
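To make the before-value semantics concrete, here is a minimal caller
sketch (not part of this patch; the flag bit, variable and helper names
are invented for illustration) using the atomic_fetch_or() form this
series introduces:

    #include <linux/atomic.h>
    #include <linux/types.h>

    /* Hypothetical flag bit and state word, for illustration only. */
    #define MY_CLAIMED_BIT	0x1

    static atomic_t my_state = ATOMIC_INIT(0);

    /* Returns true only for the one caller that actually set the bit. */
    static bool my_claim(void)
    {
    	/*
    	 * atomic_fetch_or() returns the value the variable held
    	 * *before* the OR.  An OP-RETURN primitive returns the new
    	 * value, in which the bit is always set, so the prior state
    	 * cannot be reconstructed once the OR has happened.
    	 */
    	int old = atomic_fetch_or(MY_CLAIMED_BIT, &my_state);

    	return !(old & MY_CLAIMED_BIT);
    }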
Diffstat (limited to 'arch/avr32')
-rw-r--r--  arch/avr32/include/asm/atomic.h | 56
1 file changed, 51 insertions(+), 5 deletions(-)
diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h
index d74fd8ce980a..b8681fd495ef 100644
--- a/arch/avr32/include/asm/atomic.h
+++ b/arch/avr32/include/asm/atomic.h
@@ -41,21 +41,51 @@ static inline int __atomic_##op##_return(int i, atomic_t *v) \
return result; \
}
+#define ATOMIC_FETCH_OP(op, asm_op, asm_con) \
+static inline int __atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ int result, val; \
+ \
+ asm volatile( \
+ "/* atomic_fetch_" #op " */\n" \
+ "1: ssrf 5\n" \
+ " ld.w %0, %3\n" \
+ " mov %1, %0\n" \
+ " " #asm_op " %1, %4\n" \
+ " stcond %2, %1\n" \
+ " brne 1b" \
+ : "=&r" (result), "=&r" (val), "=o" (v->counter) \
+ : "m" (v->counter), #asm_con (i) \
+ : "cc"); \
+ \
+ return result; \
+}
+
ATOMIC_OP_RETURN(sub, sub, rKs21)
ATOMIC_OP_RETURN(add, add, r)
+ATOMIC_FETCH_OP (sub, sub, rKs21)
+ATOMIC_FETCH_OP (add, add, r)
+
+#define atomic_fetch_or atomic_fetch_or
-#define ATOMIC_OP(op, asm_op) \
+#define ATOMIC_OPS(op, asm_op) \
ATOMIC_OP_RETURN(op, asm_op, r) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
(void)__atomic_##op##_return(i, v); \
+} \
+ATOMIC_FETCH_OP(op, asm_op, r) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ return __atomic_fetch_##op(i, v); \
}
-ATOMIC_OP(and, and)
-ATOMIC_OP(or, or)
-ATOMIC_OP(xor, eor)
+ATOMIC_OPS(and, and)
+ATOMIC_OPS(or, or)
+ATOMIC_OPS(xor, eor)
-#undef ATOMIC_OP
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
/*
@@ -87,6 +117,14 @@ static inline int atomic_add_return(int i, atomic_t *v)
return __atomic_add_return(i, v);
}
+static inline int atomic_fetch_add(int i, atomic_t *v)
+{
+ if (IS_21BIT_CONST(i))
+ return __atomic_fetch_sub(-i, v);
+
+ return __atomic_fetch_add(i, v);
+}
+
/*
* atomic_sub_return - subtract the atomic variable
* @i: integer value to subtract
@@ -102,6 +140,14 @@ static inline int atomic_sub_return(int i, atomic_t *v)
return __atomic_add_return(-i, v);
}
+static inline int atomic_fetch_sub(int i, atomic_t *v)
+{
+ if (IS_21BIT_CONST(i))
+ return __atomic_fetch_sub(i, v);
+
+ return __atomic_fetch_add(-i, v);
+}
+
/*
* __atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t