author		Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>	2012-10-14 19:49:04 +0800
committer	Sascha Hauer <s.hauer@pengutronix.de>	2012-10-15 19:53:06 +0200
commit		a8a9542a71cb2c7befeb04a3da2a76f8bb5c4bbe (patch)
tree		d9167c0181220a8a76c21714a38c8b6d5053f388 /include
parent		f99ea5c9d8cfddf3a1d6b181d0dbb26e06a86d2b (diff)
download	barebox-a8a9542a71cb2c7befeb04a3da2a76f8bb5c4bbe.tar.gz
		barebox-a8a9542a71cb2c7befeb04a3da2a76f8bb5c4bbe.tar.xz
import include/linux/math64.h
Needed by mtd_dataflash.

Signed-off-by: Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
Diffstat (limited to 'include')
-rw-r--r--	include/linux/math64.h	121
1 file changed, 121 insertions(+), 0 deletions(-)
diff --git a/include/linux/math64.h b/include/linux/math64.h
new file mode 100644
index 0000000000..71dd6d7109
--- /dev/null
+++ b/include/linux/math64.h
@@ -0,0 +1,121 @@
+#ifndef _LINUX_MATH64_H
+#define _LINUX_MATH64_H
+
+#include <linux/types.h>
+#include <asm-generic/div64.h>
+
+#if BITS_PER_LONG == 64
+
+#define div64_long(x,y) div64_s64((x),(y))
+
+/**
+ * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
+ *
+ * This is commonly overridden by 32bit archs to supply an optimized 64bit
+ * divide.
+ */
+static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
+{
+	*remainder = dividend % divisor;
+	return dividend / divisor;
+}
+
+/**
+ * div_s64_rem - signed 64bit divide with 32bit divisor with remainder
+ */
+static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
+{
+	*remainder = dividend % divisor;
+	return dividend / divisor;
+}
+
+/**
+ * div64_u64 - unsigned 64bit divide with 64bit divisor
+ */
+static inline u64 div64_u64(u64 dividend, u64 divisor)
+{
+	return dividend / divisor;
+}
+
+/**
+ * div64_s64 - signed 64bit divide with 64bit divisor
+ */
+static inline s64 div64_s64(s64 dividend, s64 divisor)
+{
+	return dividend / divisor;
+}
+
+#elif BITS_PER_LONG == 32
+
+#define div64_long(x,y) div_s64((x),(y))
+
+#ifndef div_u64_rem
+static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
+{
+	*remainder = do_div(dividend, divisor);
+	return dividend;
+}
+#endif
+
+#ifndef div_s64_rem
+extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
+#endif
+
+#ifndef div64_u64
+extern u64 div64_u64(u64 dividend, u64 divisor);
+#endif
+
+#ifndef div64_s64
+extern s64 div64_s64(s64 dividend, s64 divisor);
+#endif
+
+#endif /* BITS_PER_LONG */
+
+/**
+ * div_u64 - unsigned 64bit divide with 32bit divisor
+ *
+ * This is the most common 64bit divide and should be used if possible,
+ * as many 32bit archs can optimize this variant better than a full 64bit
+ * divide.
+ */
+#ifndef div_u64
+static inline u64 div_u64(u64 dividend, u32 divisor)
+{
+	u32 remainder;
+	return div_u64_rem(dividend, divisor, &remainder);
+}
+#endif
+
+/**
+ * div_s64 - signed 64bit divide with 32bit divisor
+ */
+#ifndef div_s64
+static inline s64 div_s64(s64 dividend, s32 divisor)
+{
+	s32 remainder;
+	return div_s64_rem(dividend, divisor, &remainder);
+}
+#endif
+
+u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);
+
+static __always_inline u32
+__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
+{
+	u32 ret = 0;
+
+	while (dividend >= divisor) {
+		/* The following asm() prevents the compiler from
+		   optimising this loop into a modulo operation. */
+		asm("" : "+rm"(dividend));
+
+		dividend -= divisor;
+		ret++;
+	}
+
+	*remainder = dividend;
+
+	return ret;
+}
+
+#endif /* _LINUX_MATH64_H */
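
A minimal usage sketch (not part of the patch): the commit imports this header for mtd_dataflash, where the typical job is splitting a 64-bit byte offset across non-power-of-two pages, something plain '/' and '%' on a u64 cannot do on 32bit targets without libgcc helpers. The function name and the 528-byte page size below are illustrative assumptions, not code from the driver.

#include <linux/types.h>
#include <linux/math64.h>

#define DF_PAGE_SIZE	528	/* hypothetical DataFlash-style page size */

/* Split a byte offset into a page number and the offset within that
 * page. div_u64_rem() yields quotient and remainder in one call and
 * maps to the arch-optimized do_div() on 32bit builds. */
static u32 df_offset_to_page(u64 offset, u32 *byte_in_page)
{
	/* the quotient fits in 32 bits for any realistic device size */
	return (u32)div_u64_rem(offset, DF_PAGE_SIZE, byte_in_page);
}

On a 64bit build this compiles to a plain division; on 32bit it avoids pulling in __udivdi3/__umoddi3 from libgcc.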
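
__iter_div_u64_rem() deserves the same treatment: it divides by repeated subtraction, so it only pays off when the caller knows the quotient is small, and the empty asm() barrier in its loop exists precisely to stop the compiler from folding the loop back into the full 64bit division it was written to avoid. A minimal sketch, assuming a NSEC_PER_SEC constant of 1000000000 (not defined in this header):

#include <linux/types.h>
#include <linux/math64.h>

#define NSEC_PER_SEC	1000000000U	/* assumed; normally defined elsewhere */

/* Normalize a nanosecond count known to span at most a few seconds;
 * each whole second costs one subtraction, not a 64bit divide. */
static u32 ns_to_sec(u64 ns, u64 *rem_ns)
{
	return __iter_div_u64_rem(ns, NSEC_PER_SEC, rem_ns);
}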