Diffstat (limited to 'include/asm-generic')
 -rw-r--r--  include/asm-generic/atomic.h           |  19
 -rw-r--r--  include/asm-generic/bug.h              |   5
 -rw-r--r--  include/asm-generic/cmpxchg-local.h    |  64
 -rw-r--r--  include/asm-generic/cmpxchg.h          |  26
 -rw-r--r--  include/asm-generic/errno.h            |  11
 -rw-r--r--  include/asm-generic/io-typeconfused.h  |  75
 -rw-r--r--  include/asm-generic/io.h               | 247
 -rw-r--r--  include/asm-generic/memory_layout.h    |   6
 -rw-r--r--  include/asm-generic/pointer.h          |  32
 -rw-r--r--  include/asm-generic/uaccess.h          | 205
 -rw-r--r--  include/asm-generic/word-at-a-time.h   | 121
 11 files changed, 760 insertions(+), 51 deletions(-)
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 6e63b8e8e7..74429e1c37 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -8,12 +8,13 @@
#ifndef __ASM_GENERIC_ATOMIC_H
#define __ASM_GENERIC_ATOMIC_H
+#include <linux/types.h>
+
#ifdef CONFIG_SMP
#error SMP not supported
#endif
#define ATOMIC_INIT(i) { (i) }
-#ifdef CONFIG_64BIT
typedef struct { s64 counter; } atomic64_t;
#define atomic64_read(v) ((v)->counter)
@@ -59,7 +60,7 @@ static inline int atomic64_add_negative(s64 i, volatile atomic64_t *v)
return val < 0;
}
-#else
+
typedef struct { volatile int counter; } atomic_t;
#define ATOMIC_INIT(i) { (i) }
@@ -77,16 +78,19 @@ static inline void atomic_sub(int i, volatile atomic_t *v)
v->counter -= i;
}
-static inline void atomic_inc(volatile atomic_t *v)
+static inline int atomic_inc_return(volatile atomic_t *v)
{
- v->counter += 1;
+ return ++v->counter;
}
-static inline void atomic_dec(volatile atomic_t *v)
+static inline int atomic_dec_return(volatile atomic_t *v)
{
- v->counter -= 1;
+ return --v->counter;
}
+#define atomic_inc(v) ((void)atomic_inc_return(v))
+#define atomic_dec(v) ((void)atomic_dec_return(v))
+
static inline int atomic_dec_and_test(volatile atomic_t *v)
{
int val;
@@ -111,7 +115,8 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
*addr &= ~mask;
}
-#endif
+
+#define atomic_cmpxchg(v, o, n) cmpxchg(&((v)->counter), o, n)
/* Atomic operations are already serializing on ARM */
#define smp_mb__before_atomic_dec() barrier()
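
The change above keeps atomic_inc()/atomic_dec() as wrappers around the new
*_return variants. A minimal sketch of the pattern this enables, with a
hypothetical refcounted object (names invented, not part of this patch):

    #include <asm-generic/atomic.h>

    /* Hypothetical refcounted object, for illustration only. */
    struct foo {
            atomic_t refcnt;
    };

    static void foo_get(struct foo *f)
    {
            atomic_inc(&f->refcnt);        /* now (void)atomic_inc_return() */
    }

    /* Returns 1 when the last reference is dropped. */
    static int foo_put(struct foo *f)
    {
            return atomic_dec_return(&f->refcnt) == 0;
    }
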
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 5d0a458eae..18a1b419ff 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -62,4 +62,9 @@
unlikely(__ret_warn_once); \
})
+#define ASSERT(expr) do { \
+ if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) \
+ BUG_ON(!(expr)); \
+} while (0)
+
#endif
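
A sketch of the intended use of the new ASSERT(): it costs nothing unless
CONFIG_BUG_ON_DATA_CORRUPTION is enabled, at which point a failed check
becomes a BUG_ON(). The list invariant below is an illustrative example,
not taken from this patch:

    #include <linux/list.h>
    #include <asm-generic/bug.h>

    /* Check a doubly-linked-list invariant whose failure would indicate
     * memory corruption. Compiles away when the option is disabled. */
    static void check_node(const struct list_head *node)
    {
            ASSERT(node->next->prev == node);
            ASSERT(node->prev->next == node);
    }
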
diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h
new file mode 100644
index 0000000000..d93b103d5c
--- /dev/null
+++ b/include/asm-generic/cmpxchg-local.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_CMPXCHG_LOCAL_H
+#define __ASM_GENERIC_CMPXCHG_LOCAL_H
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+extern unsigned long wrong_size_cmpxchg(volatile void *ptr)
+ __noreturn;
+
+/*
+ * Generic version of __cmpxchg_local. Takes an unsigned long parameter,
+ * supporting operand sizes of 1, 2, 4 and (on 64-bit) 8 bytes. Note that
+ * unlike the kernel version, this one does not disable interrupts.
+ */
+static inline unsigned long __generic_cmpxchg_local(volatile void *ptr,
+ unsigned long old, unsigned long new, int size)
+{
+ unsigned long prev;
+
+ /*
+ * Sanity checking, compile-time.
+ */
+ if (size == 8 && sizeof(unsigned long) != 8)
+ wrong_size_cmpxchg(ptr);
+
+ switch (size) {
+ case 1: prev = *(u8 *)ptr;
+ if (prev == old)
+ *(u8 *)ptr = (u8)new;
+ break;
+ case 2: prev = *(u16 *)ptr;
+ if (prev == old)
+ *(u16 *)ptr = (u16)new;
+ break;
+ case 4: prev = *(u32 *)ptr;
+ if (prev == old)
+ *(u32 *)ptr = (u32)new;
+ break;
+ case 8: prev = *(u64 *)ptr;
+ if (prev == old)
+ *(u64 *)ptr = (u64)new;
+ break;
+ default:
+ wrong_size_cmpxchg(ptr);
+ }
+ return prev;
+}
+
+/*
+ * Generic version of __cmpxchg64_local. Takes a u64 parameter.
+ */
+static inline u64 __generic_cmpxchg64_local(volatile void *ptr,
+ u64 old, u64 new)
+{
+ u64 prev;
+
+ prev = *(u64 *)ptr;
+ if (prev == old)
+ *(u64 *)ptr = new;
+
+ return prev;
+}
+
+#endif
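
For clarity, the contract these helpers implement: the old value at ptr is
always returned, and the store happens only if that value matched old. A
hedged sketch of a caller (the flag name is invented; remember this generic
version is not interrupt-safe):

    #include <asm-generic/cmpxchg-local.h>

    /* Claim a one-shot flag: succeeds only if it was still 0. */
    static int try_claim(u32 *flag)
    {
            unsigned long prev;

            prev = __generic_cmpxchg_local(flag, 0, 1, sizeof(*flag));
            return prev == 0;      /* the returned value is what we saw */
    }
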
diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h
new file mode 100644
index 0000000000..b90524135e
--- /dev/null
+++ b/include/asm-generic/cmpxchg.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Generic non-atomic cmpxchg for porting kernel code.
+ */
+
+#ifndef __ASM_GENERIC_CMPXCHG_H
+#define __ASM_GENERIC_CMPXCHG_H
+
+#ifdef CONFIG_SMP
+#error "Cannot use generic cmpxchg on SMP"
+#endif
+
+#include <asm-generic/cmpxchg-local.h>
+
+#define generic_cmpxchg_local(ptr, o, n) ({ \
+ ((__typeof__(*(ptr)))__generic_cmpxchg_local((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr)))); \
+})
+
+#define generic_cmpxchg64_local(ptr, o, n) \
+ __generic_cmpxchg64_local((ptr), (o), (n))
+
+#define cmpxchg generic_cmpxchg_local
+#define cmpxchg64 generic_cmpxchg64_local
+
+#endif
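
A minimal sketch of the canonical retry loop built on the cmpxchg alias; on
this UP-only backend the loop can never retry, but the idiom stays portable
to real atomic implementations (names invented):

    #include <asm-generic/cmpxchg.h>

    /* Atomically add delta and return the previous value. */
    static unsigned int fetch_add(unsigned int *ctr, unsigned int delta)
    {
            unsigned int old;

            do {
                    old = *ctr;
            } while (cmpxchg(ctr, old, old + delta) != old);

            return old;
    }
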
diff --git a/include/asm-generic/errno.h b/include/asm-generic/errno.h
index a96f8864df..7629d5c8dd 100644
--- a/include/asm-generic/errno.h
+++ b/include/asm-generic/errno.h
@@ -134,15 +134,4 @@
#define EKEYREVOKED 128 /* Key has been revoked */
#define EKEYREJECTED 129 /* Key was rejected by service */
-/* Should never be seen by user programs */
-#define ERESTARTSYS 512
-#define ERESTARTNOINTR 513
-#define ERESTARTNOHAND 514 /* restart if no handler.. */
-#define ENOIOCTLCMD 515 /* No ioctl command */
-#define EPROBE_DEFER 517 /* Driver requests probe retry */
-
-#define ENOTSUPP 524 /* Operation is not supported */
-
-#define _LAST_ERRNO 524
-
#endif
diff --git a/include/asm-generic/io-typeconfused.h b/include/asm-generic/io-typeconfused.h
new file mode 100644
index 0000000000..d25ed7db24
--- /dev/null
+++ b/include/asm-generic/io-typeconfused.h
@@ -0,0 +1,75 @@
+/* Generic I/O port emulation, based on MN10300 code
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef __ASM_GENERIC_IO_TYPECONFUSED_H
+#define __ASM_GENERIC_IO_TYPECONFUSED_H
+
+#include <linux/string.h> /* for memset() and memcpy() */
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+/*****************************************************************************/
+/*
+ * Unlike the definitions in <asm-generic/io.h>, these macros don't complain
+ * about integer arguments and just silently cast them to pointers. This is
+ * a common cause of bugs, but lots of existing code depends on this, so
+ * this header is provided as a transitory measure.
+ */
+
+#ifndef __raw_readb
+#define __raw_readb(a) (__chk_io_ptr(a), *(volatile unsigned char __force *)(a))
+#endif
+
+#ifndef __raw_readw
+#define __raw_readw(a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
+#endif
+
+#ifndef __raw_readl
+#define __raw_readl(a) (__chk_io_ptr(a), *(volatile unsigned int __force *)(a))
+#endif
+
+#ifndef readb
+#define readb __raw_readb
+#endif
+
+#ifndef readw
+#define readw(addr) __le16_to_cpu(__raw_readw(addr))
+#endif
+
+#ifndef readl
+#define readl(addr) __le32_to_cpu(__raw_readl(addr))
+#endif
+
+#ifndef __raw_writeb
+#define __raw_writeb(v,a) (__chk_io_ptr(a), *(volatile unsigned char __force *)(a) = (v))
+#endif
+
+#ifndef __raw_writew
+#define __raw_writew(v,a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v))
+#endif
+
+#ifndef __raw_writel
+#define __raw_writel(v,a) (__chk_io_ptr(a), *(volatile unsigned int __force *)(a) = (v))
+#endif
+
+#ifndef writeb
+#define writeb __raw_writeb
+#endif
+
+#ifndef writew
+#define writew(b,addr) __raw_writew(__cpu_to_le16(b),addr)
+#endif
+
+#ifndef writel
+#define writel(b,addr) __raw_writel(__cpu_to_le32(b),addr)
+#endif
+
+#endif /* __ASM_GENERIC_IO_TYPECONFUSED_H */
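
To make the "type confused" warning concrete: with the macro definitions
above, legacy code that passes a plain integer where an __iomem pointer
belongs still compiles silently, whereas the strict inline-function versions
in <asm-generic/io.h> would diagnose it. The register address below is made
up:

    #include <asm-generic/io-typeconfused.h>

    #define UART_THR 0x48020000UL   /* hypothetical register address */

    static void legacy_putc(char c)
    {
            /* An integer, not a pointer: accepted without complaint here. */
            writeb(c, UART_THR);
    }
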
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index acb70509d1..123ad5488f 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -12,81 +12,228 @@
#define __ASM_GENERIC_IO_H
#include <linux/string.h> /* for memset() and memcpy() */
+#include <linux/compiler.h>
+#include <linux/instruction_pointer.h>
#include <linux/types.h>
#include <asm/byteorder.h>
-/*****************************************************************************/
+#ifndef __LINUX_IO_STRICT_PROTOTYPES__
+#include <asm-generic/io-typeconfused.h>
+#endif
+
+#define __io_br() barrier()
+#define __io_ar(v) barrier()
+#define __io_bw() barrier()
+#define __io_pbw() __io_bw()
+#define __io_paw() __io_aw()
+#define __io_aw() do { } while (0)
+#define __io_pbr() __io_br()
+#define __io_par(v) __io_ar(v)
+
+static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0) {}
+static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0) {}
+static inline void log_read_mmio(u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0) {}
+static inline void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0) {}
+
/*
- * readX/writeX() are used to access memory mapped devices. On some
- * architectures the memory mapped IO stuff needs to be accessed
- * differently. On the simple architectures, we just read/write the
- * memory location directly.
+ * __raw_{read,write}{b,w,l,q}() access memory in native endianness.
+ *
+ * On some architectures memory mapped IO needs to be accessed differently.
+ * On the simple architectures, we just read/write the memory location
+ * directly.
*/
#ifndef __raw_readb
-#define __raw_readb(a) (__chk_io_ptr(a), *(volatile unsigned char __force *)(a))
+#define __raw_readb __raw_readb
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+ return *(const volatile u8 __force *)addr;
+}
#endif
#ifndef __raw_readw
-#define __raw_readw(a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
+#define __raw_readw __raw_readw
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+ return *(const volatile u16 __force *)addr;
+}
#endif
#ifndef __raw_readl
-#define __raw_readl(a) (__chk_io_ptr(a), *(volatile unsigned int __force *)(a))
+#define __raw_readl __raw_readl
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+ return *(const volatile u32 __force *)addr;
+}
+#endif
+
+#ifdef CONFIG_64BIT
+#ifndef __raw_readq
+#define __raw_readq __raw_readq
+static inline u64 __raw_readq(const volatile void __iomem *addr)
+{
+ return *(const volatile u64 __force *)addr;
+}
+#endif
+#endif /* CONFIG_64BIT */
+
+#ifndef __raw_writeb
+#define __raw_writeb __raw_writeb
+static inline void __raw_writeb(u8 value, volatile void __iomem *addr)
+{
+ *(volatile u8 __force *)addr = value;
+}
+#endif
+
+#ifndef __raw_writew
+#define __raw_writew __raw_writew
+static inline void __raw_writew(u16 value, volatile void __iomem *addr)
+{
+ *(volatile u16 __force *)addr = value;
+}
+#endif
+
+#ifndef __raw_writel
+#define __raw_writel __raw_writel
+static inline void __raw_writel(u32 value, volatile void __iomem *addr)
+{
+ *(volatile u32 __force *)addr = value;
+}
#endif
+#ifdef CONFIG_64BIT
+#ifndef __raw_writeq
+#define __raw_writeq __raw_writeq
+static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
+{
+ *(volatile u64 __force *)addr = value;
+}
+#endif
+#endif /* CONFIG_64BIT */
+
+/*
+ * {read,write}{b,w,l,q}() access little endian memory and return result in
+ * native endianness.
+ */
+
#ifndef readb
-#define readb __raw_readb
+#define readb readb
+static inline u8 readb(const volatile void __iomem *addr)
+{
+ u8 val;
+
+ log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
+ __io_br();
+ val = __raw_readb(addr);
+ __io_ar(val);
+ log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
+ return val;
+}
#endif
#ifndef readw
-#define readw(addr) __le16_to_cpu(__raw_readw(addr))
+#define readw readw
+static inline u16 readw(const volatile void __iomem *addr)
+{
+ u16 val;
+
+ log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
+ __io_br();
+ val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
+ __io_ar(val);
+ log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
+ return val;
+}
#endif
#ifndef readl
-#define readl(addr) __le32_to_cpu(__raw_readl(addr))
-#endif
+#define readl readl
+static inline u32 readl(const volatile void __iomem *addr)
+{
+ u32 val;
-#ifndef __raw_writeb
-#define __raw_writeb(v,a) (__chk_io_ptr(a), *(volatile unsigned char __force *)(a) = (v))
+ log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
+ __io_br();
+ val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
+ __io_ar(val);
+ log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
+ return val;
+}
#endif
-#ifndef __raw_writew
-#define __raw_writew(v,a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v))
-#endif
+#ifdef CONFIG_64BIT
+#ifndef readq
+#define readq readq
+static inline u64 readq(const volatile void __iomem *addr)
+{
+ u64 val;
-#ifndef __raw_writel
-#define __raw_writel(v,a) (__chk_io_ptr(a), *(volatile unsigned int __force *)(a) = (v))
+ log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
+ __io_br();
+ val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
+ __io_ar(val);
+ log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
+ return val;
+}
#endif
+#endif /* CONFIG_64BIT */
#ifndef writeb
-#define writeb __raw_writeb
+#define writeb writeb
+static inline void writeb(u8 value, volatile void __iomem *addr)
+{
+ log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+ __io_bw();
+ __raw_writeb(value, addr);
+ __io_aw();
+ log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+}
#endif
#ifndef writew
-#define writew(b,addr) __raw_writew(__cpu_to_le16(b),addr)
+#define writew writew
+static inline void writew(u16 value, volatile void __iomem *addr)
+{
+ log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ __io_bw();
+ __raw_writew((u16 __force)cpu_to_le16(value), addr);
+ __io_aw();
+ log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+}
#endif
#ifndef writel
-#define writel(b,addr) __raw_writel(__cpu_to_le32(b),addr)
-#endif
-
-#ifdef CONFIG_64BIT
-static inline u64 __raw_readq(const volatile void __iomem *addr)
+#define writel writel
+static inline void writel(u32 value, volatile void __iomem *addr)
{
- return *(const volatile u64 __force *) addr;
+ log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ __io_bw();
+ __raw_writel((u32 __force)__cpu_to_le32(value), addr);
+ __io_aw();
+ log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
}
-#define readq(addr) __le64_to_cpu(__raw_readq(addr))
+#endif
-static inline void __raw_writeq(u64 b, volatile void __iomem *addr)
+#ifdef CONFIG_64BIT
+#ifndef writeq
+#define writeq writeq
+static inline void writeq(u64 value, volatile void __iomem *addr)
{
- *(volatile u64 __force *) addr = b;
+ log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ __io_bw();
+ __raw_writeq((u64 __force)__cpu_to_le64(value), addr);
+ __io_aw();
+ log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
}
-#define writeq(b,addr) __raw_writeq(__cpu_to_le64(b),addr)
#endif
+#endif /* CONFIG_64BIT */
#ifndef PCI_IOBASE
-#define PCI_IOBASE ((void __iomem *)0)
+#define PCI_IOBASE ((void __iomem *)RELOC_HIDE((void *)0, 0))
#endif
#ifndef IO_SPACE_LIMIT
@@ -768,7 +915,7 @@ static inline void iowrite64_rep(volatile void __iomem *addr,
*/
#ifndef virt_to_phys
#define virt_to_phys virt_to_phys
-static inline unsigned long virt_to_phys(volatile void *mem)
+static inline unsigned long virt_to_phys(const volatile void *mem)
{
return (unsigned long)mem;
}
@@ -846,4 +993,38 @@ static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
#include <asm-generic/bitio.h>
+#define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err)
+
+#ifndef readq_relaxed
+#define readq_relaxed(addr) readq(addr)
+#endif
+
+#ifndef readl_relaxed
+#define readl_relaxed(addr) readl(addr)
+#endif
+
+#ifndef readw_relaxed
+#define readw_relaxed(addr) readw(addr)
+#endif
+
+#ifndef readb_relaxed
+#define readb_relaxed(addr) readb(addr)
+#endif
+
+#ifndef writeq_relaxed
+#define writeq_relaxed(val, addr) writeq((val), (addr))
+#endif
+
+#ifndef writel_relaxed
+#define writel_relaxed(val, addr) writel((val), (addr))
+#endif
+
+#ifndef writew_relaxed
+#define writew_relaxed(val, addr) writew((val), (addr))
+#endif
+
+#ifndef writeb_relaxed
+#define writeb_relaxed(val, addr) writeb((val), (addr))
+#endif
+
#endif /* __ASM_GENERIC_IO_H */
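
A short sketch of the rewritten accessors in use; each readl() wraps the
access in the __io_br()/__io_ar() barriers and the mmio log hooks shown
above. The device layout is invented:

    #include <asm-generic/io.h>

    #define DEV_STATUS      0x00            /* hypothetical status register */
    #define DEV_STATUS_RDY  (1u << 0)
    #define DEV_DATA        0x04            /* hypothetical data register */

    static u32 dev_read_word(void __iomem *base)
    {
            /* Poll until the device reports data ready, then fetch it. */
            while (!(readl(base + DEV_STATUS) & DEV_STATUS_RDY))
                    ;
            return readl(base + DEV_DATA);
    }
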
diff --git a/include/asm-generic/memory_layout.h b/include/asm-generic/memory_layout.h
index 7593e18da1..6af1db8113 100644
--- a/include/asm-generic/memory_layout.h
+++ b/include/asm-generic/memory_layout.h
@@ -19,6 +19,12 @@
#define OPTEE_SIZE 0
#endif
+#ifdef CONFIG_OPTEE_SHM_SIZE
+#define OPTEE_SHM_SIZE CONFIG_OPTEE_SHM_SIZE
+#else
+#define OPTEE_SHM_SIZE 0
+#endif
+
#define HEAD_TEXT_BASE MALLOC_BASE
#define MALLOC_SIZE CONFIG_MALLOC_SIZE
#define STACK_SIZE CONFIG_STACK_SIZE
diff --git a/include/asm-generic/pointer.h b/include/asm-generic/pointer.h
new file mode 100644
index 0000000000..89817ce59e
--- /dev/null
+++ b/include/asm-generic/pointer.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_GENERIC_PTR_H___
+#define __ASM_GENERIC_PTR_H___
+
+#if __SIZEOF_POINTER__ == 8
+#ifdef __ASSEMBLY__
+#define ASM_PTR .quad
+#define ASM_SZPTR 8
+#define ASM_LGPTR 3
+#define ASM_LD_PTR(x) QUAD(x)
+#else
+#define ASM_PTR ".quad"
+#define ASM_SZPTR "8"
+#define ASM_LGPTR "3"
+#endif
+#elif __SIZEOF_POINTER__ == 4
+#ifdef __ASSEMBLY__
+#define ASM_PTR .word
+#define ASM_SZPTR 4
+#define ASM_LGPTR 2
+#define ASM_LD_PTR(x) LONG(x)
+#else
+#define ASM_PTR ".word"
+#define ASM_SZPTR "4"
+#define ASM_LGPTR "2"
+#endif
+#else
+#error "Unexpected __SIZEOF_POINTER__"
+#endif
+
+#endif /* __ASM_GENERIC_PTR_H___ */
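
A hedged sketch of what the string forms are for: emitting a pointer-sized
table entry from C-level inline assembly without caring about the word size.
Section and symbol names are invented:

    #include <asm-generic/pointer.h>

    extern void my_handler(void);

    /* Emit one pointer-sized slot into a custom section; ASM_PTR expands
     * to ".quad" on 64-bit targets and ".word" on 32-bit ones. */
    asm(".pushsection .handler_table, \"a\"\n\t"
        ASM_PTR " my_handler\n\t"
        ".popsection");
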
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
new file mode 100644
index 0000000000..73f1a895fd
--- /dev/null
+++ b/include/asm-generic/uaccess.h
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_UACCESS_H
+#define __ASM_GENERIC_UACCESS_H
+
+/*
+ * User space memory access functions; these should work
+ * on any machine that has kernel and user data in the same
+ * address space, e.g. all NOMMU machines.
+ */
+#include <linux/barebox-wrapper.h>
+#include <linux/bug.h>
+#include <linux/string.h>
+#include <asm/unaligned.h>
+
+static inline void might_fault(void) { }
+static inline int access_ok(const void __user *ptr, unsigned long size) { return 1; }
+
+static __always_inline int
+__get_user_fn(size_t size, const void __user *from, void *to)
+{
+ BUILD_BUG_ON(!__builtin_constant_p(size));
+
+ switch (size) {
+ case 1:
+ *(u8 *)to = *((u8 __force *)from);
+ return 0;
+ case 2:
+ *(u16 *)to = get_unaligned((u16 __force *)from);
+ return 0;
+ case 4:
+ *(u32 *)to = get_unaligned((u32 __force *)from);
+ return 0;
+ case 8:
+ *(u64 *)to = get_unaligned((u64 __force *)from);
+ return 0;
+ default:
+ BUILD_BUG();
+ return 0;
+ }
+
+}
+#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)
+
+static __always_inline int
+__put_user_fn(size_t size, void __user *to, void *from)
+{
+ BUILD_BUG_ON(!__builtin_constant_p(size));
+
+ switch (size) {
+ case 1:
+ *(u8 __force *)to = *(u8 *)from;
+ return 0;
+ case 2:
+ put_unaligned(*(u16 *)from, (u16 __force *)to);
+ return 0;
+ case 4:
+ put_unaligned(*(u32 *)from, (u32 __force *)to);
+ return 0;
+ case 8:
+ put_unaligned(*(u64 *)from, (u64 __force *)to);
+ return 0;
+ default:
+ BUILD_BUG();
+ return 0;
+ }
+}
+#define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k)
+
+#define __get_kernel_nofault(dst, src, type, err_label) \
+do { \
+ *((type *)dst) = get_unaligned((type *)(src)); \
+ if (0) /* make sure the label looks used to the compiler */ \
+ goto err_label; \
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, err_label) \
+do { \
+ put_unaligned(*((type *)src), (type *)(dst)); \
+ if (0) /* make sure the label looks used to the compiler */ \
+ goto err_label; \
+} while (0)
+
+static inline __must_check unsigned long
+raw_copy_from_user(void *to, const void __user * from, unsigned long n)
+{
+ memcpy(to, (const void __force *)from, n);
+ return 0;
+}
+
+static inline __must_check unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ memcpy((void __force *)to, from, n);
+ return 0;
+}
+
+/*
+ * These are the main single-value transfer routines. They automatically
+ * use the right size if we just have the right pointer type.
+ * This version just falls back to copy_{from,to}_user, which should
+ * provide a fast-path for small values.
+ */
+#define __put_user(x, ptr) \
+({ \
+ __typeof__(*(ptr)) __x = (x); \
+ int __pu_err = -EFAULT; \
+ __chk_user_ptr(ptr); \
+ switch (sizeof (*(ptr))) { \
+ case 1: \
+ case 2: \
+ case 4: \
+ case 8: \
+ __pu_err = __put_user_fn(sizeof (*(ptr)), \
+ ptr, &__x); \
+ break; \
+ default: \
+ __put_user_bad(); \
+ break; \
+ } \
+ __pu_err; \
+})
+
+#define put_user(x, ptr) \
+({ \
+ void __user *__p = (ptr); \
+ might_fault(); \
+ access_ok(__p, sizeof(*ptr)) ? \
+ __put_user((x), ((__typeof__(*(ptr)) __user *)__p)) : \
+ -EFAULT; \
+})
+
+extern int __put_user_bad(void) __attribute__((noreturn));
+
+#define __get_user(x, ptr) \
+({ \
+ int __gu_err = -EFAULT; \
+ __chk_user_ptr(ptr); \
+ switch (sizeof(*(ptr))) { \
+ case 1: { \
+ unsigned char __x = 0; \
+ __gu_err = __get_user_fn(sizeof (*(ptr)), \
+ ptr, &__x); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__x; \
+ break; \
+ }; \
+ case 2: { \
+ unsigned short __x = 0; \
+ __gu_err = __get_user_fn(sizeof (*(ptr)), \
+ ptr, &__x); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__x; \
+ break; \
+ }; \
+ case 4: { \
+ unsigned int __x = 0; \
+ __gu_err = __get_user_fn(sizeof (*(ptr)), \
+ ptr, &__x); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__x; \
+ break; \
+ }; \
+ case 8: { \
+ unsigned long long __x = 0; \
+ __gu_err = __get_user_fn(sizeof (*(ptr)), \
+ ptr, &__x); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__x; \
+ break; \
+ }; \
+ default: \
+ __get_user_bad(); \
+ break; \
+ } \
+ __gu_err; \
+})
+
+#define get_user(x, ptr) \
+({ \
+ const void __user *__p = (ptr); \
+ might_fault(); \
+ access_ok(__p, sizeof(*ptr)) ? \
+ __get_user((x), (__typeof__(*(ptr)) __user *)__p) :\
+ ((x) = (__typeof__(*(ptr)))0,-EFAULT); \
+})
+
+extern int __get_user_bad(void) __attribute__((noreturn));
+
+/*
+ * Zero Userspace
+ */
+static inline __must_check unsigned long
+__clear_user(void __user *to, unsigned long n)
+{
+ memset((void __force *)to, 0, n);
+ return 0;
+}
+
+static inline __must_check unsigned long
+clear_user(void __user *to, unsigned long n)
+{
+ might_fault();
+ if (!access_ok(to, n))
+ return n;
+
+ return __clear_user(to, n);
+}
+
+#endif /* __ASM_GENERIC_UACCESS_H */
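
A sketch of a typical caller of the new single-value helpers; on these
same-address-space stubs access_ok() always succeeds, but the error-checking
idiom is kept so the code stays correct on ports that can fault (function
and semantics invented for illustration):

    #include <asm-generic/uaccess.h>

    /* Read a u32 from user space and write it back incremented. */
    static int echo_inc(u32 __user *uptr)
    {
            u32 val;

            if (get_user(val, uptr))
                    return -EFAULT;

            return put_user(val + 1, uptr);
    }
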
diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h
new file mode 100644
index 0000000000..95a1d21410
--- /dev/null
+++ b/include/asm-generic/word-at-a-time.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+#include <linux/kernel.h>
+#include <asm/byteorder.h>
+
+#ifdef __BIG_ENDIAN
+
+struct word_at_a_time {
+ const unsigned long high_bits, low_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0xfe) + 1, REPEAT_BYTE(0x7f) }
+
+/* Bit set in the bytes that have a zero */
+static inline long prep_zero_mask(unsigned long val, unsigned long rhs, const struct word_at_a_time *c)
+{
+ unsigned long mask = (val & c->low_bits) + c->low_bits;
+ return ~(mask | rhs);
+}
+
+#define create_zero_mask(mask) (mask)
+
+static inline long find_zero(unsigned long mask)
+{
+ long byte = 0;
+#ifdef CONFIG_64BIT
+ if (mask >> 32)
+ mask >>= 32;
+ else
+ byte = 4;
+#endif
+ if (mask >> 16)
+ mask >>= 16;
+ else
+ byte += 2;
+ return (mask >> 8) ? byte : byte + 1;
+}
+
+static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+{
+ unsigned long rhs = val | c->low_bits;
+ *data = rhs;
+ return (val + c->high_bits) & ~rhs;
+}
+
+#ifndef zero_bytemask
+#define zero_bytemask(mask) (~1ul << __fls(mask))
+#endif
+
+#else
+
+/*
+ * The optimal byte mask counting is probably going to be something
+ * that is architecture-specific. If you have a reliably fast
+ * bit count instruction, that might be better than the multiply
+ * and shift, for example.
+ */
+struct word_at_a_time {
+ const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+#ifdef CONFIG_64BIT
+
+/*
+ * Jan Achrenius on G+: microoptimized version of
+ * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
+ * that works for the bytemasks without having to
+ * mask them first.
+ */
+static inline long count_masked_bytes(unsigned long mask)
+{
+ return mask*0x0001020304050608ul >> 56;
+}
+
+#else /* 32-bit case */
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+ /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+ long a = (0x0ff0001+mask) >> 23;
+ /* Fix the 1 for 00 case */
+ return a & mask;
+}
+
+#endif
+
+/* Return nonzero if it has a zero */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+ unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+ *bits = mask;
+ return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+ return bits;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+ bits = (bits - 1) & ~bits;
+ return bits >> 7;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ return count_masked_bytes(mask);
+}
+
+#endif /* __BIG_ENDIAN */
+
+#endif /* _ASM_WORD_AT_A_TIME_H */
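
Putting the helpers together, a sketch of the classic application, a
word-at-a-time strlen(). It assumes s is word-aligned and that reading the
whole word containing the terminator is safe, as in-kernel users arrange:

    #include <linux/types.h>
    #include <asm/word-at-a-time.h>

    static size_t waat_strlen(const char *s)
    {
            const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
            const unsigned long *p = (const unsigned long *)s;
            unsigned long val, data, mask;
            size_t len = 0;

            /* Scan one word at a time until a zero byte is detected. */
            for (;; p++, len += sizeof(unsigned long)) {
                    val = *p;
                    mask = has_zero(val, &data, &constants);
                    if (mask)
                            break;
            }

            /* Narrow the detection down to the exact byte offset. */
            mask = prep_zero_mask(val, data, &constants);
            mask = create_zero_mask(mask);
            return len + find_zero(mask);
    }
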