arch/tile/include/asm/futex.h
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * These routines make two important assumptions:
 *
 * 1. atomic_t is really an int and can be freely cast back and forth
 *    (validated in __init_atomic_per_cpu).
 *
 * 2. userspace uses sys_cmpxchg() for all atomic operations, thus using
 *    the same locking convention that all the kernel atomic routines use.
 */
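
/*
 * An illustrative sketch of what assumption (1) permits (not code
 * from this file):
 *
 *	atomic_t a = ATOMIC_INIT(0);
 *	int *p = (int *)&a;
 *
 * This layout compatibility is what lets the futex code hand 32-bit
 * user words straight to the atomic machinery.
 */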

#ifndef _ASM_TILE_FUTEX_H
#define _ASM_TILE_FUTEX_H

#ifndef __ASSEMBLY__

#include <linux/futex.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <asm/atomic.h>

/*
 * Support macros for futex operations.  Do not use these macros directly.
 * They assume "ret", "val", "oparg", and "uaddr" in the lexical context.
 * __futex_cmpxchg() additionally assumes "oldval".
 */
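
/*
 * For example, a caller is expected to look roughly like this (an
 * illustrative sketch, not a function this header provides):
 *
 *	static inline int example_futex_add(u32 __user *uaddr, int oparg)
 *	{
 *		int ret, val;
 *		__futex_prolog();
 *		pagefault_disable();
 *		__futex_add();
 *		pagefault_enable();
 *		return ret ? ret : val;
 *	}
 */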

#ifdef __tilegx__

#define __futex_asm(OP) \
	asm("1: {" #OP " %1, %3, %4; movei %0, 0 }\n"		\
	    ".pushsection .fixup,\"ax\"\n"			\
	    "0: { movei %0, %5; j 9f }\n"			\
	    ".section __ex_table,\"a\"\n"			\
	    ".align 8\n"					\
	    ".quad 1b, 0b\n"					\
	    ".popsection\n"					\
	    "9:"						\
	    : "=r" (ret), "=r" (val), "+m" (*(uaddr))		\
	    : "r" (uaddr), "r" (oparg), "i" (-EFAULT))

#define __futex_set() __futex_asm(exch4)
#define __futex_add() __futex_asm(fetchadd4)
#define __futex_or() __futex_asm(fetchor4)
#define __futex_andn() ({ oparg = ~oparg; __futex_asm(fetchand4); })
#define __futex_cmpxchg() \
	({ __insn_mtspr(SPR_CMPEXCH_VALUE, oldval); __futex_asm(cmpexch4); })
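
/*
 * cmpexch4 takes its "expected" operand from the SPR_CMPEXCH_VALUE
 * special-purpose register rather than from a third register operand,
 * so the old value is staged into that SPR before the exchange.
 */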

#define __futex_xor()						\
	({							\
		u32 oldval, n = oparg;				\
		if ((ret = __get_user(oldval, uaddr)) == 0) {	\
			for (;;) {				\
				oparg = oldval ^ n;		\
				__futex_cmpxchg();		\
				if (ret != 0 || oldval == val)	\
					break;			\
				/* raced; retry with observed value */ \
				oldval = val;			\
			}					\
		}						\
	})
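
/*
 * tilegx has fetch-and-add/and/or instructions but no fetch-and-xor,
 * so __futex_xor() falls back to a compare-exchange loop.  The same
 * pattern in plain C, as a sketch only:
 *
 *	u32 old = *ptr, cur;
 *	for (;;) {
 *		cur = cmpxchg(ptr, old, old ^ n);
 *		if (cur == old)
 *			break;
 *		old = cur;
 *	}
 */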

/* No need to prefetch, since the atomic ops go to the home cache anyway. */
#define __futex_prolog()

#else

#define __futex_call(FN)						\
	{								\
		struct __get_user gu = FN((u32 __force *)uaddr, lock, oparg); \
		val = gu.val;						\
		ret = gu.err;						\
	}
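
/*
 * On tilepro the primitives named above are out-of-line routines in
 * the arch/tile atomic support code; each returns a struct __get_user
 * pairing the loaded value with an error code, which is why a single
 * call can fill in both "val" and "ret".
 */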

#define __futex_set() __futex_call(__atomic32_xchg)
#define __futex_add() __futex_call(__atomic32_xchg_add)
#define __futex_or() __futex_call(__atomic32_fetch_or)
#define __futex_andn() __futex_call(__atomic32_fetch_andn)
#define __futex_xor() __futex_call(__atomic32_fetch_xor)

#define __futex_cmpxchg()						\
	{								\
		struct __get_user gu = __atomic32_cmpxchg((u32 __force *)uaddr, \
							  lock, oldval, oparg); \
		val = gu.val;						\
		ret = gu.err;						\
	}

/*
 * Find the lock pointer for the atomic calls to use, and issue a
 * prefetch to the user address to bring it into cache.  Similar to
 * __atomic_setup(), but we can't do a read into the L1 since it might
 * fault; instead we do a prefetch into the L2.
 */
#define __futex_prolog()					\
	int *lock;						\
	__insn_prefetch(uaddr);					\
	lock = __atomic_hashed_lock((int __force *)uaddr)
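
/*
 * A sketch of the hashed-lock scheme (illustrative; the real table
 * lives in the arch/tile atomic support code): the word's address
 * picks one lock out of a fixed array, roughly
 *
 *	lock = &atomic_locks[(addr >> 2) % ATOMIC_HASH_SIZE];
 *
 * so kernel and userspace updates of the same word always serialize
 * on the same lock.
 */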
#endif

static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;
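	/*
	 * Field layout of encoded_op, matching FUTEX_OP() in
	 * <linux/futex.h>: bits 31..28 op, 27..24 cmp, 23..12 oparg,
	 * 11..0 cmparg.  The shift-left/arithmetic-shift-right pairs
	 * above sign-extend the two 12-bit argument fields.
	 */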
	int uninitialized_var(val), ret;

	__futex_prolog();

	/* The 32-bit futex code makes this assumption, so validate it here. */
	BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

	pagefault_disable();
	switch (op) {
	case FUTEX_OP_SET:
		__futex_set();
		break;
	case FUTEX_OP_ADD:
		__futex_add();
		break;
	case FUTEX_OP_OR:
		__futex_or();
		break;
	case FUTEX_OP_ANDN:
		__futex_andn();
		break;
	case FUTEX_OP_XOR:
		__futex_xor();
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	pagefault_enable();

	if (!ret) {
		switch (cmp) {
		case FUTEX_OP_CMP_EQ:
			ret = (val == cmparg);
			break;
		case FUTEX_OP_CMP_NE:
			ret = (val != cmparg);
			break;
		case FUTEX_OP_CMP_LT:
			ret = (val < cmparg);
			break;
		case FUTEX_OP_CMP_GE:
			ret = (val >= cmparg);
			break;
		case FUTEX_OP_CMP_LE:
			ret = (val <= cmparg);
			break;
		case FUTEX_OP_CMP_GT:
			ret = (val > cmparg);
			break;
		default:
			ret = -ENOSYS;
		}
	}
	return ret;
}
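
/*
 * Illustrative use: a FUTEX_WAKE_OP call encoding, say,
 * FUTEX_OP(FUTEX_OP_OR, 1, FUTEX_OP_CMP_EQ, 0) asks for an atomic OR
 * of 1 into the word plus a report of whether the old value was 0; the
 * 0/1 comparison result computed above is what the generic futex code
 * uses to decide whether to wake the second wait queue.
 */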

static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
						u32 oldval, u32 oparg)
{
	int ret, val;

	__futex_prolog();

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

	__futex_cmpxchg();

	*uval = val;
	return ret;
}
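
/*
 * On success *uval holds the word's previous contents; callers such
 * as the PI-futex paths compare it with "oldval" to tell whether the
 * exchange actually took effect.
 */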

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_FUTEX_H */