/*
 * include/asm-xtensa/atomic.h
 *
 * Atomic operations that C can't guarantee us. Useful for resource counting.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2008 Tensilica Inc.
 */

#ifndef _XTENSA_ATOMIC_H
#define _XTENSA_ATOMIC_H

#include <linux/stringify.h>
#include <linux/types.h>
#include <asm/processor.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)	{ (i) }

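/*
 * Usage sketch (illustrative only, not part of this header; the variable
 * name is hypothetical): ATOMIC_INIT supplies the brace initializer for a
 * statically allocated atomic_t, which the run-time API below then
 * operates on.
 *
 *	static atomic_t pkt_count = ATOMIC_INIT(0);
 */
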
/*
 * This Xtensa implementation assumes that the right mechanism
 * for exclusion is for locking interrupts to level EXCM_LEVEL.
 *
 * Locking interrupts looks like this:
 *
 *    rsil a15, TOPLEVEL
 *    <code>
 *    wsr  a15, PS
 *    rsync
 *
 * Note that a15 is used here because the register allocation
 * done by the compiler is not guaranteed, and a window overflow
 * must not occur between the rsil and wsr instructions. By using
 * a15 in the rsil, the machine is guaranteed to be in a state
 * where no register reference will cause an overflow.
 */

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		READ_ONCE((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))
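
/*
 * Sketch of how the accessors above are meant to be used (illustrative
 * only; reset_stats() and pkt_count are hypothetical). atomic_set()
 * overwrites the counter, atomic_read() samples it without tearing;
 * neither implies a memory barrier.
 *
 *	static void reset_stats(void)
 *	{
 *		atomic_set(&pkt_count, 0);
 *	}
 *
 *	int snapshot = atomic_read(&pkt_count);
 */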

#if XCHAL_HAVE_EXCLUSIVE
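/*
 * Exclusive-access variant: l32ex performs an exclusive load, s32ex
 * attempts the matching exclusive store, and getex fetches the store's
 * success flag into %0 so that beqz can retry the whole read-modify-write
 * sequence if another agent touched the word in between.
 */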
#define ATOMIC_OP(op)						\
static inline void atomic_##op(int i, atomic_t *v)		\
{								\
	unsigned long tmp;					\
	int result;						\
								\
	__asm__ __volatile__(					\
			"1:     l32ex   %1, %3\n"		\
			"       " #op " %0, %1, %2\n"		\
			"       s32ex   %0, %3\n"		\
			"       getex   %0\n"			\
			"       beqz    %0, 1b\n"		\
			: "=&a" (result), "=&a" (tmp)		\
			: "a" (i), "a" (v)			\
			: "memory"				\
			);					\
}								\

#define ATOMIC_OP_RETURN(op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)	\
{								\
	unsigned long tmp;					\
	int result;						\
								\
	__asm__ __volatile__(					\
			"1:     l32ex   %1, %3\n"		\
			"       " #op " %0, %1, %2\n"		\
			"       s32ex   %0, %3\n"		\
			"       getex   %0\n"			\
			"       beqz    %0, 1b\n"		\
			"       " #op " %0, %1, %2\n"		\
			: "=&a" (result), "=&a" (tmp)		\
			: "a" (i), "a" (v)			\
			: "memory"				\
			);					\
								\
	return result;						\
}

#define ATOMIC_FETCH_OP(op)					\
static inline int atomic_fetch_##op(int i, atomic_t *v)	\
{								\
	unsigned long tmp;					\
	int result;						\
								\
	__asm__ __volatile__(					\
			"1:     l32ex   %1, %3\n"		\
			"       " #op " %0, %1, %2\n"		\
			"       s32ex   %0, %3\n"		\
			"       getex   %0\n"			\
			"       beqz    %0, 1b\n"		\
			: "=&a" (result), "=&a" (tmp)		\
			: "a" (i), "a" (v)			\
			: "memory"				\
			);					\
								\
	return tmp;						\
}

#elif XCHAL_HAVE_S32C1I
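/*
 * Compare-and-swap variant: the old value is loaded into SCOMPARE1, the
 * updated value is computed, and s32c1i stores it only if the word in
 * memory still equals SCOMPARE1; bne retries when a concurrent update
 * made the store fail.
 */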
#define ATOMIC_OP(op)						\
static inline void atomic_##op(int i, atomic_t *v)		\
{								\
	unsigned long tmp;					\
	int result;						\
								\
	__asm__ __volatile__(					\
			"1:     l32i    %1, %3, 0\n"		\
			"       wsr     %1, scompare1\n"	\
			"       " #op " %0, %1, %2\n"		\
			"       s32c1i  %0, %3, 0\n"		\
			"       bne     %0, %1, 1b\n"		\
			: "=&a" (result), "=&a" (tmp)		\
			: "a" (i), "a" (v)			\
			: "memory"				\
			);					\
}								\

#define ATOMIC_OP_RETURN(op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)	\
{								\
	unsigned long tmp;					\
	int result;						\
								\
	__asm__ __volatile__(					\
			"1:     l32i    %1, %3, 0\n"		\
			"       wsr     %1, scompare1\n"	\
			"       " #op " %0, %1, %2\n"		\
			"       s32c1i  %0, %3, 0\n"		\
			"       bne     %0, %1, 1b\n"		\
			"       " #op " %0, %0, %2\n"		\
			: "=&a" (result), "=&a" (tmp)		\
			: "a" (i), "a" (v)			\
			: "memory"				\
			);					\
								\
	return result;						\
}

#define ATOMIC_FETCH_OP(op)					\
static inline int atomic_fetch_##op(int i, atomic_t *v)	\
{								\
	unsigned long tmp;					\
	int result;						\
								\
	__asm__ __volatile__(					\
			"1:     l32i    %1, %3, 0\n"		\
			"       wsr     %1, scompare1\n"	\
			"       " #op " %0, %1, %2\n"		\
			"       s32c1i  %0, %3, 0\n"		\
			"       bne     %0, %1, 1b\n"		\
			: "=&a" (result), "=&a" (tmp)		\
			: "a" (i), "a" (v)			\
			: "memory"				\
			);					\
								\
	return result;						\
}

#else /* XCHAL_HAVE_S32C1I */

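/*
 * Fallback for cores with neither exclusive loads/stores nor s32c1i:
 * interrupts are raised to TOPLEVEL around the plain read-modify-write,
 * using the a15 locking pattern described at the top of this file. Note
 * that this only excludes other contexts on the local CPU.
 */
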
#define ATOMIC_OP(op)						\
static inline void atomic_##op(int i, atomic_t *v)		\
{								\
	unsigned int vval;					\
								\
	__asm__ __volatile__(					\
			"       rsil    a15, "__stringify(TOPLEVEL)"\n"	\
			"       l32i    %0, %2, 0\n"		\
			"       " #op " %0, %0, %1\n"		\
			"       s32i    %0, %2, 0\n"		\
			"       wsr     a15, ps\n"		\
			"       rsync\n"			\
			: "=&a" (vval)				\
			: "a" (i), "a" (v)			\
			: "a15", "memory"			\
			);					\
}								\

#define ATOMIC_OP_RETURN(op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)	\
{								\
	unsigned int vval;					\
								\
	__asm__ __volatile__(					\
			"       rsil    a15, "__stringify(TOPLEVEL)"\n"	\
			"       l32i    %0, %2, 0\n"		\
			"       " #op " %0, %0, %1\n"		\
			"       s32i    %0, %2, 0\n"		\
			"       wsr     a15, ps\n"		\
			"       rsync\n"			\
			: "=&a" (vval)				\
			: "a" (i), "a" (v)			\
			: "a15", "memory"			\
			);					\
								\
	return vval;						\
}

#define ATOMIC_FETCH_OP(op)					\
static inline int atomic_fetch_##op(int i, atomic_t *v)	\
{								\
	unsigned int tmp, vval;					\
								\
	__asm__ __volatile__(					\
			"       rsil    a15, "__stringify(TOPLEVEL)"\n"	\
			"       l32i    %0, %3, 0\n"		\
			"       " #op " %1, %0, %2\n"		\
			"       s32i    %1, %3, 0\n"		\
			"       wsr     a15, ps\n"		\
			"       rsync\n"			\
			: "=&a" (vval), "=&a" (tmp)		\
			: "a" (i), "a" (v)			\
			: "a15", "memory"			\
			);					\
								\
	return vval;						\
}

#endif /* XCHAL_HAVE_S32C1I */

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) ATOMIC_OP_RETURN(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)
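
/*
 * The two expansions above generate atomic_add(), atomic_sub(),
 * atomic_fetch_add(), atomic_fetch_sub(), atomic_add_return() and
 * atomic_sub_return() from whichever per-variant templates were
 * selected above.
 */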

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
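
/*
 * Sketch (illustrative only, not part of this header) of the classic
 * retry loop built on atomic_cmpxchg(); the helper name is hypothetical
 * and mirrors the generic atomic_fetch_add_unless() pattern: add @a to
 * @v unless @v currently holds @u, returning nonzero on success.
 *
 *	static inline int atomic_add_unless_sketch(atomic_t *v, int a, int u)
 *	{
 *		int c = atomic_read(v);
 *
 *		while (c != u) {
 *			int old = atomic_cmpxchg(v, c, c + a);
 *			if (old == c)
 *				return 1;
 *			c = old;
 *		}
 *		return 0;
 *	}
 */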

#endif /* _XTENSA_ATOMIC_H */