/*
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#ifndef _ASM_RISCV_ATOMIC_H
#define _ASM_RISCV_ATOMIC_H

#ifdef CONFIG_GENERIC_ATOMIC64
# include <asm-generic/atomic64.h>
#else
# if (__riscv_xlen < 64)
#  error "64-bit atomics require XLEN to be at least 64"
# endif
#endif

#include <asm/cmpxchg.h>
#include <asm/barrier.h>
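
/*
 * <asm/cmpxchg.h> supplies the __xchg*() and __cmpxchg*() helpers that back
 * the atomic_xchg()/atomic_cmpxchg() wrappers generated near the end of
 * this file.
 */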

#define ATOMIC_INIT(i) { (i) }

#define __atomic_acquire_fence() \
        __asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")

#define __atomic_release_fence() \
        __asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory")
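
/*
 * The generic atomic machinery in <linux/atomic.h> uses these two fences to
 * synthesize the _acquire/_release variants of any op for which this file
 * only provides a _relaxed form.
 */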

static __always_inline int atomic_read(const atomic_t *v)
{
        return READ_ONCE(v->counter);
}
static __always_inline void atomic_set(atomic_t *v, int i)
{
        WRITE_ONCE(v->counter, i);
}

#ifndef CONFIG_GENERIC_ATOMIC64
#define ATOMIC64_INIT(i) { (i) }
static __always_inline long atomic64_read(const atomic64_t *v)
{
        return READ_ONCE(v->counter);
}
static __always_inline void atomic64_set(atomic64_t *v, long i)
{
        WRITE_ONCE(v->counter, i);
}
#endif

/*
 * First, the atomic ops that have no ordering constraints and therefore don't
 * have the AQ or RL bits set. These don't return anything, so there's only
 * one version to worry about.
 */
#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix) \
static __always_inline \
void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
{ \
        __asm__ __volatile__ ( \
                " amo" #asm_op "." #asm_type " zero, %1, %0" \
                : "+A" (v->counter) \
                : "r" (I) \
                : "memory"); \
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I) \
        ATOMIC_OP (op, asm_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, I) \
        ATOMIC_OP (op, asm_op, I, w, int,   ) \
        ATOMIC_OP (op, asm_op, I, d, long, 64)
#endif

ATOMIC_OPS(add, add,  i)
ATOMIC_OPS(sub, add, -i)
ATOMIC_OPS(and, and,  i)
ATOMIC_OPS( or,  or,  i)
ATOMIC_OPS(xor, xor,  i)
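
/*
 * For illustration only: ATOMIC_OPS(add, add, i) above expands, in the
 * 32-bit case, to roughly
 *
 *        static __always_inline void atomic_add(int i, atomic_t *v)
 *        {
 *                __asm__ __volatile__ (
 *                        " amoadd.w zero, %1, %0"
 *                        : "+A" (v->counter)
 *                        : "r" (i)
 *                        : "memory");
 *        }
 *
 * i.e. a single AMO whose old value is discarded by writing it to x0
 * ("zero").
 */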

#undef ATOMIC_OP
#undef ATOMIC_OPS

/*
 * Atomic ops that have ordered, relaxed, acquire, and release variants.
 * There are two flavors of these: the arithmetic ops have both fetch and
 * return versions, while the logical ops only have fetch versions.
 */
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix) \
static __always_inline \
c_type atomic##prefix##_fetch_##op##_relaxed(c_type i, \
                                             atomic##prefix##_t *v) \
{ \
        register c_type ret; \
        __asm__ __volatile__ ( \
                " amo" #asm_op "." #asm_type " %1, %2, %0" \
                : "+A" (v->counter), "=r" (ret) \
                : "r" (I) \
                : "memory"); \
        return ret; \
} \
static __always_inline \
c_type atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v) \
{ \
        register c_type ret; \
        __asm__ __volatile__ ( \
                " amo" #asm_op "." #asm_type ".aqrl %1, %2, %0" \
                : "+A" (v->counter), "=r" (ret) \
                : "r" (I) \
                : "memory"); \
        return ret; \
}

#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix) \
static __always_inline \
c_type atomic##prefix##_##op##_return_relaxed(c_type i, \
                                              atomic##prefix##_t *v) \
{ \
        return atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I; \
} \
static __always_inline \
c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \
{ \
        return atomic##prefix##_fetch_##op(i, v) c_op I; \
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I) \
        ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   ) \
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I) \
        ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   ) \
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   ) \
        ATOMIC_FETCH_OP( op, asm_op,       I, d, long, 64) \
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, long, 64)
#endif

ATOMIC_OPS(add, add, +,  i)
ATOMIC_OPS(sub, add, +, -i)
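
/*
 * Note that subtraction is implemented as an amoadd of the negated operand,
 * since RISC-V has no amosub instruction. For illustration, the fully
 * ordered 32-bit return variant generated above is roughly
 *
 *        static __always_inline int atomic_sub_return(int i, atomic_t *v)
 *        {
 *                return atomic_fetch_sub(i, v) + -i;
 *        }
 *
 * where atomic_fetch_sub() performs an amoadd.w.aqrl of -i and returns the
 * value the counter held beforehand.
 */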

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
#define atomic_add_return atomic_add_return
#define atomic_sub_return atomic_sub_return

#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
#define atomic_fetch_add atomic_fetch_add
#define atomic_fetch_sub atomic_fetch_sub

#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
#define atomic64_add_return atomic64_add_return
#define atomic64_sub_return atomic64_sub_return

#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
#define atomic64_fetch_add atomic64_fetch_add
#define atomic64_fetch_sub atomic64_fetch_sub
#endif

#undef ATOMIC_OPS

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I) \
        ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, I) \
        ATOMIC_FETCH_OP(op, asm_op, I, w, int,   ) \
        ATOMIC_FETCH_OP(op, asm_op, I, d, long, 64)
#endif

ATOMIC_OPS(and, and, i)
ATOMIC_OPS( or,  or, i)
ATOMIC_OPS(xor, xor, i)

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
#define atomic_fetch_and atomic_fetch_and
#define atomic_fetch_or atomic_fetch_or
#define atomic_fetch_xor atomic_fetch_xor

#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
#define atomic64_fetch_and atomic64_fetch_and
#define atomic64_fetch_or atomic64_fetch_or
#define atomic64_fetch_xor atomic64_fetch_xor
#endif

#undef ATOMIC_OPS

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

/* This is required to provide a full barrier on success. */
static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
        int prev, rc;

        __asm__ __volatile__ (
                "0:     lr.w     %[p],  %[c]\n"
                "       beq      %[p],  %[u], 1f\n"
                "       add      %[rc], %[p], %[a]\n"
                "       sc.w.rl  %[rc], %[rc], %[c]\n"
                "       bnez     %[rc], 0b\n"
                "       fence    rw, rw\n"
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                : [a]"r" (a), [u]"r" (u)
                : "memory");
        return prev;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
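
/*
 * Usage sketch: the generic helpers in <linux/atomic.h> build on this, e.g.
 *
 *        atomic_add_unless(v, a, u) := (atomic_fetch_add_unless(v, a, u) != u)
 *        atomic_inc_not_zero(v)     := atomic_add_unless(v, 1, 0)
 *
 * The "fence rw, rw" on the success path is what provides the full barrier;
 * on failure (counter == u) the branch to 1f skips both the store and the
 * fence.
 */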

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
        long prev, rc;

        __asm__ __volatile__ (
                "0:     lr.d     %[p],  %[c]\n"
                "       beq      %[p],  %[u], 1f\n"
                "       add      %[rc], %[p], %[a]\n"
                "       sc.d.rl  %[rc], %[rc], %[c]\n"
                "       bnez     %[rc], 0b\n"
                "       fence    rw, rw\n"
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                : [a]"r" (a), [u]"r" (u)
                : "memory");
        return prev;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless
#endif

/*
 * atomic_{cmp,}xchg is required to have exactly the same ordering semantics
 * as {cmp,}xchg and the value-returning operations, so they need a full
 * barrier.
 */
#define ATOMIC_OP(c_t, prefix, size) \
static __always_inline \
c_t atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n) \
{ \
        return __xchg_relaxed(&(v->counter), n, size); \
} \
static __always_inline \
c_t atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n) \
{ \
        return __xchg_acquire(&(v->counter), n, size); \
} \
static __always_inline \
c_t atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n) \
{ \
        return __xchg_release(&(v->counter), n, size); \
} \
static __always_inline \
c_t atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n) \
{ \
        return __xchg(&(v->counter), n, size); \
} \
static __always_inline \
c_t atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v, \
                                     c_t o, c_t n) \
{ \
        return __cmpxchg_relaxed(&(v->counter), o, n, size); \
} \
static __always_inline \
c_t atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v, \
                                     c_t o, c_t n) \
{ \
        return __cmpxchg_acquire(&(v->counter), o, n, size); \
} \
static __always_inline \
c_t atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v, \
                                     c_t o, c_t n) \
{ \
        return __cmpxchg_release(&(v->counter), o, n, size); \
} \
static __always_inline \
c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
{ \
        return __cmpxchg(&(v->counter), o, n, size); \
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS() \
        ATOMIC_OP(int,   , 4)
#else
#define ATOMIC_OPS() \
        ATOMIC_OP(int,   , 4) \
        ATOMIC_OP(long, 64, 8)
#endif

ATOMIC_OPS()

#undef ATOMIC_OPS
#undef ATOMIC_OP
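
/*
 * Usage sketch (illustrative only): a typical retry loop built on the fully
 * ordered cmpxchg wrapper generated above.
 *
 *        int old = atomic_read(v), tmp;
 *
 *        while ((tmp = atomic_cmpxchg(v, old, old + 1)) != old)
 *                old = tmp;
 *
 * atomic_cmpxchg() returns the value previously held in *v, so equality
 * with the expected value signals that the exchange took place.
 */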

static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
{
        int prev, rc;

        __asm__ __volatile__ (
                "0:     lr.w     %[p],  %[c]\n"
                "       sub      %[rc], %[p], %[o]\n"
                "       bltz     %[rc], 1f\n"
                "       sc.w.rl  %[rc], %[rc], %[c]\n"
                "       bnez     %[rc], 0b\n"
                "       fence    rw, rw\n"
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                : [o]"r" (offset)
                : "memory");
        return prev - offset;
}

#define atomic_dec_if_positive(v) atomic_sub_if_positive(v, 1)
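
/*
 * atomic_dec_if_positive() returns the value the counter would be
 * decremented to; if that value is negative, the decrement did not happen.
 * A common (illustrative) pattern:
 *
 *        if (atomic_dec_if_positive(&count) < 0)
 *                return -EBUSY;        // counter was already zero
 */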

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline long atomic64_sub_if_positive(atomic64_t *v, long offset)
{
        long prev, rc;

        __asm__ __volatile__ (
                "0:     lr.d     %[p],  %[c]\n"
                "       sub      %[rc], %[p], %[o]\n"
                "       bltz     %[rc], 1f\n"
                "       sc.d.rl  %[rc], %[rc], %[c]\n"
                "       bnez     %[rc], 0b\n"
                "       fence    rw, rw\n"
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                : [o]"r" (offset)
                : "memory");
        return prev - offset;
}

#define atomic64_dec_if_positive(v) atomic64_sub_if_positive(v, 1)
#endif

#endif /* _ASM_RISCV_ATOMIC_H */