/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Atomic operations for the Hexagon architecture
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/* Normal writes in our arch don't clear lock reservations */

static inline void atomic_set(atomic_t *v, int new)
{
	/*
	 * Take out a reservation with a load-locked, then store
	 * conditionally; retry if the store loses the reservation.
	 */
	asm volatile(
		"1:	r6 = memw_locked(%0);\n"
		"	memw_locked(%0,p0) = %1;\n"
		"	if (!p0) jump 1b;\n"
		:
		: "r" (&v->counter), "r" (new)
		: "memory", "p0", "r6"
	);
}

#define atomic_set_release(v, i)	atomic_set((v), (i))

/**
 * atomic_read - reads a word, atomically
 * @v: pointer to atomic value
 *
 * Assumes all word reads on our architecture are atomic.
 */
#define atomic_read(v)		READ_ONCE((v)->counter)
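
/*
 * A minimal usage sketch (hypothetical caller, not part of this header):
 *
 *	static atomic_t users;
 *
 *	atomic_set(&users, 10);
 *	if (atomic_read(&users) > 0)
 *		...
 */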

/**
 * atomic_xchg - atomically exchange the contents of @v with @new
 * @v: pointer to memory to change
 * @new: new value (technically passed in a register -- see xchg)
 *
 * Returns the old value.
 */
#define atomic_xchg(v, new)	(xchg(&((v)->counter), (new)))
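
/*
 * Usage sketch (hypothetical caller): atomically claim a flag while
 * observing its previous value.
 *
 *	static atomic_t pending;
 *
 *	if (atomic_xchg(&pending, 1) == 0)
 *		... we were the first to claim it ...
 */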

/**
 * atomic_cmpxchg - atomic compare-and-exchange values
 * @v: pointer to value to change
 * @old: expected old value to match
 * @new: new value to put in
 *
 * Parameters are pointer, value-in-register, value-in-register,
 * and the output is the old value.
 *
 * This gets more complicated on architectures that lack a
 * load-locked/store-conditional pair like memw_locked.
 *
 * Kind of the linchpin of the rest of the generically defined routines.
 * Remember that Hexagon V2 had a bug with the .new predicate set by
 * memw_locked.
 *
 * "old" is the "expected" old value; __oldval is the actual old value.
 */
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int __oldval;

	/* A miscompare exits at 2:; a lost reservation retries from 1:. */
	asm volatile(
		"1:	%0 = memw_locked(%1);\n"
		"	{ p0 = cmp.eq(%0,%2);\n"
		"	  if (!p0.new) jump:nt 2f; }\n"
		"	memw_locked(%1,p0) = %3;\n"
		"	if (!p0) jump 1b;\n"
		"2:\n"
		: "=&r" (__oldval)
		: "r" (&v->counter), "r" (old), "r" (new)
		: "memory", "p0"
	);

	return __oldval;
}
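
/*
 * A minimal sketch (hypothetical helper, not part of this header) of how
 * a routine can be built generically on top of atomic_cmpxchg():
 *
 *	static inline int my_atomic_double(atomic_t *v)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = atomic_read(v);
 *			new = old * 2;
 *		} while (atomic_cmpxchg(v, old, new) != old);
 *
 *		return new;
 *	}
 */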

/* Generate atomic_<op>(): apply <op> to @v, returning nothing. */
#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	int output;							\
									\
	__asm__ __volatile__ (						\
	"1:	%0 = memw_locked(%1);\n"				\
	"	%0 = "#op "(%0,%2);\n"					\
	"	memw_locked(%1,p3) = %0;\n"				\
	"	if (!p3) jump 1b;\n"					\
	: "=&r" (output)						\
	: "r" (&v->counter), "r" (i)					\
	: "memory", "p3"						\
	);								\
}

/* Generate atomic_<op>_return(): same, but return the new value. */
#define ATOMIC_OP_RETURN(op)						\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int output;							\
									\
	__asm__ __volatile__ (						\
	"1:	%0 = memw_locked(%1);\n"				\
	"	%0 = "#op "(%0,%2);\n"					\
	"	memw_locked(%1,p3) = %0;\n"				\
	"	if (!p3) jump 1b;\n"					\
	: "=&r" (output)						\
	: "r" (&v->counter), "r" (i)					\
	: "memory", "p3"						\
	);								\
	return output;							\
}

/* Generate atomic_fetch_<op>(): same, but return the old value. */
#define ATOMIC_FETCH_OP(op)						\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int output, val;						\
									\
	__asm__ __volatile__ (						\
	"1:	%0 = memw_locked(%2);\n"				\
	"	%1 = "#op "(%0,%3);\n"					\
	"	memw_locked(%2,p3) = %1;\n"				\
	"	if (!p3) jump 1b;\n"					\
	: "=&r" (output), "=&r" (val)					\
	: "r" (&v->counter), "r" (i)					\
	: "memory", "p3"						\
	);								\
	return output;							\
}
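
/*
 * A minimal sketch (hypothetical caller) of the difference between the
 * _return and fetch_ variants generated by the ATOMIC_OPS() expansions
 * below:
 *
 *	atomic_set(&cnt, 5);
 *	int new = atomic_add_return(1, &cnt);	// returns 6, the new value
 *	int old = atomic_fetch_add(1, &cnt);	// returns 6, the old value; cnt is now 7
 */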

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)
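
/*
 * For reference, the expansions above generate:
 *   add/sub:    atomic_add(), atomic_sub(), atomic_add_return(),
 *               atomic_sub_return(), atomic_fetch_add(), atomic_fetch_sub()
 *   and/or/xor: atomic_and(), atomic_or(), atomic_xor(),
 *               atomic_fetch_and(), atomic_fetch_or(), atomic_fetch_xor()
 * (the bitwise ops have no _return variants in the kernel atomic API)
 */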

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer to value
 * @a: amount to add
 * @u: do not add if the value is already equal to this
 *
 * Returns the old value.
 */
static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int __oldval;
	register int tmp;

	/* A miscompare (value == @u) exits at 2: without storing. */
	asm volatile(
		"1:	%0 = memw_locked(%2);"
		"	{"
		"		p3 = cmp.eq(%0, %4);"
		"		if (p3.new) jump:nt 2f;"
		"		%1 = add(%0, %3);"
		"	}"
		"	memw_locked(%2, p3) = %1;"
		"	{"
		"		if (!p3) jump 1b;"
		"	}"
		"2:"
		: "=&r" (__oldval), "=&r" (tmp)
		: "r" (v), "r" (a), "r" (u)
		: "memory", "p3"
	);
	return __oldval;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
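
/*
 * Usage sketch (hypothetical helper, not part of this header): take a
 * reference only while the object is still live, i.e. add 1 unless the
 * count has already dropped to 0.
 *
 *	static bool my_get_ref(atomic_t *refcount)
 *	{
 *		return atomic_fetch_add_unless(refcount, 1, 0) != 0;
 *	}
 */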

#endif