/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc.
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
#include <asm/llsc.h>
#include <asm/sync.h>
#include <asm/war.h>

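/*
 * ATOMIC_OPS() below generates the basic accessors for a given atomic
 * type: e.g. ATOMIC_OPS(atomic, int) defines atomic_read(), atomic_set(),
 * atomic_cmpxchg() and atomic_xchg() as thin wrappers around READ_ONCE(),
 * WRITE_ONCE(), cmpxchg() and xchg() on the ->counter field.
 */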
#define ATOMIC_OPS(pfx, type)						\
static __always_inline type pfx##_read(const pfx##_t *v)		\
{									\
	return READ_ONCE(v->counter);					\
}									\
									\
static __always_inline void pfx##_set(pfx##_t *v, type i)		\
{									\
	WRITE_ONCE(v->counter, i);					\
}									\
									\
static __always_inline type pfx##_cmpxchg(pfx##_t *v, type o, type n)	\
{									\
	return cmpxchg(&v->counter, o, n);				\
}									\
									\
static __always_inline type pfx##_xchg(pfx##_t *v, type n)		\
{									\
	return xchg(&v->counter, n);					\
}

ATOMIC_OPS(atomic, int)

#ifdef CONFIG_64BIT
# define ATOMIC64_INIT(i)	{ (i) }
ATOMIC_OPS(atomic64, s64)
#endif

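/*
 * ATOMIC_OP() generates the void arithmetic ops (e.g. atomic_add()).  On
 * CPUs with LL/SC the update is a load-linked/store-conditional retry
 * loop; otherwise it is done non-atomically with local interrupts
 * disabled, which is sufficient only on systems without SMP LL/SC.
 */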
#define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
static __inline__ void pfx##_##op(type i, pfx##_t * v)			\
{									\
	type temp;							\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		v->counter c_op i;					\
		raw_local_irq_restore(flags);				\
		return;							\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%0, %1		# " #pfx "_" #op "	\n"	\
	"	" #asm_op " %0, %2				\n"	\
	"	" #sc "	%0, %1					\n"	\
	"\t" __SC_BEQZ "%0, 1b					\n"	\
	"	.set	pop					\n"	\
	: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)		\
	: "Ir" (i) : __LLSC_CLOBBER);					\
}

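/*
 * ATOMIC_OP_RETURN() generates the _return_relaxed variants (e.g.
 * atomic_add_return_relaxed()), which perform the update and return the
 * new value.  "relaxed" means no ordering is implied by these functions;
 * the generic atomic code builds the fully ordered forms on top of them.
 */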
#define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)		\
static __inline__ type pfx##_##op##_return_relaxed(type i, pfx##_t * v)	\
{									\
	type temp, result;						\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		result c_op i;						\
		v->counter = result;					\
		raw_local_irq_restore(flags);				\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%1, %2		# " #pfx "_" #op "_return\n"	\
	"	" #asm_op " %0, %1, %3				\n"	\
	"	" #sc "	%0, %2					\n"	\
	"\t" __SC_BEQZ "%0, 1b					\n"	\
	"	" #asm_op " %0, %1, %3				\n"	\
	"	.set	pop					\n"	\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i) : __LLSC_CLOBBER);					\
									\
	return result;							\
}

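/*
 * ATOMIC_FETCH_OP() generates the fetch_*_relaxed variants (e.g.
 * atomic_fetch_add_relaxed()), which perform the update but return the
 * value the counter held *before* the operation.
 */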
#define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)		\
static __inline__ type pfx##_fetch_##op##_relaxed(type i, pfx##_t * v)	\
{									\
	type temp, result;						\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		v->counter c_op i;					\
		raw_local_irq_restore(flags);				\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%1, %2		# " #pfx "_fetch_" #op "\n"	\
	"	" #asm_op " %0, %1, %3				\n"	\
	"	" #sc "	%0, %2					\n"	\
	"\t" __SC_BEQZ "%0, 1b					\n"	\
	"	.set	pop					\n"	\
	"	move	%0, %1					\n"	\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i) : __LLSC_CLOBBER);					\
									\
	return result;							\
}

#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)		\
	ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)

ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)

#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed

#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
# define atomic64_add_return_relaxed	atomic64_add_return_relaxed
# define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
# define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
# define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
#endif /* CONFIG_64BIT */

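/*
 * The bitwise ops only need the void and fetch variants (there is no
 * atomic_and_return() etc. in the kernel atomic API), so ATOMIC_OPS() is
 * redefined without ATOMIC_OP_RETURN() before instantiating them.
 */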
#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)

ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed

#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
# define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
# define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
# define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
#endif

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/*
 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
#define ATOMIC_SIP_OP(pfx, type, op, ll, sc)				\
static __inline__ type pfx##_sub_if_positive(type i, pfx##_t * v)	\
{									\
	type temp, result;						\
									\
	smp_mb__before_atomic();					\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		result -= i;						\
		if (result >= 0)					\
			v->counter = result;				\
		raw_local_irq_restore(flags);				\
		smp_mb__after_atomic();					\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%1, %2		# atomic_sub_if_positive\n"	\
	"	.set	pop					\n"	\
	"	" #op "	%0, %1, %3				\n"	\
	"	move	%1, %0					\n"	\
	"	bltz	%0, 2f					\n"	\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" #sc "	%1, %2					\n"	\
	"	" __SC_BEQZ "%1, 1b				\n"	\
	"2:	" __SYNC(full, loongson3_war) "			\n"	\
	"	.set	pop					\n"	\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i)							\
	: __LLSC_CLOBBER);						\
									\
	/*								\
	 * In the Loongson3 workaround case we already have a		\
	 * completion barrier at 2: above, which is needed due to the	\
	 * bltz that can branch to code outside of the LL/SC loop. As	\
	 * such, we don't need to emit another barrier here.		\
	 */								\
	if (__SYNC_loongson3_war == 0)					\
		smp_mb__after_atomic();					\
									\
	return result;							\
}

ATOMIC_SIP_OP(atomic, int, subu, ll, sc)
#define atomic_dec_if_positive(v)	atomic_sub_if_positive(1, v)

#ifdef CONFIG_64BIT
ATOMIC_SIP_OP(atomic64, s64, dsubu, lld, scd)
#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(1, v)
#endif

#undef ATOMIC_SIP_OP

#endif /* _ASM_ATOMIC_H */