/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann
 */

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

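/*
 * On s390, naturally aligned 32-bit loads and stores are atomic in
 * themselves, so plain "l"/"st" instructions suffice for
 * atomic_read()/atomic_set(); no interlocked instruction is needed.
 */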
static inline int atomic_read(const atomic_t *v)
{
	int c;

	asm volatile(
		"	l	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic_set(atomic_t *v, int i)
{
	asm volatile(
		"	st	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

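/*
 * __atomic_add_barrier() is a full-barrier fetch-and-add that returns
 * the counter's old value (e.g. via "laa" on z196, or a compare-and-swap
 * loop on older machines), so atomic_add_return() adds i once more to
 * yield the new value, while atomic_fetch_add() returns it unchanged.
 */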
static inline int atomic_add_return(int i, atomic_t *v)
{
	return __atomic_add_barrier(i, &v->counter) + i;
}

static inline int atomic_fetch_add(int i, atomic_t *v)
{
	return __atomic_add_barrier(i, &v->counter);
}

static inline void atomic_add(int i, atomic_t *v)
{
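	/*
	 * __atomic_add_const() maps to the z196 "asi" add-immediate
	 * instruction, whose immediate operand is a signed 8-bit value;
	 * only compile-time constants in -128..127 can take this path.
	 */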
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	/*
	 * Order of conditions is important to circumvent gcc 10 bug:
	 * https://gcc.gnu.org/pipermail/gcc-patches/2020-July/549318.html
	 */
	if ((i > -129) && (i < 128) && __builtin_constant_p(i)) {
		__atomic_add_const(i, &v->counter);
		return;
	}
#endif
	__atomic_add(i, &v->counter);
}

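/*
 * Subtraction is implemented as addition of the negated operand; the
 * (int) cast ensures the negation happens on a signed value even if
 * the caller passes an unsigned argument.
 */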
#define atomic_sub(_i, _v)		atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
#define atomic_fetch_sub(_i, _v)	atomic_fetch_add(-(int)(_i), _v)

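/*
 * ATOMIC_OPS() generates the bitwise operations: a void atomic_<op>()
 * built on the plain __atomic_<op>() primitive, and a fully ordered
 * atomic_fetch_<op>() that returns the old value via the _barrier
 * variant.
 */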
#define ATOMIC_OPS(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__atomic_##op(i, &v->counter);					\
}									\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	return __atomic_##op##_barrier(i, &v->counter);			\
}

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS

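/*
 * xchg() and __atomic_cmpxchg() are built on the compare-and-swap
 * ("cs") instruction, which is serializing and therefore implies a
 * full memory barrier.
 */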
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return __atomic_cmpxchg(&v->counter, old, new);
}

#define ATOMIC64_INIT(i)  { (i) }

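/*
 * The 64-bit variants mirror the 32-bit ones. The counter is an s64,
 * which is cast to long * for the __atomic64_*() primitives; this is
 * safe because long is 64 bits wide on s390.
 */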
static inline s64 atomic64_read(const atomic64_t *v)
{
	s64 c;

	asm volatile(
		"	lg	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic64_set(atomic64_t *v, s64 i)
{
	asm volatile(
		"	stg	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline s64 atomic64_add_return(s64 i, atomic64_t *v)
{
	return __atomic64_add_barrier(i, (long *)&v->counter) + i;
}

static inline s64 atomic64_fetch_add(s64 i, atomic64_t *v)
{
	return __atomic64_add_barrier(i, (long *)&v->counter);
}

static inline void atomic64_add(s64 i, atomic64_t *v)
{
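	/* Same z196 add-immediate fast path as atomic_add(), using "agsi". */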
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	/*
	 * Order of conditions is important to circumvent gcc 10 bug:
	 * https://gcc.gnu.org/pipermail/gcc-patches/2020-July/549318.html
	 */
	if ((i > -129) && (i < 128) && __builtin_constant_p(i)) {
		__atomic64_add_const(i, (long *)&v->counter);
		return;
	}
#endif
	__atomic64_add(i, (long *)&v->counter);
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	return __atomic64_cmpxchg((long *)&v->counter, old, new);
}

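/*
 * 64-bit counterpart of ATOMIC_OPS(): plain void operations plus fully
 * ordered fetch variants for and/or/xor.
 */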
#define ATOMIC64_OPS(op)						\
static inline void atomic64_##op(s64 i, atomic64_t *v)			\
{									\
	__atomic64_##op(i, (long *)&v->counter);			\
}									\
static inline s64 atomic64_fetch_##op(s64 i, atomic64_t *v)		\
{									\
	return __atomic64_##op##_barrier(i, (long *)&v->counter);	\
}

ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
ATOMIC64_OPS(xor)

#undef ATOMIC64_OPS

#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(s64)(_i), _v)
#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(s64)(_i), _v)
#define atomic64_sub(_i, _v)		atomic64_add(-(s64)(_i), _v)

#endif /* __ARCH_S390_ATOMIC__ */