/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann,
 */

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

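/*
 * Aligned word-sized loads ("l") and stores ("st") are single-copy
 * atomic on s390, so atomic_read()/atomic_set() only need to wrap
 * them in volatile inline asm to keep the compiler from tearing or
 * caching the access.
 */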
static inline int atomic_read(const atomic_t *v)
{
	int c;

	asm volatile(
		"	l	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic_set(atomic_t *v, int i)
{
	asm volatile(
		"	st	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

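/*
 * __atomic_add_barrier() returns the old counter value, so
 * atomic_add_return() adds 'i' once more to yield the new value,
 * while atomic_fetch_add() hands the old value back unchanged.
 */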
static inline int atomic_add_return(int i, atomic_t *v)
{
	return __atomic_add_barrier(i, &v->counter) + i;
}

static inline int atomic_fetch_add(int i, atomic_t *v)
{
	return __atomic_add_barrier(i, &v->counter);
}

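/*
 * On z196 and newer machines the interlocked-access facility lets
 * __atomic_add_const() use an add-signed-immediate instruction that
 * updates memory atomically without fetching the old value, but the
 * immediate is only 8 bits wide; hence the -128..127 constant check.
 */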
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		__atomic_add_const(i, &v->counter);
		return;
	}
#endif
	__atomic_add(i, &v->counter);
}

#define atomic_sub(_i, _v)		atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
#define atomic_fetch_sub(_i, _v)	atomic_fetch_add(-(int)(_i), _v)

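/*
 * Generate the bitwise ops. The plain variants have no ordering
 * requirement and map to __atomic_<op>(); the fetch variants must be
 * fully ordered, so they use the *_barrier() helpers and return the
 * old value.
 */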
#define ATOMIC_OPS(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__atomic_##op(i, &v->counter);					\
}									\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	return __atomic_##op##_barrier(i, &v->counter);			\
}

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS

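/*
 * Exchange and compare-and-exchange are both built on the
 * compare-and-swap family of instructions, via xchg() from
 * asm/cmpxchg.h and __atomic_cmpxchg() from asm/atomic_ops.h.
 */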
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return __atomic_cmpxchg(&v->counter, old, new);
}

#define ATOMIC64_INIT(i)	{ (i) }

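/*
 * The 64-bit variants mirror the 32-bit ones using "lg"/"stg" and the
 * __atomic64_*() helpers; the (long *) casts are only needed because
 * atomic64_t.counter is declared as s64 while the helpers take long *.
 */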
static inline s64 atomic64_read(const atomic64_t *v)
{
	s64 c;

	asm volatile(
		"	lg	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic64_set(atomic64_t *v, s64 i)
{
	asm volatile(
		"	stg	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline s64 atomic64_add_return(s64 i, atomic64_t *v)
{
	return __atomic64_add_barrier(i, (long *)&v->counter) + i;
}

static inline s64 atomic64_fetch_add(s64 i, atomic64_t *v)
{
	return __atomic64_add_barrier(i, (long *)&v->counter);
}

static inline void atomic64_add(s64 i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		__atomic64_add_const(i, (long *)&v->counter);
		return;
	}
#endif
	__atomic64_add(i, (long *)&v->counter);
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	return __atomic64_cmpxchg((long *)&v->counter, old, new);
}

#define ATOMIC64_OPS(op)						\
static inline void atomic64_##op(s64 i, atomic64_t *v)			\
{									\
	__atomic64_##op(i, (long *)&v->counter);			\
}									\
static inline long atomic64_fetch_##op(s64 i, atomic64_t *v)		\
{									\
	return __atomic64_##op##_barrier(i, (long *)&v->counter);	\
}

ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
ATOMIC64_OPS(xor)

#undef ATOMIC64_OPS

#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(s64)(_i), _v)
#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(s64)(_i), _v)
#define atomic64_sub(_i, _v)		atomic64_add(-(s64)(_i), _v)

#endif /* __ARCH_S390_ATOMIC__ */