/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 *
 * Note: inlines with more than a single statement should be marked
 * __always_inline to avoid problems with older gcc's inlining heuristics.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

#if BITS_PER_LONG == 32
# define _BITOPS_LONG_SHIFT 5
#elif BITS_PER_LONG == 64
# define _BITOPS_LONG_SHIFT 6
#else
# error "Unexpected BITS_PER_LONG"
#endif

#define BIT_64(n)			(U64_C(1) << (n))

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#define RLONG_ADDR(x)			 "m" (*(volatile long *) (x))
#define WBYTE_ADDR(x)			"+m" (*(volatile char *) (x))

#define ADDR				RLONG_ADDR(addr)

/*
 * We do the locked ops that don't return the old value as
 * a mask operation on a byte.
 */
#define CONST_MASK_ADDR(nr, addr)	WBYTE_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr)			(1 << ((nr) & 7))
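
/*
 * Worked example (illustrative): for nr == 10, CONST_MASK_ADDR(10, addr)
 * names the byte at (void *)addr + 1 and CONST_MASK(10) == 1 << 2 == 0x04,
 * so a constant-bit operation becomes a single byte-wide locked instruction
 * touching only the byte that contains the bit.
 */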

static __always_inline void
arch_set_bit(long nr, volatile unsigned long *addr)
{
	if (__builtin_constant_p(nr)) {
		asm volatile(LOCK_PREFIX "orb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr))
			: "memory");
	} else {
		asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
	}
}
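
/*
 * Usage sketch (illustrative; kernel code normally reaches this through the
 * set_bit() wrapper generated by <asm-generic/bitops-instrumented.h>, not by
 * calling the arch_ helper directly):
 *
 *	DECLARE_BITMAP(map, 128);
 *	arch_set_bit(5, map);	// constant nr: one LOCK ORB on a byte
 *	arch_set_bit(nr, map);	// variable nr: LOCK BTS on the bitmap
 */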

static __always_inline void
arch___set_bit(long nr, volatile unsigned long *addr)
{
	asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}

static __always_inline void
arch_clear_bit(long nr, volatile unsigned long *addr)
{
	if (__builtin_constant_p(nr)) {
		asm volatile(LOCK_PREFIX "andb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)~CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
	}
}

static __always_inline void
arch_clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	barrier();
	arch_clear_bit(nr, addr);
}

static __always_inline void
arch___clear_bit(long nr, volatile unsigned long *addr)
{
	asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}

static __always_inline bool
arch_clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
{
	bool negative;
	asm volatile(LOCK_PREFIX "andb %2,%1"
		CC_SET(s)
		: CC_OUT(s) (negative), WBYTE_ADDR(addr)
		: "ir" ((char) ~(1 << nr)) : "memory");
	return negative;
}
#define arch_clear_bit_unlock_is_negative_byte \
	arch_clear_bit_unlock_is_negative_byte
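
/*
 * Semantics note (illustrative): this atomically clears bit @nr, which must
 * lie within the first byte at @addr (nr < 8), and returns whether bit 7 of
 * that byte is set afterwards. The page-unlock fast path relies on this
 * pairing to clear PG_locked while testing PG_waiters in one LOCK ANDB (an
 * assumption about the caller, not something enforced here).
 */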

static __always_inline void
arch___clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	arch___clear_bit(nr, addr);
}

static __always_inline void
arch___change_bit(long nr, volatile unsigned long *addr)
{
	asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}

static __always_inline void
arch_change_bit(long nr, volatile unsigned long *addr)
{
	if (__builtin_constant_p(nr)) {
		asm volatile(LOCK_PREFIX "xorb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
	}
}

static __always_inline bool
arch_test_and_set_bit(long nr, volatile unsigned long *addr)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, c, "Ir", nr);
}

static __always_inline bool
arch_test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
	return arch_test_and_set_bit(nr, addr);
}

static __always_inline bool
arch___test_and_set_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm(__ASM_SIZE(bts) " %2,%1"
	    CC_SET(c)
	    : CC_OUT(c) (oldbit)
	    : ADDR, "Ir" (nr) : "memory");
	return oldbit;
}

static __always_inline bool
arch_test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), *addr, c, "Ir", nr);
}

/*
 * Note: the operation is performed atomically with respect to
 * the local CPU, but not other CPUs. Portable code should not
 * rely on this behaviour.
 * KVM relies on this behaviour on x86 for modifying memory that is also
 * accessed from a hypervisor on the same CPU if running in a VM: don't change
 * this without also updating arch/x86/kernel/kvm.c
 */
static __always_inline bool
arch___test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm volatile(__ASM_SIZE(btr) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit)
		     : ADDR, "Ir" (nr) : "memory");
	return oldbit;
}

static __always_inline bool
arch___test_and_change_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm volatile(__ASM_SIZE(btc) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit)
		     : ADDR, "Ir" (nr) : "memory");

	return oldbit;
}

static __always_inline bool
arch_test_and_change_bit(long nr, volatile unsigned long *addr)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr);
}

static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & (BITS_PER_LONG-1))) &
		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}

static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
{
	bool oldbit;

	asm volatile(__ASM_SIZE(bt) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit)
		     : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");

	return oldbit;
}

#define arch_test_bit(nr, addr)			\
	(__builtin_constant_p((nr))		\
	 ? constant_test_bit((nr), (addr))	\
	 : variable_test_bit((nr), (addr)))
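
/*
 * Worked example (illustrative): on a 64-bit kernel, arch_test_bit(70, addr)
 * with constant nr reads addr[70 >> 6] == addr[1] and tests bit 70 & 63 == 6;
 * with variable nr the BT instruction performs the same word/bit split in
 * hardware.
 */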

/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __ffs(unsigned long word)
{
	asm("rep; bsf %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
}
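
/*
 * Worked example (illustrative): __ffs(0x18) == 3, since 0x18 is 0b11000 and
 * bit 3 is the lowest set bit. The "rep; bsf" encoding executes as TZCNT on
 * CPUs that support it and as plain BSF otherwise; the two agree for nonzero
 * input, which the caller must guarantee.
 */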

/**
 * ffz - find first zero bit in word
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __always_inline unsigned long ffz(unsigned long word)
{
	asm("rep; bsf %1,%0"
	    : "=r" (word)
	    : "r" (~word));
	return word;
}
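
/*
 * Worked example (illustrative): ffz(0xff) == 8, because ~0xff has bit 8 as
 * its lowest set bit; ffz(word) is simply __ffs applied to ~word.
 */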

/*
 * __fls: find last set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __fls(unsigned long word)
{
	asm("bsr %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
}
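
/*
 * Worked example (illustrative): __fls(0x18) == 4, the zero-based index that
 * BSR reports for the highest set bit; contrast fls(0x18) below, which
 * returns the one-based position 5.
 */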

#undef ADDR

#ifdef __KERNEL__
/**
 * ffs - find first set bit in word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the other bitops.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first
 * set bit if value is nonzero. The first (least significant) bit
 * is at position 1.
 */
static __always_inline int ffs(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architects say the
	 * register is in fact written back with its previous value, except
	 * that the top 32 bits are cleared.
	 *
	 * We cannot rely on this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsfl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsfl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "r" (-1));
#else
	asm("bsfl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}
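
/*
 * Worked example (illustrative): ffs(0) == 0, ffs(0x10) == 5 and
 * ffs(0x18) == 4; on x86-64 the pre-loaded -1 survives a zero input, so
 * r + 1 yields 0 without a branch.
 */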

/**
 * fls - find last set bit in word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffs, but returns the position of the most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 32.
 */
static __always_inline int fls(unsigned int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architects say the
	 * register is in fact written back with its previous value, except
	 * that the top 32 bits are cleared.
	 *
	 * We cannot rely on this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsrl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsrl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "rm" (-1));
#else
	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}
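
/*
 * Worked example (illustrative): fls(0) == 0, fls(1) == 1 and
 * fls(0x80000000) == 32; BSR returns index 31, plus one.
 */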

/**
 * fls64 - find last set bit in a 64-bit word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
#ifdef CONFIG_X86_64
static __always_inline int fls64(__u64 x)
{
	int bitpos = -1;
	/*
	 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architects say the
	 * register is in fact written back with its previous value.
	 */
	asm("bsrq %1,%q0"
	    : "+r" (bitpos)
	    : "rm" (x));
	return bitpos + 1;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif
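
/*
 * Worked example (illustrative): fls64(0) == 0, since bitpos stays at its
 * pre-loaded -1, and fls64(1ULL << 63) == 64.
 */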

#include <asm-generic/bitops/find.h>

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>

#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops-instrumented.h>

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */