/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#include <asm/addrspace.h>
#include <asm/sync.h>

static inline void __sync(void)
{
	asm volatile(__SYNC(full, always) ::: "memory");
}

static inline void rmb(void)
{
	asm volatile(__SYNC(rmb, always) ::: "memory");
}
#define rmb rmb

static inline void wmb(void)
{
	asm volatile(__SYNC(wmb, always) ::: "memory");
}
#define wmb wmb

#define fast_mb()	__sync()

#define __fast_iob()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		"lw	$0,%0\n\t"		\
		"nop\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: "m" (*(int *)CKSEG1)		\
		: "memory")
#ifdef CONFIG_CPU_CAVIUM_OCTEON
# define fast_iob()	do { } while (0)
#else /* ! CONFIG_CPU_CAVIUM_OCTEON */
# ifdef CONFIG_SGI_IP28
#  define fast_iob()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		"lw	$0,%0\n\t"		\
		"sync\n\t"			\
		"lw	$0,%0\n\t"		\
		".set	pop"			\
		: /* no output */		\
		: "m" (*(int *)CKSEG1ADDR(0x1fa00004)) \
		: "memory")
# else
#  define fast_iob()				\
	do {					\
		__sync();			\
		__fast_iob();			\
	} while (0)
# endif
#endif /* CONFIG_CPU_CAVIUM_OCTEON */

#ifdef CONFIG_CPU_HAS_WB

#include <asm/wbflush.h>

#define mb()		wbflush()
#define iob()		wbflush()

#else /* !CONFIG_CPU_HAS_WB */

#define mb()		fast_mb()
#define iob()		fast_iob()

#endif /* !CONFIG_CPU_HAS_WB */

#if defined(CONFIG_WEAK_ORDERING)
# define __smp_mb()	__sync()
# define __smp_rmb()	rmb()
# define __smp_wmb()	wmb()
#else
# define __smp_mb()	barrier()
# define __smp_rmb()	barrier()
# define __smp_wmb()	barrier()
#endif

/*
 * When LL/SC does imply order, it must also be a compiler barrier to prevent
 * the compiler from reordering where the CPU will not. When it does not imply
 * order, the compiler is also free to reorder across the LL/SC loop and
 * ordering will be done by smp_llsc_mb() and friends.
 */
#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
# define __WEAK_LLSC_MB		sync
# define smp_llsc_mb() \
	__asm__ __volatile__(__stringify(__WEAK_LLSC_MB) : : :"memory")
# define __LLSC_CLOBBER
#else
# define __WEAK_LLSC_MB
# define smp_llsc_mb()		do { } while (0)
# define __LLSC_CLOBBER		"memory"
#endif
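
/*
 * Editorial sketch (not part of the upstream header): a minimal LL/SC retry
 * loop in the style of the MIPS atomics, showing where __LLSC_CLOBBER and
 * smp_llsc_mb() are meant to be used. The function and its constraints are
 * hypothetical; the real users are the LL/SC loops in asm/atomic.h,
 * asm/cmpxchg.h, asm/bitops.h and friends.
 */
static inline void __example_llsc_add(int i, int *p)
{
	int temp;

	__asm__ __volatile__(
	"1:	ll	%0, %1		# load-linked the old value	\n"
	"	addu	%0, %2		# compute the new value		\n"
	"	sc	%0, %1		# store-conditional it back	\n"
	"	beqz	%0, 1b		# retry if the SC failed	\n"
	: "=&r" (temp), "+m" (*p)
	: "Ir" (i)
	: __LLSC_CLOBBER);

	/*
	 * __LLSC_CLOBBER is "memory" where LL/SC already implies order, so
	 * the asm above is a compiler barrier; where it does not, the sync
	 * emitted here provides the ordering instead.
	 */
	smp_llsc_mb();
}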

#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define smp_mb__before_llsc() smp_wmb()
#define __smp_mb__before_llsc() __smp_wmb()
/* Cause previous writes to become visible on all CPUs as soon as possible */
#define nudge_writes() __asm__ __volatile__(".set push\n\t"		\
					    ".set arch=octeon\n\t"	\
					    "syncw\n\t"			\
					    ".set pop" : : : "memory")
#else
#define smp_mb__before_llsc() smp_llsc_mb()
#define __smp_mb__before_llsc() smp_llsc_mb()
#define nudge_writes() mb()
#endif
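
/*
 * Editorial sketch (not part of the upstream header): a hypothetical producer
 * publishing a payload and a flag for a consumer polling on another CPU.
 * nudge_writes() adds no ordering beyond the write barrier; on Octeon it
 * issues syncw so previous writes become visible on all CPUs as soon as
 * possible, elsewhere it falls back to mb(). __smp_wmb() is used directly
 * only because the generic smp_wmb() wrapper is pulled in from
 * <asm-generic/barrier.h> at the bottom of this file.
 */
static inline void __example_publish(volatile int *data, volatile int *flag)
{
	*data = 1;		/* the payload */
	__smp_wmb();		/* order the payload store before the flag store */
	*flag = 1;		/* publish it */
	nudge_writes();		/* hurry the stores out to the other CPUs */
}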

/*
 * In the Loongson3 LL/SC workaround case, all of our LL/SC loops already have
 * a completion barrier immediately preceding the LL instruction. Therefore we
 * can skip emitting a barrier from __smp_mb__before_atomic().
 */
#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS
# define __smp_mb__before_atomic()
#else
# define __smp_mb__before_atomic()	__smp_mb__before_llsc()
#endif

#define __smp_mb__after_atomic()	smp_llsc_mb()
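
/*
 * Editorial sketch: on SMP builds, <asm-generic/barrier.h> (included at the
 * bottom of this file) maps smp_mb__before_atomic() and smp_mb__after_atomic()
 * onto the definitions above. A hypothetical caller wanting a fully ordered,
 * non-value-returning atomic would then write something like:
 *
 *	smp_mb__before_atomic();	// no barrier with the Loongson3 workaround
 *	atomic_inc(&v);			// otherwise unordered read-modify-write
 *	smp_mb__after_atomic();		// smp_llsc_mb() when LL/SC is weakly ordered
 */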

static inline void sync_ginv(void)
{
	asm volatile(__SYNC(ginv, always));
}

#include <asm-generic/barrier.h>

#endif /* __ASM_BARRIER_H */