/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2012 Calxeda, Inc.
 */
#ifndef _ASM_ARM_PERCPU_H_
#define _ASM_ARM_PERCPU_H_

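/*
 * Named register variable bound to the stack pointer. It exists so
 * that __my_cpu_offset() below can express a fake dependency on the
 * stack via the "Q" constraint.
 */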
register unsigned long current_stack_pointer asm ("sp");

/*
 * Same as asm-generic/percpu.h, except that we store the per cpu offset
 * in the TPIDRPRW register. TPIDRPRW only exists on ARMv6K and ARMv7.
 */
#if defined(CONFIG_SMP) && !defined(CONFIG_CPU_V6)
static inline void set_my_cpu_offset(unsigned long off)
{
	/* Set TPIDRPRW */
	asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
}
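/*
 * Illustrative usage (not part of this header): bringup code is
 * expected to install the offset before any per-CPU access, e.g.
 *
 *	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
 */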
static inline unsigned long __my_cpu_offset(void)
{
	unsigned long off;

	/*
	 * Read TPIDRPRW.
	 * We want to allow caching the value, so avoid using volatile and
	 * instead use a fake stack read to hazard against barrier().
	 */
	asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off)
		: "Q" (*(const unsigned long *)current_stack_pointer));

	return off;
}
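/*
 * Override the asm-generic default, which would otherwise fall back to
 * per_cpu_offset(raw_smp_processor_id()) on every access.
 */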
#define __my_cpu_offset __my_cpu_offset()
#else
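/*
 * No usable TPIDRPRW (UP builds, or SMP kernels that must also run on
 * plain V6): make the setter a no-op and let asm-generic/percpu.h
 * provide __my_cpu_offset.
 */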
#define set_my_cpu_offset(x)	do { } while (0)

#endif /* CONFIG_SMP && !CONFIG_CPU_V6 */

#include <asm-generic/percpu.h>

#endif /* _ASM_ARM_PERCPU_H_ */