/* SPDX-License-Identifier: GPL-2.0 */
/*
 * arch/arm/include/asm/cache.h
 */
#ifndef __ASMARM_CACHE_H
#define __ASMARM_CACHE_H

#define L1_CACHE_SHIFT		CONFIG_ARM_L1_CACHE_SHIFT
#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

/*
 * Memory returned by kmalloc() may be used for DMA, so we must make
 * sure that all such allocations are cache aligned. Otherwise,
 * unrelated code may cause parts of the buffer to be read into the
 * cache before the transfer is done, causing old data to be seen by
 * the CPU.
 */
#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
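
/*
 * Illustrative sketch (not part of the original header): a driver that
 * hands kmalloc() memory to a device relies on the alignment guaranteed
 * by ARCH_DMA_MINALIGN, so no unrelated data shares the buffer's cache
 * lines.  The device, buffer size and transfer direction below are
 * hypothetical.
 *
 *	buf = kmalloc(512, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	dma_handle = dma_map_single(dev, buf, 512, DMA_FROM_DEVICE);
 */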

/*
 * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
 */
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define ARCH_SLAB_MINALIGN	8
#endif
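
/*
 * Sketch of why the above matters (hypothetical structure): with the
 * EABI, 64-bit members are naturally aligned to 8 bytes, so pointers
 * returned by the slab allocator must themselves be at least 8-byte
 * aligned for an object such as
 *
 *	struct sample {
 *		u64 timestamp;
 *	};
 *
 * to be accessed with 64-bit loads and stores.
 */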

#define __read_mostly __section(".data..read_mostly")
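
/*
 * Usage sketch (hypothetical variable, not defined by this header):
 * data that is written rarely but read on hot paths can be annotated
 * so it lands in the .data..read_mostly section and does not share
 * cache lines with frequently written data, e.g.
 *
 *	static int example_threshold __read_mostly = 16;
 */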

#endif