/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 */
#ifndef _ASM_POWERPC_CACHEFLUSH_H
#define _ASM_POWERPC_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/cputable.h>
#include <asm/cpu_has_feature.h>

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Book3s has no ptesync after setting a pte, so without this ptesync it's
 * possible for a kernel virtual mapping access to return a spurious fault
 * if it's accessed right after the pte is set. The page fault handler does
 * not expect this type of fault. flush_cache_vmap is not exactly the right
 * place to put this, but it seems to work well enough.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	asm volatile("ptesync" ::: "memory");
}
#define flush_cache_vmap flush_cache_vmap
#endif /* CONFIG_PPC_BOOK3S_64 */
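/*
 * Illustrative sketch (not part of this header; 'p' is hypothetical):
 * the ptesync above is what makes a sequence such as
 *
 *	p = vmalloc(PAGE_SIZE);		(installs new kernel PTEs)
 *	((unsigned char *)p)[0] = 1;	(access immediately afterwards)
 *
 * safe on Book3s: vmalloc() ends up calling flush_cache_vmap() before
 * returning, so the access cannot observe a not-yet-visible PTE.
 */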

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);
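/*
 * flush_dcache_page() is the common hook the core mm calls when the
 * kernel writes to a page cache page. On powerpc it is used to note
 * that the page's instruction-cache view may now be stale, so the
 * d-cache/i-cache flush can be done lazily before the page is next
 * mapped for execution (hence the opt-in macro above).
 */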

void flush_icache_range(unsigned long start, unsigned long stop);
#define flush_icache_range flush_icache_range

void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long addr, int len);
#define flush_icache_user_page flush_icache_user_page
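/*
 * Usage sketch (illustrative; 'code_buf', 'insns' and 'len' are
 * hypothetical): after storing instructions through the d-cache, e.g.
 * for generated code, the writer makes them visible to instruction
 * fetch with:
 *
 *	memcpy(code_buf, insns, len);
 *	flush_icache_range((unsigned long)code_buf,
 *			   (unsigned long)code_buf + len);
 */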

void flush_dcache_icache_page(struct page *page);
void __flush_dcache_icache(void *page);

/**
 * flush_dcache_range(): Write any modified data cache blocks out to memory and
 * invalidate them. Does not invalidate the corresponding instruction cache
 * blocks.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 */
static inline void flush_dcache_range(unsigned long start, unsigned long stop)
{
	unsigned long shift = l1_dcache_shift();
	unsigned long bytes = l1_dcache_bytes();
	void *addr = (void *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	if (IS_ENABLED(CONFIG_PPC64))
		mb();	/* sync */

	for (i = 0; i < size >> shift; i++, addr += bytes)
		dcbf(addr);
	mb();	/* sync */
}
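/*
 * Example (illustrative; 'buf' and 'len' are hypothetical): handing a
 * buffer to a non-cache-coherent device. Most drivers should go through
 * the DMA API instead; direct flushing looks like:
 *
 *	flush_dcache_range((unsigned long)buf, (unsigned long)buf + len);
 */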

/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 */
static inline void clean_dcache_range(unsigned long start, unsigned long stop)
{
	unsigned long shift = l1_dcache_shift();
	unsigned long bytes = l1_dcache_bytes();
	void *addr = (void *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	for (i = 0; i < size >> shift; i++, addr += bytes)
		dcbst(addr);
	mb();	/* sync */
}
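/*
 * Example (illustrative; names hypothetical): before a device reads a
 * buffer the CPU has just written (outbound transfer), a clean is
 * sufficient and keeps the CPU's cached copy valid:
 *
 *	clean_dcache_range((unsigned long)buf, (unsigned long)buf + len);
 */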

/*
 * Like above, but invalidate the D-cache. This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 */
static inline void invalidate_dcache_range(unsigned long start,
					   unsigned long stop)
{
	unsigned long shift = l1_dcache_shift();
	unsigned long bytes = l1_dcache_bytes();
	void *addr = (void *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	for (i = 0; i < size >> shift; i++, addr += bytes)
		dcbi(addr);
	mb();	/* sync */
}
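/*
 * Example (illustrative; names hypothetical): after a device has
 * written a buffer (inbound transfer), invalidate before the CPU reads
 * so stale cache lines are discarded:
 *
 *	invalidate_dcache_range((unsigned long)buf, (unsigned long)buf + len);
 *
 * Note that the range is rounded outward to cache block boundaries, so
 * dirty data sharing a block with either end of the buffer would be
 * lost; callers should use block-aligned buffers for invalidation.
 */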

#ifdef CONFIG_4xx
static inline void flush_instruction_cache(void)
{
	iccci((void *)KERNELBASE);
	isync();
}
#else
void flush_instruction_cache(void);
#endif
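/*
 * A single iccci suffices above because, as far as the 4xx cores are
 * concerned, iccci invalidates the whole i-cache rather than a single
 * line (the address operand is effectively a dummy); the isync then
 * discards any already-fetched instructions.
 */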

#include <asm-generic/cacheflush.h>

#endif /* _ASM_POWERPC_CACHEFLUSH_H */