/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_CACHEFLUSH_H
#define _ASM_POWERPC_CACHEFLUSH_H

#ifdef __KERNEL__

#include <linux/mm.h>
#include <asm/cputable.h>

/*
 * No cache flushing is required when address mappings are changed,
 * because the caches on PowerPCs are physically addressed.
 */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_icache_page(vma, page)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)
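
/*
 * Illustrative consequence (a sketch, not part of this header): generic
 * mm code brackets page table updates with these hooks, e.g.
 *
 *	flush_cache_range(vma, start, end);
 *	... modify the page tables ...
 *
 * and on powerpc each hook expands to nothing, so no extra work is done.
 */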

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Book3s has no ptesync after setting a pte, so without this ptesync it's
 * possible for a kernel virtual mapping access to return a spurious fault
 * if it's accessed right after the pte is set. The page fault handler does
 * not expect this type of fault. flush_cache_vmap is not exactly the right
 * place to put this, but it seems to work well enough.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	asm volatile("ptesync" ::: "memory");
}
#else
static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
#endif
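
/*
 * Illustrative call path (a sketch, not part of this header): the
 * generic vmalloc code populates the new kernel PTEs and then calls
 * flush_cache_vmap(), which is where the ptesync above executes before
 * the mapping is first dereferenced, roughly:
 *
 *	vmap_page_range_noflush(start, end, prot, pages);
 *	flush_cache_vmap(start, end);
 */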

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	1
void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

void flush_icache_range(unsigned long start, unsigned long stop);
void flush_icache_user_range(struct vm_area_struct *vma,
			     struct page *page, unsigned long addr,
			     int len);
void flush_dcache_icache_page(struct page *page);
void __flush_dcache_icache(void *page);
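
/*
 * Example (a sketch; the buffer name 'insns' is hypothetical): code that
 * generates or patches instructions with normal stores must write them
 * past the D-cache and invalidate the stale I-cache lines before
 * executing them:
 *
 *	memcpy(insns, template, len);
 *	flush_icache_range((unsigned long)insns,
 *			   (unsigned long)insns + len);
 */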

/**
 * flush_dcache_range(): Write any modified data cache blocks out to memory and
 *  invalidate them. Does not invalidate the corresponding instruction cache
 *  blocks.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 */
static inline void flush_dcache_range(unsigned long start, unsigned long stop)
{
	unsigned long shift = l1_dcache_shift();
	unsigned long bytes = l1_dcache_bytes();
	void *addr = (void *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	if (IS_ENABLED(CONFIG_PPC64)) {
		mb();	/* sync */
		isync();
	}

	for (i = 0; i < size >> shift; i++, addr += bytes)
		dcbf(addr);
	mb();	/* sync */

	if (IS_ENABLED(CONFIG_PPC64))
		isync();
}
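
/*
 * Example (a sketch; 'desc' and its fields are hypothetical): a driver
 * filling a descriptor with normal stores before handing it to a
 * non-coherent device would push it out to memory first:
 *
 *	desc->addr = buf_phys;
 *	desc->len  = len;
 *	flush_dcache_range((unsigned long)desc,
 *			   (unsigned long)(desc + 1));
 */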

/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 */
static inline void clean_dcache_range(unsigned long start, unsigned long stop)
{
	unsigned long shift = l1_dcache_shift();
	unsigned long bytes = l1_dcache_bytes();
	void *addr = (void *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	for (i = 0; i < size >> shift; i++, addr += bytes)
		dcbst(addr);
	mb();	/* sync */
}
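
/*
 * Example (a sketch; 'tx_buf' is hypothetical): clean_dcache_range() is
 * the cheaper choice when the CPU keeps reading the buffer afterwards,
 * since the lines stay valid in the D-cache; only memory is updated:
 *
 *	memcpy(tx_buf, data, len);
 *	clean_dcache_range((unsigned long)tx_buf,
 *			   (unsigned long)tx_buf + len);
 */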

/*
 * Like above, but invalidate the D-cache.  This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 */
static inline void invalidate_dcache_range(unsigned long start,
					   unsigned long stop)
{
	unsigned long shift = l1_dcache_shift();
	unsigned long bytes = l1_dcache_bytes();
	void *addr = (void *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	for (i = 0; i < size >> shift; i++, addr += bytes)
		dcbi(addr);
	mb();	/* sync */
}
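
/*
 * Example (a sketch; 'rx_buf' is hypothetical): before reading data that
 * a non-snooping device such as the 8xx CPM has DMA'd into memory, drop
 * any stale cached copy so that subsequent loads hit memory:
 *
 *	invalidate_dcache_range((unsigned long)rx_buf,
 *				(unsigned long)rx_buf + len);
 *	process_rx(rx_buf, len);
 */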

#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do {							\
		memcpy(dst, src, len);				\
		flush_icache_user_range(vma, page, vaddr, len);	\
	} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
	memcpy(dst, src, len)
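
/*
 * Illustrative use (a sketch of the generic access_process_vm() path,
 * e.g. on behalf of ptrace POKETEXT): the kernel copies into the target
 * page via copy_to_user_page() so the I-cache is flushed for the newly
 * written bytes, roughly:
 *
 *	maddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
 *	kunmap(page);
 */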

#endif /* __KERNEL__ */

#endif /* _ASM_POWERPC_CACHEFLUSH_H */