/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/cacheflush.h
 *
 * Copyright (C) 1999-2002 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_CACHEFLUSH_H
#define __ASM_CACHEFLUSH_H

#include <linux/kgdb.h>
#include <linux/mm.h>

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1

/*
 *	MM Cache Management
 *	===================
 *
 *	These methods are implemented in arch/arm64/mm/cache.S.
 *
 *	Start addresses are inclusive and end addresses are exclusive; start
 *	addresses should be rounded down, end addresses up.
 *
 *	See Documentation/core-api/cachetlb.rst for more information. Please note
 *	that the implementation assumes a non-aliasing VIPT D-cache and an
 *	(aliasing) VIPT I-cache.
 *
 *	flush_cache_mm(mm)
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_icache_range(start, end)
 *
 *		Ensure coherency between the I-cache and the D-cache in the
 *		region described by start, end.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	invalidate_icache_range(start, end)
 *
 *		Invalidate the I-cache in the region described by start, end.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	__flush_cache_user_range(start, end)
 *
 *		Ensure coherency between the I-cache and the D-cache in the
 *		region described by start, end.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	__flush_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in the page at kaddr is written back.
 *		- kaddr  - page address
 *		- size   - region size
 */
extern void __flush_icache_range(unsigned long start, unsigned long end);
extern int invalidate_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
extern void __inval_dcache_area(void *addr, size_t len);
extern void __clean_dcache_area_poc(void *addr, size_t len);
extern void __clean_dcache_area_pop(void *addr, size_t len);
extern void __clean_dcache_area_pou(void *addr, size_t len);
extern long __flush_cache_user_range(unsigned long start, unsigned long end);
extern void sync_icache_aliases(void *kaddr, unsigned long len);

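/*
 * Illustrative sketch only, not part of this header's API: a hypothetical
 * helper showing how a caller that has finished writing a kernel buffer
 * might use __flush_dcache_area() so the data reaches memory before a
 * non-coherent observer (e.g. a DMA master) reads it.
 */
static inline void __example_publish_buffer(void *kaddr, size_t size)
{
	/* Clean and invalidate the D-cache lines covering [kaddr, kaddr + size). */
	__flush_dcache_area(kaddr, size);
}
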
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
	__flush_icache_range(start, end);

	/*
	 * IPI all online CPUs so that they undergo a context synchronization
	 * event and are forced to refetch the new instructions.
	 */

	/*
	 * KGDB performs cache maintenance with interrupts disabled, so we
	 * will deadlock trying to IPI the secondary CPUs. In theory, we can
	 * set CACHE_FLUSH_IS_SAFE to 0 to avoid this known issue, but that
	 * just means that KGDB will elide the maintenance altogether! As it
	 * turns out, KGDB uses IPIs to round up the secondary CPUs during
	 * the patching operation, so we don't need extra IPIs here anyway.
	 * In which case, add a KGDB-specific bodge and return early.
	 */
	if (in_dbg_master())
		return;

	kick_all_cpus_sync();
}
#define flush_icache_range flush_icache_range
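
/*
 * Illustrative sketch only (hypothetical helper, not a kernel API): after
 * storing new instructions to a kernel buffer, a caller would make them
 * visible to the instruction side on all CPUs roughly like this.
 */
static inline void __example_sync_new_insns(void *insns, size_t size)
{
	unsigned long start = (unsigned long)insns;

	/* Clean D-cache to the PoU, invalidate I-cache, then IPI other CPUs. */
	flush_icache_range(start, start + size);
}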

/*
 * Cache maintenance functions used by the DMA API. Not to be used directly.
 */
extern void __dma_map_area(const void *, size_t, int);
extern void __dma_unmap_area(const void *, size_t, int);
extern void __dma_flush_area(const void *, size_t);

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space. Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_to_user_page copy_to_user_page

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (i.e. page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space. This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
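
/*
 * Illustrative sketch only (hypothetical helper): code that dirties a
 * page-cache page through its kernel mapping calls flush_dcache_page()
 * afterwards so that any user-space alias observes the new data.
 */
static inline void __example_dirty_page(struct page *page, void *kaddr)
{
	/* Write through the kernel alias... */
	*(char *)kaddr = 0;

	/* ...then resolve D-cache aliasing against user mappings. */
	flush_dcache_page(page);
}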

static __always_inline void __flush_icache_all(void)
{
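	/*
	 * With ARM64_HAS_CACHE_DIC, I-cache invalidation is not required
	 * for data to instruction coherence, so there is nothing to do.
	 */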
	if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
		return;

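	/* Invalidate all I-caches to the PoU, Inner Shareable, and synchronize. */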
	asm("ic	ialluis");
	dsb(ish);
}

int set_memory_valid(unsigned long addr, int numpages, int enable);

int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);

#include <asm-generic/cacheflush.h>

#endif /* __ASM_CACHEFLUSH_H */