blob: 555b20b11dc3513e85a94afaccd97c024380dbb7 [file] [log] [blame]
David Brazdil0f672f62019-12-10 10:32:29 +00001/* SPDX-License-Identifier: GPL-2.0-only */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002/*
3 * Copyright (C) 2015 Regents of the University of California
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004 */
5
6#ifndef _ASM_RISCV_CACHEFLUSH_H
7#define _ASM_RISCV_CACHEFLUSH_H
8
David Brazdil0f672f62019-12-10 10:32:29 +00009#include <linux/mm.h>
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000010
/*
 * This architecture implements a real flush_dcache_page() (below) that
 * tracks deferred I-cache invalidation via PG_dcache_clean, so it must
 * advertise that fact to generic mm code: a value of 0 tells callers
 * the hook is a no-op and may legally be skipped, which would break the
 * I-cache/D-cache coherency tracking.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
12
/*
 * The cache doesn't need to be flushed when TLB entries change because
 * the cache is indexed by physical address, not by virtual address.
 */
/* No-op: caches are physically indexed, so nothing to flush. */
static inline void flush_cache_all(void)
{
}
20
/* No-op on address-space teardown: physically indexed caches. */
static inline void flush_cache_mm(struct mm_struct *mm)
{
}
24
/* No-op when duplicating an mm (fork): physically indexed caches. */
static inline void flush_cache_dup_mm(struct mm_struct *mm)
{
}
28
/* No-op for a user VA range: physically indexed caches. */
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
}
34
/* No-op for a single user page: physically indexed caches. */
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr,
				    unsigned long pfn)
{
}
40
/* No-op: flush_dcache_page() here only flips a page flag, so no
 * mapping-level locking is required around it. */
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}
44
/* No-op: pairs with flush_dcache_mmap_lock() above. */
static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}
48
/* No-op: I-cache maintenance is done via the flush_icache_* macros and
 * PG_dcache_clean tracking elsewhere in this header. */
static inline void flush_icache_page(struct vm_area_struct *vma,
				     struct page *page)
{
}
53
/* No-op when creating vmalloc/ioremap mappings: physically indexed caches. */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}
57
/* No-op when tearing down vmalloc/ioremap mappings. */
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
61
/*
 * copy_to_user_page(): copy @len bytes from @src into the kernel mapping
 * @dst of a user page, then synchronize the I-cache in case the page
 * holds code.  Note the flush discards @vma/@page/@vaddr/@len — below,
 * flush_icache_user_range() expands to a whole-I-cache flush.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do { \
		memcpy(dst, src, len); \
		flush_icache_user_range(vma, page, vaddr, len); \
	} while (0)
/* Reading from a user page needs no cache maintenance here. */
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000069
/*
 * Synchronize this hart's instruction fetches with prior stores.
 * FENCE.I only orders the executing hart; cross-hart coherence is the
 * job of the SMP flush_icache_all() declared below.
 */
static inline void local_flush_icache_all(void)
{
	asm volatile ("fence.i" ::: "memory");
}
74
/* Arch-private page flag; when set, no I-cache flush is pending for the
 * page.  NOTE(review): semantics inferred from the name and the clear
 * below — confirm against the PTE/I-cache flush paths that set it. */
#define PG_dcache_clean PG_arch_1

static inline void flush_dcache_page(struct page *page)
{
	/* Test first so the atomic clear_bit() RMW is skipped when the
	 * flag is already clear. */
	if (test_bit(PG_dcache_clean, &page->flags))
		clear_bit(PG_dcache_clean, &page->flags);
}
82
/*
 * RISC-V doesn't have an instruction to flush parts of the instruction cache,
 * so instead we just flush the whole thing.
 *
 * Note: the macro parameters are never evaluated, so arguments with side
 * effects would be silently dropped.
 */
#define flush_icache_range(start, end) flush_icache_all()
#define flush_icache_user_range(vma, pg, addr, len) flush_icache_all()
89
#ifndef CONFIG_SMP

/* UP: a local FENCE.I covers the only hart in the system. */
#define flush_icache_all() local_flush_icache_all()
#define flush_icache_mm(mm, local) flush_icache_all()

#else /* CONFIG_SMP */

/* SMP: out-of-line versions defined elsewhere in arch/riscv that
 * presumably also reach remote harts — confirm in arch/riscv/mm. */
void flush_icache_all(void);
void flush_icache_mm(struct mm_struct *mm, bool local);

#endif /* CONFIG_SMP */
101
/*
 * Bits in sys_riscv_flush_icache()'s flags argument.
 */
/* Presumably restricts the flush to the calling hart — confirm against
 * the sys_riscv_flush_icache() implementation. */
#define SYS_RISCV_FLUSH_ICACHE_LOCAL 1UL
/* OR of every currently-defined flag bit (used for flags validation). */
#define SYS_RISCV_FLUSH_ICACHE_ALL (SYS_RISCV_FLUSH_ICACHE_LOCAL)

#endif /* _ASM_RISCV_CACHEFLUSH_H */