blob: ac485163a4a7669f0e6d1b1eca683c65ad8c59ec [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/flush.c
 *
 * Copyright (C) 1995-2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
8
9#include <linux/export.h>
10#include <linux/mm.h>
11#include <linux/pagemap.h>
12
13#include <asm/cacheflush.h>
14#include <asm/cache.h>
15#include <asm/tlbflush.h>
16
/*
 * Bring the I-cache into sync with the D-cache for [kaddr, kaddr + len),
 * e.g. after the kernel has written instructions into the region.
 */
void sync_icache_aliases(void *kaddr, unsigned long len)
{
	unsigned long start = (unsigned long)kaddr;

	if (!icache_is_aliasing()) {
		/*
		 * Non-aliasing I-cache: maintaining the kernel alias by
		 * virtual range is enough.  Don't issue
		 * kick_all_cpus_sync() after I-cache invalidation for
		 * user mappings.
		 */
		__flush_icache_range(start, start + len);
		return;
	}

	/*
	 * Aliasing I-cache: clean the D-cache to the point of
	 * unification, then invalidate the whole I-cache.
	 */
	__clean_dcache_area_pou(kaddr, len);
	__flush_icache_all();
}
32
33static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
34 unsigned long uaddr, void *kaddr,
35 unsigned long len)
36{
37 if (vma->vm_flags & VM_EXEC)
38 sync_icache_aliases(kaddr, len);
39}
40
/*
 * Copy user data from/to a page which is mapped into a different process's
 * address space. Really, we want to allow our "user space" model to handle
 * this.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
	/*
	 * Stage the bytes through the kernel mapping first, then repair
	 * the caches for the user alias (I-cache sync only happens for
	 * executable VMAs — see flush_ptrace_access()).
	 */
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
}
53
54void __sync_icache_dcache(pte_t pte)
55{
56 struct page *page = pte_page(pte);
57
58 if (!test_and_set_bit(PG_dcache_clean, &page->flags))
David Brazdil0f672f62019-12-10 10:32:29 +000059 sync_icache_aliases(page_address(page), page_size(page));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000060}
61EXPORT_SYMBOL_GPL(__sync_icache_dcache);
62
63/*
64 * This function is called when a page has been modified by the kernel. Mark
65 * it as dirty for later flushing when mapped in user space (if executable,
66 * see __sync_icache_dcache).
67 */
68void flush_dcache_page(struct page *page)
69{
70 if (test_bit(PG_dcache_clean, &page->flags))
71 clear_bit(PG_dcache_clean, &page->flags);
72}
73EXPORT_SYMBOL(flush_dcache_page);
74
75/*
76 * Additional functions defined in assembly.
77 */
78EXPORT_SYMBOL(__flush_icache_range);
79
80#ifdef CONFIG_ARCH_HAS_PMEM_API
/*
 * Write back (clean) the D-cache for a pmem region — the "_pop" helper
 * presumably cleans to the point of persistence; confirm against the
 * assembly implementation.
 */
void arch_wb_cache_pmem(void *addr, size_t size)
{
	/* Ensure order against any prior non-cacheable writes */
	dmb(osh);
	__clean_dcache_area_pop(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
88
/*
 * Invalidate the D-cache for a pmem region so subsequent reads observe
 * memory contents rather than stale cached lines.
 */
void arch_invalidate_pmem(void *addr, size_t size)
{
	__inval_dcache_area(addr, size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
94#endif