// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/flush.c
 *
 * Copyright (C) 1995-2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
| 8 | |
| 9 | #include <linux/export.h> |
| 10 | #include <linux/mm.h> |
| 11 | #include <linux/pagemap.h> |
| 12 | |
| 13 | #include <asm/cacheflush.h> |
| 14 | #include <asm/cache.h> |
| 15 | #include <asm/tlbflush.h> |
| 16 | |
/*
 * Bring the I-cache into sync with the D-cache for the kernel mapping
 * [kaddr, kaddr + len), e.g. after new instructions have been written.
 */
void sync_icache_aliases(void *kaddr, unsigned long len)
{
	unsigned long start = (unsigned long)kaddr;

	if (!icache_is_aliasing()) {
		/*
		 * Don't issue kick_all_cpus_sync() after I-cache invalidation
		 * for user mappings.
		 */
		__flush_icache_range(start, start + len);
		return;
	}

	/*
	 * Aliasing I-cache: clean the D-cache to the PoU, then invalidate
	 * the whole I-cache rather than a single alias of the range.
	 */
	__clean_dcache_area_pou(kaddr, len);
	__flush_icache_all();
}
| 32 | |
| 33 | static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, |
| 34 | unsigned long uaddr, void *kaddr, |
| 35 | unsigned long len) |
| 36 | { |
| 37 | if (vma->vm_flags & VM_EXEC) |
| 38 | sync_icache_aliases(kaddr, len); |
| 39 | } |
| 40 | |
/*
 * Copy user data from/to a page which is mapped into a different process's
 * address space. Really, we want to allow our "user space" model to handle
 * this.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
	/* Write through the kernel alias of the target page ... */
	memcpy(dst, src, len);
	/* ... then sync the I-cache if the mapping is executable. */
	flush_ptrace_access(vma, page, uaddr, dst, len);
}
| 53 | |
| 54 | void __sync_icache_dcache(pte_t pte) |
| 55 | { |
| 56 | struct page *page = pte_page(pte); |
| 57 | |
| 58 | if (!test_and_set_bit(PG_dcache_clean, &page->flags)) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 59 | sync_icache_aliases(page_address(page), page_size(page)); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 60 | } |
| 61 | EXPORT_SYMBOL_GPL(__sync_icache_dcache); |
| 62 | |
/*
 * This function is called when a page has been modified by the kernel. Mark
 * it as dirty for later flushing when mapped in user space (if executable,
 * see __sync_icache_dcache).
 */
| 68 | void flush_dcache_page(struct page *page) |
| 69 | { |
| 70 | if (test_bit(PG_dcache_clean, &page->flags)) |
| 71 | clear_bit(PG_dcache_clean, &page->flags); |
| 72 | } |
| 73 | EXPORT_SYMBOL(flush_dcache_page); |
| 74 | |
/*
 * Additional functions defined in assembly.
 */
| 78 | EXPORT_SYMBOL(__flush_icache_range); |
| 79 | |
| 80 | #ifdef CONFIG_ARCH_HAS_PMEM_API |
/*
 * Write back (clean) the data cache for [addr, addr + size) so the data
 * reaches persistent memory ("_pop" suggests clean to the Point of
 * Persistence — confirm against the assembly helper).
 */
void arch_wb_cache_pmem(void *addr, size_t size)
{
	/* Ensure order against any prior non-cacheable writes */
	dmb(osh);
	__clean_dcache_area_pop(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
| 88 | |
/*
 * Discard any cached copies of [addr, addr + size) so subsequent reads
 * fetch the contents from the pmem media.
 */
void arch_invalidate_pmem(void *addr, size_t size)
{
	__inval_dcache_area(addr, size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
| 94 | #endif |