// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/cache.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <asm/cache.h>

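/*
 * Flush a whole page for the instruction cache: map the page with
 * kmap_atomic(), write back and invalidate its cache lines, then drop
 * the temporary mapping.
 */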
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	unsigned long start;

	start = (unsigned long) kmap_atomic(page);

	cache_wbinv_range(start, start + PAGE_SIZE);

	kunmap_atomic((void *)start);
}

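/*
 * Flush only the bytes the kernel just wrote into a user page (e.g. when
 * inserting a breakpoint): the page offset of vaddr selects the start of
 * the range and len bounds it.
 */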
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long vaddr, int len)
{
	unsigned long kaddr;

	kaddr = (unsigned long) kmap_atomic(page) + (vaddr & ~PAGE_MASK);

	cache_wbinv_range(kaddr, kaddr + len);

	kunmap_atomic((void *)kaddr);
}

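/*
 * Called after a PTE has been set up for @address: skip invalid PFNs and
 * the global zero page, otherwise write back and invalidate the page's
 * cache lines through a temporary kernel mapping.
 */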
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *pte)
{
	unsigned long addr, pfn;
	struct page *page;

	pfn = pte_pfn(*pte);
	if (unlikely(!pfn_valid(pfn)))
		return;

	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

	addr = (unsigned long) kmap_atomic(page);

	cache_wbinv_range(addr, addr + PAGE_SIZE);

	kunmap_atomic((void *) addr);
}