// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/sh/mm/kmap.c
 *
 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
 * Copyright (C) 2002 - 2009 Paul Mundt
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

17#define kmap_get_fixmap_pte(vaddr) \
18 pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
19
20static pte_t *kmap_coherent_pte;
21
22void __init kmap_coherent_init(void)
23{
24 unsigned long vaddr;
25
26 /* cache the first coherent kmap pte */
27 vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
28 kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
29}
30
31void *kmap_coherent(struct page *page, unsigned long addr)
32{
33 enum fixed_addresses idx;
34 unsigned long vaddr;
35
36 BUG_ON(!test_bit(PG_dcache_clean, &page->flags));
37
38 preempt_disable();
39 pagefault_disable();
40
41 idx = FIX_CMAP_END -
42 (((addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1)) +
43 (FIX_N_COLOURS * smp_processor_id()));
44
45 vaddr = __fix_to_virt(idx);
46
47 BUG_ON(!pte_none(*(kmap_coherent_pte - idx)));
48 set_pte(kmap_coherent_pte - idx, mk_pte(page, PAGE_KERNEL));
49
50 return (void *)vaddr;
51}
52
53void kunmap_coherent(void *kvaddr)
54{
55 if (kvaddr >= (void *)FIXADDR_START) {
56 unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;
57 enum fixed_addresses idx = __virt_to_fix(vaddr);
58
59 /* XXX.. Kill this later, here for sanity at the moment.. */
60 __flush_purge_region((void *)vaddr, PAGE_SIZE);
61
62 pte_clear(&init_mm, vaddr, kmap_coherent_pte - idx);
63 local_flush_tlb_one(get_asid(), vaddr);
64 }
65
66 pagefault_enable();
67 preempt_enable();
68}