// SPDX-License-Identifier: GPL-2.0
/*
 * highmem.c: virtual kernel memory mappings for high memory
 *
 * PowerPC version, stolen from the i386 version.
 *
 * Used in CONFIG_HIGHMEM systems for memory pages which
 * are not addressable by direct kernel virtual addresses.
 *
 * Copyright (C) 1999 Gerhard Wichert, Siemens AG
 *		      Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * up to 16 Terabyte physical memory. With current x86 CPUs
 * we now support up to 64 Gigabytes physical RAM.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 *
 * Reworked for PowerPC by various contributors. Moved from
 * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp.
 */

#include <linux/highmem.h>
#include <linux/module.h>

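/*
 * kmap_atomic_high_prot - map a highmem page into a per-CPU fixmap slot
 * @page: highmem page to map
 * @prot: protection bits to use for the mapping
 *
 * This is the highmem slow path; callers normally go through the generic
 * kmap_atomic()/kmap_atomic_prot() wrappers in <linux/highmem.h>, which
 * handle lowmem pages directly and disable pagefaults and preemption
 * before calling here. A usage sketch (not part of this file):
 *
 *	void *vaddr = kmap_atomic(page);
 *	memcpy(buf, vaddr, PAGE_SIZE);
 *	kunmap_atomic(vaddr);
 *
 * The returned mapping is strictly per-CPU and must be released with
 * kunmap_atomic() on the same CPU.
 */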
void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	WARN_ON(IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !pte_none(*(kmap_pte - idx)));
	__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
	local_flush_tlb_page(NULL, vaddr);

	return (void*) vaddr;
}
EXPORT_SYMBOL(kmap_atomic_high_prot);

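/*
 * kunmap_atomic_high - tear down a mapping set up by kmap_atomic_high_prot()
 * @kvaddr: kernel virtual address returned by the matching kmap_atomic()
 *
 * Lowmem addresses below the kmap fixmap area were never mapped here and
 * are ignored. For fixmap addresses, the pte is cleared and the TLB entry
 * flushed when CONFIG_DEBUG_HIGHMEM is enabled, and the per-CPU kmap
 * index is always popped.
 */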
void kunmap_atomic_high(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr < __fix_to_virt(FIX_KMAP_END))
		return;

	if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM)) {
		int type = kmap_atomic_idx();
		unsigned int idx;

		idx = type + KM_TYPE_NR * smp_processor_id();
		WARN_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

		/*
		 * force other mappings to Oops if they try to access
		 * this pte without first remapping it
		 */
		pte_clear(&init_mm, vaddr, kmap_pte-idx);
		local_flush_tlb_page(NULL, vaddr);
	}

	kmap_atomic_idx_pop();
}
EXPORT_SYMBOL(kunmap_atomic_high);