// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Western Digital Corporation or its affiliates.
 * Adapted from arch/arm64/kernel/efi.c
 */

#include <linux/efi.h>
#include <linux/init.h>

#include <asm/efi.h>
#include <asm/pgtable.h>
#include <asm/pgtable-bits.h>

/*
 * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
 * executable; everything else can be mapped with execute
 * permission removed. Also take the optional RO/XP attribute
 * bits into account.
 */
static __init pgprot_t efimem_to_pgprot_map(efi_memory_desc_t *md)
{
	u64 attr = md->attribute;
	u32 type = md->type;

	if (type == EFI_MEMORY_MAPPED_IO)
		return PAGE_KERNEL;

	/* R-- */
	if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) ==
	    (EFI_MEMORY_XP | EFI_MEMORY_RO))
		return PAGE_KERNEL_READ;

	/* R-X */
	if (attr & EFI_MEMORY_RO)
		return PAGE_KERNEL_READ_EXEC;

	/* RW- */
	if (((attr & (EFI_MEMORY_RP | EFI_MEMORY_WP | EFI_MEMORY_XP)) ==
	     EFI_MEMORY_XP) ||
	    type != EFI_RUNTIME_SERVICES_CODE)
		return PAGE_KERNEL;

	/* RWX */
	return PAGE_KERNEL_EXEC;
}

int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
{
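	/*
	 * The region is mapped into the EFI-specific mm rather than the
	 * kernel's own page tables, so clear _PAGE_GLOBAL to keep these
	 * translations per-address-space.
	 */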
	pgprot_t prot = __pgprot(pgprot_val(efimem_to_pgprot_map(md)) &
				 ~(_PAGE_GLOBAL));
	int i;

	/* RISC-V maps one page at a time */
	for (i = 0; i < md->num_pages; i++)
		create_pgd_mapping(mm->pgd, md->virt_addr + i * PAGE_SIZE,
				   md->phys_addr + i * PAGE_SIZE,
				   PAGE_SIZE, prot);
	return 0;
}

static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
{
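	/*
	 * Per-PTE callback for apply_to_page_range(): tighten an existing
	 * runtime-services mapping according to the RO/XP attributes of
	 * the memory descriptor passed in via @data.
	 */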
	efi_memory_desc_t *md = data;
	pte_t pte = READ_ONCE(*ptep);
	unsigned long val;

	if (md->attribute & EFI_MEMORY_RO) {
		val = pte_val(pte) & ~_PAGE_WRITE;
		val |= _PAGE_READ;
		pte = __pte(val);
	}
	if (md->attribute & EFI_MEMORY_XP) {
		val = pte_val(pte) & ~_PAGE_EXEC;
		pte = __pte(val);
	}
	set_pte(ptep, pte);

	return 0;
}

int __init efi_set_mapping_permissions(struct mm_struct *mm,
				       efi_memory_desc_t *md)
{
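	/*
	 * The Memory Attributes table only describes runtime services
	 * code and data regions, so anything else here is a bug.
	 */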
	BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE &&
	       md->type != EFI_RUNTIME_SERVICES_DATA);

	/*
	 * Calling apply_to_page_range() is only safe on regions that are
	 * guaranteed to be mapped down to pages. Since we are only called
	 * for regions that have been mapped using efi_create_mapping() above
	 * (and this is checked by the generic Memory Attributes table parsing
	 * routines), there is no need to check that again here.
	 */
	return apply_to_page_range(mm, md->virt_addr,
				   md->num_pages << EFI_PAGE_SHIFT,
				   set_permissions, md);
}