/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#pragma once

#include <stdalign.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#include "hf/addr.h"
#include "hf/mpool.h"

typedef uint32_t mm_mode_t;
typedef uint64_t mm_attr_t;

/**
 * The level of a page table entry (i.e. how deep into the recursive tree
 * structure it is). See also Arm ARM, table D8-14.
 *
 * - `level == 4`: table entries (root)
 * - `level == 3`: table or block entries
 * - `level == 2`: table or block entries
 * - `level == 1`: table or block entries
 * - `level == 0`: page entries
 *
 * NOTE: The Arm ARM numbers levels in the opposite order to our code: in the
 * Arm ARM, levels start at 0 (or -1 when 52-bit PAs are used, which Hafnium
 * does not support) and page entries are at level 3. We go in the opposite
 * direction: levels start at 3 or 4 and page entries are at level 0. This
 * makes the arithmetic and bit manipulation easier.
 */
typedef uint8_t mm_level_t;
typedef uint16_t mm_asid_t;

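/*
 * Illustrative sketch (not part of the public API): with the level numbering
 * above, the size of the address range covered by one entry at a given level
 * reduces to a single shift. Assumes PAGE_BITS and PAGE_LEVEL_BITS as
 * provided by the arch layer; the helper name is hypothetical.
 */
static inline uint64_t mm_example_entry_size(mm_level_t level)
{
	/* Each level up multiplies the coverage by 2^PAGE_LEVEL_BITS. */
	return UINT64_C(1) << (PAGE_BITS + level * PAGE_LEVEL_BITS);
}
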
/*
 * A page table entry (PTE) will take one of the following forms:
 *
 * 1. absent        : There is no mapping.
 * 2. invalid block : Represents a block that is not in the address space.
 * 3. valid block   : Represents a block that is in the address space.
 * 4. table         : Represents a reference to a table of PTEs.
 *
 * See Arm ARM, D8.3 (Translation table descriptor formats).
 */
enum mm_pte_type {
	PTE_TYPE_ABSENT,
	PTE_TYPE_INVALID_BLOCK,
	PTE_TYPE_VALID_BLOCK,
	PTE_TYPE_TABLE,
};
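
/*
 * Illustrative decoder (a sketch only; the authoritative decoding lives in
 * the arch layer). It assumes the aarch64 descriptor encoding from Arm ARM
 * D8.3: bit 0 is the valid bit, and bit 1 selects a table descriptor at
 * levels above 0. Treating an all-zero entry as absent, and any other
 * invalid entry as an invalid block, is an assumption of this sketch.
 */
static inline enum mm_pte_type mm_example_pte_type(pte_t pte, mm_level_t level)
{
	if (pte == 0) {
		return PTE_TYPE_ABSENT;
	}
	if ((pte & UINT64_C(1)) == 0) {
		return PTE_TYPE_INVALID_BLOCK;
	}
	if (level > 0 && (pte & UINT64_C(2)) != 0) {
		return PTE_TYPE_TABLE;
	}
	return PTE_TYPE_VALID_BLOCK;
}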

/* Keep macro alignment */
/* clang-format off */

#define PAGE_SIZE ((size_t)(1 << PAGE_BITS))
#define MM_PTE_PER_PAGE (PAGE_SIZE / sizeof(pte_t))

/* The following are arch-independent page mapping modes. */
#define MM_MODE_R (1U << 0) /* read */
#define MM_MODE_W (1U << 1) /* write */
#define MM_MODE_X (1U << 2) /* execute */
#define MM_MODE_D (1U << 3) /* device */

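/*
 * For example (illustrative): normal read-write memory composes as
 * MM_MODE_R | MM_MODE_W, and a device MMIO region would typically add
 * MM_MODE_D, i.e. MM_MODE_R | MM_MODE_W | MM_MODE_D.
 */
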
/*
 * Memory in stage-1 is either valid (present) or invalid (absent).
 *
 * Memory in stage-2 has more states to track sharing, borrowing and giving of
 * memory. The states are made up of three parts:
 *
 * 1. V = valid/invalid    : Whether the memory is part of the VM's address
 *                           space. A fault will be generated if accessed when
 *                           invalid.
 * 2. O = owned/unowned    : Whether the memory is owned by the VM.
 * 3. X = exclusive/shared : Whether access is exclusive to the VM or shared
 *                           with at most one other.
 *
 * These parts compose to form the following states:
 *
 * -  V  O  X : Owner of memory with exclusive access.
 * -  V  O !X : Owner of memory with access shared with at most one other VM.
 * -  V !O  X : Borrower of memory with exclusive access.
 * -  V !O !X : Borrower of memory where access is shared with the owner.
 * - !V  O  X : Owner of memory lent to a VM that has exclusive access.
 *
 * - !V  O !X : Unused. The owner of shared memory always has access.
 * - !V !O  X : Unused. The next entry is used for invalid memory.
 *
 * - !V !O !X : Invalid memory. Memory is unrelated to the VM.
 *
 * Modes are selected so that the owner of exclusive memory is the default.
 */
#define MM_MODE_INVALID (1U << 4)
#define MM_MODE_UNOWNED (1U << 5)
#define MM_MODE_SHARED  (1U << 6)
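
/*
 * Examples (derived directly from the state table above, illustrative only):
 *
 * -  V  O  X : 0 (no state bits set - the default)
 * -  V  O !X : MM_MODE_SHARED
 * -  V !O  X : MM_MODE_UNOWNED
 * -  V !O !X : MM_MODE_UNOWNED | MM_MODE_SHARED
 * - !V  O  X : MM_MODE_INVALID
 * - !V !O !X : MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED
 */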

/* Map page as non-global. */
#define MM_MODE_NG (1U << 8)

/* Specifies whether a mapping is a user (EL0) mapping. */
#define MM_MODE_USER (1U << 9)

/* The mask for a mode that is considered unmapped. */
#define MM_MODE_UNMAPPED_MASK (MM_MODE_INVALID | MM_MODE_UNOWNED)

/* clang-format on */

/**
 * Flags for page table operations.
 * - commit: Commit the given range rather than preparing it.
 * - unmap: Unmap the given range rather than mapping it.
 */
struct mm_flags {
	bool commit : 1;
	bool unmap : 1;
};
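
/*
 * For example (illustrative): the commit pass of an unmap operation would
 * use:
 *
 *	struct mm_flags flags = {.commit = true, .unmap = true};
 */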

#define MM_PPOOL_ENTRY_SIZE sizeof(struct mm_page_table)

struct mm_page_table {
	alignas(PAGE_SIZE) pte_t entries[MM_PTE_PER_PAGE];
};
static_assert(sizeof(struct mm_page_table) == PAGE_SIZE,
	      "A page table must take exactly one page.");
static_assert(alignof(struct mm_page_table) == PAGE_SIZE,
	      "A page table must be page aligned.");
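
/*
 * Sketch (assuming the mpool API from "hf/mpool.h", with the pool's entry
 * size set to MM_PPOOL_ENTRY_SIZE at initialisation): page tables are carved
 * out of a memory pool one table at a time. The helper name is hypothetical.
 */
static inline struct mm_page_table *mm_example_alloc_table(struct mpool *ppool)
{
	return (struct mm_page_table *)mpool_alloc(ppool);
}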

struct mm_ptable {
	/**
	 * VMID/ASID associated with a page table. ASID 0 is reserved for use
	 * by the hypervisor.
	 */
	mm_asid_t id;
	/**
	 * Address of the root tables.
	 * At stage 1, concatenated tables are not used, so there is only one
	 * root table.
	 * At stage 2, concatenated tables are used, so there are multiple
	 * root tables (given by `arch_mm_root_table_count()`). The Arm ARM
	 * says there can be up to 16 root tables, but we only use 4.
	 */
	struct mm_page_table *root_tables;
	/** If true, this is a stage-1 table, otherwise it is a stage-2 table. */
	bool stage1 : 1;
};

/** The type of addresses stored in the page table. */
typedef uintvaddr_t ptable_addr_t;

/** Represents the currently locked stage-1 page table of the hypervisor. */
struct mm_stage1_locked {
	struct mm_ptable *ptable;
};

void mm_vm_enable_invalidation(void);

bool mm_ptable_init(struct mm_ptable *ptable, mm_asid_t id, bool stage1,
		    struct mpool *ppool);
ptable_addr_t mm_ptable_addr_space_end(const struct mm_ptable *ptable);

bool mm_vm_init(struct mm_ptable *ptable, mm_asid_t id, struct mpool *ppool);
void mm_vm_fini(const struct mm_ptable *ptable, struct mpool *ppool);

bool mm_identity_prepare(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
			 mm_mode_t mode, struct mpool *ppool);
void *mm_identity_commit(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
			 mm_mode_t mode, struct mpool *ppool);

bool mm_vm_identity_map(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
			mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa);
bool mm_vm_identity_prepare(struct mm_ptable *ptable, paddr_t begin,
			    paddr_t end, mm_mode_t mode, struct mpool *ppool);
void mm_vm_identity_commit(struct mm_ptable *ptable, paddr_t begin,
			   paddr_t end, mm_mode_t mode, struct mpool *ppool,
			   ipaddr_t *ipa);
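
/*
 * Usage sketch (illustrative, not part of the API): callers that must not
 * fail halfway through first reserve all the tables a mapping needs, then
 * commit. The wrapper name is hypothetical.
 */
static inline bool mm_example_map_atomically(struct mm_ptable *ptable,
					     paddr_t begin, paddr_t end,
					     mm_mode_t mode,
					     struct mpool *ppool)
{
	ipaddr_t ipa;

	if (!mm_vm_identity_prepare(ptable, begin, end, mode, ppool)) {
		/* Allocation failed; the tables were left untouched. */
		return false;
	}
	/* The commit pass cannot fail once prepare has succeeded. */
	mm_vm_identity_commit(ptable, begin, end, mode, ppool, &ipa);
	return true;
}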
bool mm_vm_unmap(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
		 struct mpool *ppool);
void mm_stage1_defrag(struct mm_ptable *ptable, struct mpool *ppool);
void mm_vm_defrag(struct mm_ptable *ptable, struct mpool *ppool,
		  bool non_secure);
void mm_vm_dump(const struct mm_ptable *ptable);
bool mm_vm_get_mode(const struct mm_ptable *ptable, ipaddr_t begin,
		    ipaddr_t end, mm_mode_t *mode);

bool mm_vm_get_mode_partial(const struct mm_ptable *ptable, ipaddr_t begin,
			    ipaddr_t end, mm_mode_t *mode, ipaddr_t *end_ret);

bool mm_get_mode(const struct mm_ptable *ptable, vaddr_t begin, vaddr_t end,
		 mm_mode_t *mode);

bool mm_get_mode_partial(const struct mm_ptable *ptable, vaddr_t begin,
			 vaddr_t end, mm_mode_t *mode, vaddr_t *end_ret);

struct mm_stage1_locked mm_lock_ptable_unsafe(struct mm_ptable *ptable);
struct mm_stage1_locked mm_lock_stage1(void);
void mm_unlock_stage1(struct mm_stage1_locked *lock);
void *mm_identity_map(struct mm_stage1_locked stage1_locked, paddr_t begin,
		      paddr_t end, mm_mode_t mode, struct mpool *ppool);
bool mm_unmap(struct mm_stage1_locked stage1_locked, paddr_t begin,
	      paddr_t end, struct mpool *ppool);
void mm_defrag(struct mm_stage1_locked stage1_locked, struct mpool *ppool);
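
/*
 * Usage sketch (illustrative): mapping a physical range into the
 * hypervisor's stage-1 address space brackets the operation with the
 * stage-1 lock. The helper name and the choice of mode bits are assumptions
 * of this sketch.
 */
static inline void *mm_example_map_stage1(paddr_t begin, paddr_t end,
					  struct mpool *ppool)
{
	struct mm_stage1_locked stage1_locked = mm_lock_stage1();
	void *ptr = mm_identity_map(stage1_locked, begin, end,
				    MM_MODE_R | MM_MODE_W, ppool);

	mm_unlock_stage1(&stage1_locked);
	return ptr;
}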

bool mm_init(struct mpool *ppool);