Andrew Scull | 1883487 | 2018-10-12 11:48:09 +0100 | [diff] [blame] | 1 | /* |
Andrew Walbran | 692b325 | 2019-03-07 15:51:31 +0000 | [diff] [blame] | 2 | * Copyright 2018 The Hafnium Authors. |
Andrew Scull | 1883487 | 2018-10-12 11:48:09 +0100 | [diff] [blame] | 3 | * |
Andrew Walbran | e959ec1 | 2020-06-17 15:01:09 +0100 | [diff] [blame] | 4 | * Use of this source code is governed by a BSD-style |
| 5 | * license that can be found in the LICENSE file or at |
| 6 | * https://opensource.org/licenses/BSD-3-Clause. |
Andrew Scull | 1883487 | 2018-10-12 11:48:09 +0100 | [diff] [blame] | 7 | */ |
| 8 | |
Andrew Scull | fbc938a | 2018-08-20 14:09:28 +0100 | [diff] [blame] | 9 | #pragma once |
Wedson Almeida Filho | fed6902 | 2018-07-11 15:39:12 +0100 | [diff] [blame] | 10 | |
| 11 | #include <stdbool.h> |
| 12 | #include <stddef.h> |
Wedson Almeida Filho | fed6902 | 2018-07-11 15:39:12 +0100 | [diff] [blame] | 13 | |
Andrew Scull | 18c78fc | 2018-08-20 12:57:41 +0100 | [diff] [blame] | 14 | #include "hf/addr.h" |
Karl Meakin | 07a69ab | 2025-02-07 14:53:19 +0000 | [diff] [blame^] | 15 | #include "hf/mm.h" |
Wedson Almeida Filho | fed6902 | 2018-07-11 15:39:12 +0100 | [diff] [blame] | 16 | |
Olivier Deprez | 96a2a26 | 2020-06-11 17:21:38 +0200 | [diff] [blame] | 17 | #include "vmapi/hf/ffa.h" |
| 18 | |
/*
 * A page table entry (PTE) will take one of the following forms:
 *
 * 1. absent        : There is no mapping.
 * 2. invalid block : Represents a block that is not in the address space.
 * 3. valid block   : Represents a block that is in the address space.
 * 4. table         : Represents a reference to a table of PTEs.
 */

/**
 * Creates an absent PTE, i.e. one that maps nothing, suitable for use at the
 * given page table level.
 */
pte_t arch_mm_absent_pte(mm_level_t level);

/**
 * Creates a table PTE for the given level, referencing the next-level table
 * located at physical address `pa`.
 */
pte_t arch_mm_table_pte(mm_level_t level, paddr_t pa);

/**
 * Creates a block PTE for the given level, mapping the block that starts at
 * physical address `pa` with the architecture-specific attributes `attrs`.
 */
pte_t arch_mm_block_pte(mm_level_t level, paddr_t pa, mm_attr_t attrs);

/**
 * Checks whether a block is allowed at the given level of the page table.
 * (Architectures typically only permit block mappings at a subset of levels.)
 */
bool arch_mm_is_block_allowed(mm_level_t level);

/**
 * Determines if a PTE is present i.e. it contains information and therefore
 * needs to exist in the page table. Any non-absent PTE is present.
 */
bool arch_mm_pte_is_present(pte_t pte, mm_level_t level);

/**
 * Determines if a PTE is valid i.e. it can affect the address space. Tables and
 * valid blocks fall into this category. Invalid blocks do not as they hold
 * information about blocks that are not in the address space.
 */
bool arch_mm_pte_is_valid(pte_t pte, mm_level_t level);

/**
 * Determines if a PTE is a block and represents an address range, valid or
 * invalid.
 */
bool arch_mm_pte_is_block(pte_t pte, mm_level_t level);

/**
 * Determines if a PTE represents a reference to a table of PTEs.
 */
bool arch_mm_pte_is_table(pte_t pte, mm_level_t level);

/**
 * Clears the bits of an address that are ignored by the page table. In effect,
 * the address is rounded down to the start of the corresponding PTE range.
 */
paddr_t arch_mm_clear_pa(paddr_t pa);

/**
 * Extracts the start address of the block mapped by the given block PTE at the
 * given level.
 */
paddr_t arch_mm_block_from_pte(pte_t pte, mm_level_t level);

/**
 * Extracts the physical address of the next-level table referenced by the
 * given table PTE.
 */
paddr_t arch_mm_table_from_pte(pte_t pte, mm_level_t level);

/**
 * Extracts the architecture-specific attributes of the PTE.
 */
mm_attr_t arch_mm_pte_attrs(pte_t pte, mm_level_t level);

/**
 * Merges the attributes of a block into those of its containing table, giving
 * the effective attributes of the block when reached through that table.
 */
mm_attr_t arch_mm_combine_table_entry_attrs(mm_attr_t table_attrs,
					    mm_attr_t block_attrs);

/**
 * Invalidates the given range of stage-1 TLB for the address space identified
 * by `asid`.
 */
void arch_mm_invalidate_stage1_range(ffa_id_t asid, vaddr_t va_begin,
				     vaddr_t va_end);

/**
 * Invalidates the given range of stage-2 TLB for the VM identified by `vmid`.
 *
 * NOTE(review): `non_secure` presumably selects the non-secure IPA space when
 * secure stage-2 translation is in use — confirm against the arch
 * implementation.
 */
void arch_mm_invalidate_stage2_range(ffa_id_t vmid, ipaddr_t va_begin,
				     ipaddr_t va_end, bool non_secure);

/**
 * Writes back the given range of virtual memory to such a point that all cores
 * and devices will see the updated values. The corresponding cache lines are
 * also invalidated.
 */
void arch_mm_flush_dcache(void *base, size_t size);

/**
 * Sets the maximum level allowed in the page table for stage-1, based on the
 * supported physical address size in bits.
 */
void arch_mm_stage1_max_level_set(uint32_t pa_bits);

/**
 * Gets the maximum level allowed in the page table for stage-1.
 */
mm_level_t arch_mm_stage1_max_level(void);

/**
 * Gets the maximum level allowed in the page table for stage-2.
 */
mm_level_t arch_mm_stage2_max_level(void);

/**
 * Gets the number of concatenated page tables used at the root for stage-1.
 *
 * Tables are concatenated at the root to avoid introducing another level in the
 * page table meaning the table is shallow and wide. Each level is an extra
 * memory access when walking the table so keeping it shallow reduces the memory
 * accesses to aid performance.
 */
uint8_t arch_mm_stage1_root_table_count(void);

/**
 * Gets the number of concatenated page tables used at the root for stage-2.
 */
uint8_t arch_mm_stage2_root_table_count(void);

/**
 * Converts the mode into stage-1 attributes for a block PTE.
 */
mm_attr_t arch_mm_mode_to_stage1_attrs(mm_mode_t mode);

/**
 * Converts the mode into stage-2 attributes for a block PTE.
 */
mm_attr_t arch_mm_mode_to_stage2_attrs(mm_mode_t mode);

/**
 * Converts the stage-2 block attributes back to the corresponding mode.
 */
mm_mode_t arch_mm_stage2_attrs_to_mode(mm_attr_t attrs);

/**
 * Converts the stage-1 block attributes back to the corresponding mode.
 */
mm_mode_t arch_mm_stage1_attrs_to_mode(mm_attr_t attrs);

/**
 * Initializes the arch specific memory management, using the table at physical
 * address `table` (NOTE(review): presumably the stage-1 root table — confirm
 * against the arch implementation). Returns whether initialization succeeded.
 */
bool arch_mm_init(paddr_t table);

/**
 * Return the arch specific mm mode for send/recv pages of given VM ID.
 */
mm_mode_t arch_mm_extra_mode_from_vm(ffa_id_t id);

/**
 * Execute any barriers or synchronization that is required
 * by a given architecture, after page table writes.
 */
void arch_mm_sync_table_writes(void);

/**
 * Returns the maximum supported PA Range index (the encoded value, not a bit
 * count; see arch_mm_get_pa_bits() for the conversion).
 */
uint64_t arch_mm_get_pa_range(void);

/**
 * Returns the maximum supported PA Range in bits, converted from the encoded
 * `pa_range` value reported by arch_mm_get_pa_range().
 */
uint32_t arch_mm_get_pa_bits(uint64_t pa_range);

/**
 * Returns VTCR_EL2 configured in arch_mm_init.
 */
uintptr_t arch_mm_get_vtcr_el2(void);

/**
 * Returns VSTCR_EL2 configured in arch_mm_init.
 */
uintptr_t arch_mm_get_vstcr_el2(void);