Andrew Scull | 1883487 | 2018-10-12 11:48:09 +0100 | [diff] [blame] | 1 | /* |
Andrew Walbran | 692b325 | 2019-03-07 15:51:31 +0000 | [diff] [blame] | 2 | * Copyright 2018 The Hafnium Authors. |
Andrew Scull | 1883487 | 2018-10-12 11:48:09 +0100 | [diff] [blame] | 3 | * |
Andrew Walbran | e959ec1 | 2020-06-17 15:01:09 +0100 | [diff] [blame] | 4 | * Use of this source code is governed by a BSD-style |
| 5 | * license that can be found in the LICENSE file or at |
| 6 | * https://opensource.org/licenses/BSD-3-Clause. |
Andrew Scull | 1883487 | 2018-10-12 11:48:09 +0100 | [diff] [blame] | 7 | */ |
| 8 | |
Andrew Scull | fbc938a | 2018-08-20 14:09:28 +0100 | [diff] [blame] | 9 | #pragma once |
Wedson Almeida Filho | fed6902 | 2018-07-11 15:39:12 +0100 | [diff] [blame] | 10 | |
| 11 | #include <stdbool.h> |
| 12 | #include <stddef.h> |
Wedson Almeida Filho | fed6902 | 2018-07-11 15:39:12 +0100 | [diff] [blame] | 13 | |
Andrew Scull | 18c78fc | 2018-08-20 12:57:41 +0100 | [diff] [blame] | 14 | #include "hf/addr.h" |
Wedson Almeida Filho | fed6902 | 2018-07-11 15:39:12 +0100 | [diff] [blame] | 15 | |
Olivier Deprez | 96a2a26 | 2020-06-11 17:21:38 +0200 | [diff] [blame] | 16 | #include "vmapi/hf/ffa.h" |
| 17 | |
/*
 * A page table entry (PTE) will take one of the following forms:
 *
 *  1. absent        : There is no mapping.
 *  2. invalid block : Represents a block that is not in the address space.
 *  3. valid block   : Represents a block that is in the address space.
 *  4. table         : Represents a reference to a table of PTEs.
 */

/**
 * Creates an absent PTE, i.e. one representing no mapping, suitable for the
 * given level of the page table.
 */
pte_t arch_mm_absent_pte(uint8_t level);

/**
 * Creates a table PTE at the given level that references the next-level table
 * of PTEs located at the given physical address.
 */
pte_t arch_mm_table_pte(uint8_t level, paddr_t pa);

/**
 * Creates a block PTE at the given level, mapping the block that starts at the
 * given physical address with the given architecture-specific attributes.
 */
pte_t arch_mm_block_pte(uint8_t level, paddr_t pa, uint64_t attrs);

/**
 * Checks whether a block is allowed at the given level of the page table;
 * architectures may not support block mappings at every level.
 */
bool arch_mm_is_block_allowed(uint8_t level);
Andrew Scull | c66a04d | 2018-12-07 13:41:56 +0000 | [diff] [blame] | 46 | |
/**
 * Determines if a PTE is present, i.e. it contains information and therefore
 * needs to exist in the page table. Any non-absent PTE is present.
 */
bool arch_mm_pte_is_present(pte_t pte, uint8_t level);

/**
 * Determines if a PTE is valid, i.e. it can affect the address space. Tables
 * and valid blocks fall into this category. Invalid blocks do not, as they
 * hold information about blocks that are not in the address space.
 */
bool arch_mm_pte_is_valid(pte_t pte, uint8_t level);

/**
 * Determines if a PTE is a block and therefore represents an address range,
 * whether valid or invalid.
 */
bool arch_mm_pte_is_block(pte_t pte, uint8_t level);

/**
 * Determines if a PTE represents a reference to a table of PTEs.
 */
bool arch_mm_pte_is_table(pte_t pte, uint8_t level);
Andrew Scull | c66a04d | 2018-12-07 13:41:56 +0000 | [diff] [blame] | 70 | |
/**
 * Clears the bits of an address that are ignored by the page table. In effect,
 * the address is rounded down to the start of the corresponding PTE range.
 */
paddr_t arch_mm_clear_pa(paddr_t pa);

/**
 * Extracts the start physical address of the range mapped by a block PTE at
 * the given level.
 */
paddr_t arch_mm_block_from_pte(pte_t pte, uint8_t level);

/**
 * Extracts the physical address of the next-level table referenced by a table
 * PTE.
 */
paddr_t arch_mm_table_from_pte(pte_t pte, uint8_t level);

/**
 * Extracts the architecture-specific attributes of the PTE.
 */
uint64_t arch_mm_pte_attrs(pte_t pte, uint8_t level);

/**
 * Merges the attributes of a block into those of its containing table.
 */
uint64_t arch_mm_combine_table_entry_attrs(uint64_t table_attrs,
					   uint64_t block_attrs);
Andrew Scull | 11a4a0c | 2018-12-29 11:38:31 +0000 | [diff] [blame] | 97 | |
/**
 * Invalidates the given range of addresses in the stage-1 TLB for the address
 * space identified by `asid`.
 */
void arch_mm_invalidate_stage1_range(uint16_t asid, vaddr_t va_begin,
				     vaddr_t va_end);

/**
 * Invalidates the given range of addresses in the stage-2 TLB of the VM
 * identified by `vmid`.
 *
 * NOTE(review): the range parameters are intermediate physical addresses
 * (ipaddr_t) yet are named va_begin/va_end; renaming them ipa_begin/ipa_end
 * would better match their type.
 */
void arch_mm_invalidate_stage2_range(uint16_t vmid, ipaddr_t va_begin,
				     ipaddr_t va_end);

/**
 * Writes back the given range of virtual memory to such a point that all cores
 * and devices will see the updated values. The corresponding cache lines are
 * also invalidated.
 */
void arch_mm_flush_dcache(void *base, size_t size);
Andrew Scull | 11a4a0c | 2018-12-29 11:38:31 +0000 | [diff] [blame] | 116 | |
/**
 * Sets the maximum level allowed in the page table for stage-1, based on the
 * supported number of physical address bits.
 */
void arch_mm_stage1_max_level_set(uint32_t pa_bits);

/**
 * Gets the maximum level allowed in the page table for stage-1.
 */
uint8_t arch_mm_stage1_max_level(void);

/**
 * Gets the maximum level allowed in the page table for stage-2.
 */
uint8_t arch_mm_stage2_max_level(void);

/**
 * Gets the number of concatenated page tables used at the root for stage-1.
 *
 * Tables are concatenated at the root to avoid introducing another level in
 * the page table, meaning the table is shallow and wide. Each level is an
 * extra memory access when walking the table, so keeping it shallow reduces
 * the memory accesses to aid performance.
 */
uint8_t arch_mm_stage1_root_table_count(void);

/**
 * Gets the number of concatenated page tables used at the root for stage-2.
 */
uint8_t arch_mm_stage2_root_table_count(void);
| 146 | |
/**
 * Converts the mode into stage-1 attributes for a block PTE.
 */
uint64_t arch_mm_mode_to_stage1_attrs(uint32_t mode);

/**
 * Converts the mode into stage-2 attributes for a block PTE.
 */
uint64_t arch_mm_mode_to_stage2_attrs(uint32_t mode);

/**
 * Converts the stage-2 block attributes back to the corresponding mode.
 */
uint32_t arch_mm_stage2_attrs_to_mode(uint64_t attrs);

/**
 * Converts the stage-1 block attributes back to the corresponding mode.
 */
uint32_t arch_mm_stage1_attrs_to_mode(uint64_t attrs);
| 166 | |
/**
 * Initializes the arch specific memory management, given the physical address
 * of a page table to use.
 *
 * NOTE(review): the bool return presumably indicates whether initialization
 * succeeded — confirm against the implementation.
 */
bool arch_mm_init(paddr_t table);

/**
 * Return the arch specific mm mode for send/recv pages of the VM with the
 * given FF-A VM ID.
 */
uint32_t arch_mm_extra_attributes_from_vm(ffa_vm_id_t id);

/**
 * Execute any barriers or synchronization that is required
 * by a given architecture, after page table writes.
 */
void arch_mm_sync_table_writes(void);
Federico Recanati | 4fd065d | 2021-12-13 20:06:23 +0100 | [diff] [blame] | 182 | |
/**
 * Returns the maximum supported PA Range index.
 */
uint64_t arch_mm_get_pa_range(void);

/**
 * Returns the maximum supported PA Range in bits, for the given PA Range
 * index (presumably the value returned by arch_mm_get_pa_range() — confirm).
 */
uint32_t arch_mm_get_pa_bits(uint64_t pa_range);

/**
 * Returns the VTCR_EL2 value configured in arch_mm_init.
 */
uintptr_t arch_mm_get_vtcr_el2(void);

/**
 * Returns the VSTCR_EL2 value configured in arch_mm_init.
 */
uintptr_t arch_mm_get_vstcr_el2(void);