/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#pragma once

#include <stdbool.h>
#include <stddef.h>

#include "hf/addr.h"
#include "hf/mm.h"

#include "vmapi/hf/ffa.h"

/**
 * Creates an absent PTE.
 */
pte_t arch_mm_absent_pte(mm_level_t level);

/**
 * Creates a table PTE.
 */
pte_t arch_mm_table_pte(mm_level_t level, paddr_t pa);

/**
 * Creates a block PTE.
 */
pte_t arch_mm_block_pte(mm_level_t level, paddr_t pa, mm_attr_t attrs);

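/**
 * Returns the type of the given PTE at the given level of the page table:
 * absent, invalid block, valid block or table.
 */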
enum mm_pte_type arch_mm_pte_type(pte_t pte, mm_level_t level);

/**
 * Checks whether a block is allowed at the given level of the page table.
 */
bool arch_mm_is_block_allowed(mm_level_t level);

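/**
 * Determines if a PTE is absent, i.e. it contains no information and
 * therefore does not need to exist in the page table.
 */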
static inline bool arch_mm_pte_is_absent(pte_t pte, mm_level_t level)
{
	return arch_mm_pte_type(pte, level) == PTE_TYPE_ABSENT;
}

/**
 * Determines if a PTE is present, i.e. it contains information and therefore
 * needs to exist in the page table. Any non-absent PTE is present.
 */
static inline bool arch_mm_pte_is_present(pte_t pte, mm_level_t level)
{
	return !arch_mm_pte_is_absent(pte, level);
}

55/**
56 * Determines if a PTE is valid i.e. it can affect the address space. Tables and
57 * valid blocks fall into this category. Invalid blocks do not as they hold
58 * information about blocks that are not in the address space.
59 */
Karl Meakin23122e12025-02-05 14:44:20 +000060static inline bool arch_mm_pte_is_valid(pte_t pte, mm_level_t level)
61{
62 switch (arch_mm_pte_type(pte, level)) {
63 case PTE_TYPE_ABSENT:
64 case PTE_TYPE_INVALID_BLOCK:
65 return false;
66 case PTE_TYPE_VALID_BLOCK:
67 case PTE_TYPE_TABLE:
68 return true;
69 }
70}
Andrew Scullc66a04d2018-12-07 13:41:56 +000071
/**
 * Determines if a PTE is a block and represents an address range, valid or
 * invalid.
 */
static inline bool arch_mm_pte_is_block(pte_t pte, mm_level_t level)
{
	switch (arch_mm_pte_type(pte, level)) {
	case PTE_TYPE_ABSENT:
	case PTE_TYPE_TABLE:
		return false;
	case PTE_TYPE_INVALID_BLOCK:
	case PTE_TYPE_VALID_BLOCK:
		return true;
	}
}

/**
 * Determines if a PTE represents a reference to a table of PTEs.
 */
static inline bool arch_mm_pte_is_table(pte_t pte, mm_level_t level)
{
	return arch_mm_pte_type(pte, level) == PTE_TYPE_TABLE;
}

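/*
 * Illustrative sketch (not part of this header): how a page table walker
 * might use the helpers above to install a block mapping in an empty slot.
 * The entry pointer, level, address and attributes are assumed to be
 * supplied by the caller's walk.
 *
 *	static bool example_install_block(pte_t *entry, mm_level_t level,
 *					  paddr_t pa, mm_attr_t attrs)
 *	{
 *		if (!arch_mm_pte_is_absent(*entry, level) ||
 *		    !arch_mm_is_block_allowed(level)) {
 *			return false;
 *		}
 *		*entry = arch_mm_block_pte(level, pa, attrs);
 *		return arch_mm_pte_is_block(*entry, level);
 *	}
 */
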
/**
 * Clears the bits of an address that are ignored by the page table. In effect,
 * the address is rounded down to the start of the corresponding PTE range.
 */
paddr_t arch_mm_clear_pa(paddr_t pa);

/**
 * Extracts the start address of the PTE range.
 */
paddr_t arch_mm_block_from_pte(pte_t pte, mm_level_t level);

/**
 * Extracts the address of the table referenced by the PTE.
 */
paddr_t arch_mm_table_from_pte(pte_t pte, mm_level_t level);

/**
 * Extracts the attributes of the PTE.
 */
mm_attr_t arch_mm_pte_attrs(pte_t pte, mm_level_t level);

/**
 * Merges the attributes of a block into those of its parent table.
 */
mm_attr_t arch_mm_combine_table_entry_attrs(mm_attr_t table_attrs,
					    mm_attr_t block_attrs);

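/*
 * Illustrative sketch (not part of this header): accumulating the effective
 * attributes of a mapping while descending through a table entry. The
 * table_pte, block_pte and level values are assumed to come from the caller's
 * walk, with the child entry one level below its parent.
 *
 *	mm_attr_t attrs = arch_mm_pte_attrs(table_pte, level);
 *	attrs = arch_mm_combine_table_entry_attrs(
 *		attrs, arch_mm_pte_attrs(block_pte, level - 1));
 *	paddr_t block_begin = arch_mm_block_from_pte(block_pte, level - 1);
 */
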
/**
 * Invalidates the given range of the stage-1 TLB.
 */
void arch_mm_invalidate_stage1_range(ffa_id_t asid, vaddr_t va_begin,
				     vaddr_t va_end);

/**
 * Invalidates the given range of the stage-2 TLB.
 */
void arch_mm_invalidate_stage2_range(ffa_id_t vmid, ipaddr_t va_begin,
				     ipaddr_t va_end, bool non_secure);

/**
 * Writes back the given range of virtual memory so that all cores and devices
 * will see the updated values, and invalidates the corresponding cache lines.
 */
void arch_mm_flush_dcache(void *base, size_t size);

/**
 * Sets the maximum level allowed in the page table for stage-1.
 */
void arch_mm_stage1_max_level_set(uint32_t pa_bits);

/**
 * Gets the maximum level allowed in the page table for stage-1.
 */
mm_level_t arch_mm_stage1_max_level(void);

/**
 * Gets the maximum level allowed in the page table for stage-2.
 */
mm_level_t arch_mm_stage2_max_level(void);

/**
 * Gets the number of concatenated page tables used at the root for stage-1.
 *
 * Tables are concatenated at the root to avoid introducing another level in
 * the page table, keeping the table shallow and wide. Each level adds an
 * extra memory access when walking the table, so keeping the table shallow
 * reduces the number of memory accesses and aids performance.
 */
uint8_t arch_mm_stage1_root_table_count(void);

/**
 * Gets the number of concatenated page tables used at the root for stage-2.
 */
uint8_t arch_mm_stage2_root_table_count(void);

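/*
 * As an illustration (assuming aarch64 with a 4KiB translation granule): a
 * 40-bit stage-2 address space can be covered by two concatenated tables at
 * the initial lookup level, rather than adding a top level whose table would
 * hold only two entries, saving one memory access on every walk.
 */
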
/**
 * Converts the mode into stage-1 attributes for a block PTE.
 */
mm_attr_t arch_mm_mode_to_stage1_attrs(mm_mode_t mode);

/**
 * Converts the mode into stage-2 attributes for a block PTE.
 */
mm_attr_t arch_mm_mode_to_stage2_attrs(mm_mode_t mode);

/**
 * Converts the stage-2 block attributes back to the corresponding mode.
 */
mm_mode_t arch_mm_stage2_attrs_to_mode(mm_attr_t attrs);

/**
 * Converts the stage-1 block attributes back to the corresponding mode.
 */
mm_mode_t arch_mm_stage1_attrs_to_mode(mm_attr_t attrs);

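/*
 * Illustrative sketch (not part of this header): round-tripping a mode
 * through stage-2 attributes for a new block entry. MM_MODE_R and MM_MODE_W
 * are assumed to be the read/write mode bits from "hf/mm.h"; level and pa
 * are assumed to come from the caller.
 *
 *	mm_attr_t attrs = arch_mm_mode_to_stage2_attrs(MM_MODE_R | MM_MODE_W);
 *	pte_t pte = arch_mm_block_pte(level, pa, attrs);
 *	mm_mode_t mode =
 *		arch_mm_stage2_attrs_to_mode(arch_mm_pte_attrs(pte, level));
 */
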
/**
 * Initializes the arch-specific memory management.
 */
bool arch_mm_init(paddr_t table);

/**
 * Returns the arch-specific mm mode for the send/recv pages of the given VM
 * ID.
 */
mm_mode_t arch_mm_extra_mode_from_vm(ffa_id_t id);

/**
 * Executes any barriers or synchronization required by the architecture after
 * page table writes.
 */
void arch_mm_sync_table_writes(void);

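/*
 * Illustrative sketch (not part of this header): one plausible ordering when
 * updating a stage-2 table, making the new entries observable before the old
 * translations are discarded. All variables are assumed to come from the
 * caller.
 *
 *	table[index] = arch_mm_block_pte(level, pa, attrs);
 *	arch_mm_sync_table_writes();
 *	arch_mm_invalidate_stage2_range(vmid, ipa_begin, ipa_end, non_secure);
 */
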
/**
 * Returns the maximum supported PA Range index.
 */
uint64_t arch_mm_get_pa_range(void);

/**
 * Returns the maximum supported PA Range in bits.
 */
uint32_t arch_mm_get_pa_bits(uint64_t pa_range);

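/*
 * Illustrative usage (a sketch, not mandated by this header): querying the
 * physical address size supported by the hardware.
 *
 *	uint32_t pa_bits = arch_mm_get_pa_bits(arch_mm_get_pa_range());
 */
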
/**
 * Returns the VTCR_EL2 value configured in arch_mm_init.
 */
uintptr_t arch_mm_get_vtcr_el2(void);

/**
 * Returns the VSTCR_EL2 value configured in arch_mm_init.
 */
uintptr_t arch_mm_get_vstcr_el2(void);