/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#pragma once

#include <stdalign.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#include "hf/addr.h"
#include "hf/mpool.h"
#include "hf/static_assert.h"

typedef uint32_t mm_mode_t;
typedef uint64_t mm_attr_t;

/**
 * The level of a page table entry (i.e. how deep into the recursive tree
 * structure it is). See also Arm ARM, table D8-14.
 *
 * - `level == 4`: table entries (root)
 * - `level == 3`: table or block entries
 * - `level == 2`: table or block entries
 * - `level == 1`: table or block entries
 * - `level == 0`: page entries
 *
 * NOTE: The Arm ARM uses levels in the opposite order to our code: in the Arm
 * ARM, levels start at 0 (or -1 if 52 bits of PA are used, but that is not
 * supported by Hafnium) and page entries are at level 3. We go in the opposite
 * direction: levels start at 3 or 4 and page entries are at level 0. This is
 * because it makes the arithmetic and bit manipulation easier.
 */
typedef uint8_t mm_level_t;
typedef uint16_t mm_asid_t;
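
/*
 * With levels counted upwards from the page entries, the size of the region
 * mapped by an entry is a simple function of its level. A minimal sketch of
 * the arithmetic this ordering buys us, assuming the arch layer's PAGE_BITS
 * and 9 translation bits per level (AArch64, 4KiB granule); the helper name
 * is hypothetical:
 *
 *	static inline uint64_t mm_entry_size_example(mm_level_t level)
 *	{
 *		return UINT64_C(1) << (PAGE_BITS + level * 9);
 *	}
 *
 * e.g. a level 0 entry maps one 4KiB page and a level 1 entry maps a 2MiB
 * block.
 */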

/*
 * A page table entry (PTE) will take one of the following forms:
 *
 *  1. absent        : There is no mapping.
 *  2. invalid block : Represents a block that is not in the address space.
 *  3. valid block   : Represents a block that is in the address space.
 *  4. table         : Represents a reference to a table of PTEs.
 *
 * See Arm ARM, D8.3 (Translation table descriptor formats).
 */
enum mm_pte_type {
	PTE_TYPE_ABSENT,
	PTE_TYPE_INVALID_BLOCK,
	PTE_TYPE_VALID_BLOCK,
	PTE_TYPE_TABLE,
};
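
/*
 * On AArch64 the PTE type follows from the low descriptor bits (Arm ARM,
 * D8.3): bit 0 marks a valid entry and, above level 0, bit 1 distinguishes
 * tables from blocks. A hypothetical classifier in that spirit (the real
 * code also consults software-defined bits to tell absent entries from
 * invalid blocks):
 *
 *	enum mm_pte_type mm_pte_type_example(pte_t pte, mm_level_t level)
 *	{
 *		if ((pte & 0x1) == 0) {
 *			return pte == 0 ? PTE_TYPE_ABSENT
 *					: PTE_TYPE_INVALID_BLOCK;
 *		}
 *		if (level > 0 && (pte & 0x2) != 0) {
 *			return PTE_TYPE_TABLE;
 *		}
 *		return PTE_TYPE_VALID_BLOCK;
 *	}
 */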

/* Keep macro alignment */
/* clang-format off */

#define PAGE_SIZE ((size_t)(1 << PAGE_BITS))
#define MM_PTE_PER_PAGE (PAGE_SIZE / sizeof(pte_t))

/* The following are arch-independent page mapping modes. */
#define MM_MODE_R (1U << 0) /* read */
#define MM_MODE_W (1U << 1) /* write */
#define MM_MODE_X (1U << 2) /* execute */
#define MM_MODE_D (1U << 3) /* device */

/*
 * Memory in stage-1 is either valid (present) or invalid (absent).
 *
 * Memory in stage-2 has more states to track sharing, borrowing and giving of
 * memory. The states are made up of three parts:
 *
 *  1. V = valid/invalid    : Whether the memory is part of the VM's address
 *                            space. A fault will be generated if accessed when
 *                            invalid.
 *  2. O = owned/unowned    : Whether the memory is owned by the VM.
 *  3. X = exclusive/shared : Whether access is exclusive to the VM or shared
 *                            with at most one other.
 *
 * These parts compose to form the following states:
 *
 *  -  V  O  X : Owner of memory with exclusive access.
 *  -  V  O !X : Owner of memory with access shared with at most one other VM.
 *  -  V !O  X : Borrower of memory with exclusive access.
 *  -  V !O !X : Borrower of memory where access is shared with the owner.
 *  - !V  O  X : Owner of memory lent to a VM that has exclusive access.
 *
 *  - !V  O !X : Unused. Owner of shared memory always has access.
 *  - !V !O  X : Unused. Next entry is used for invalid memory.
 *
 *  - !V !O !X : Invalid memory. Memory is unrelated to the VM.
 *
 * Modes are selected so that owner of exclusive memory is the default.
 */
#define MM_MODE_INVALID (1U << 4)
#define MM_MODE_UNOWNED (1U << 5)
#define MM_MODE_SHARED  (1U << 6)

/* Map page as non-global. */
#define MM_MODE_NG (1U << 8)

/* Specifies whether a mapping will be a user mapping (EL0). */
#define MM_MODE_USER (1U << 9)

/* The mask for a mode that is considered unmapped. */
#define MM_MODE_UNMAPPED_MASK (MM_MODE_INVALID | MM_MODE_UNOWNED)

/* clang-format on */
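
/*
 * Illustrative mode compositions for the stage-2 states above; the owner of
 * exclusive memory is the default, so it sets none of these bits:
 *
 *	mm_mode_t lent = MM_MODE_R | MM_MODE_INVALID;             // !V O X
 *	mm_mode_t borrowed =
 *		MM_MODE_R | MM_MODE_UNOWNED | MM_MODE_SHARED;     //  V !O !X
 *
 * and a sketch of testing whether a mode counts as unmapped:
 *
 *	bool unmapped =
 *		(mode & MM_MODE_UNMAPPED_MASK) == MM_MODE_UNMAPPED_MASK;
 */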

struct mm_flags {
	bool commit : 1;
	bool unmap : 1;
	bool stage1 : 1;
};

#define MM_PPOOL_ENTRY_SIZE sizeof(struct mm_page_table)

struct mm_page_table {
	alignas(PAGE_SIZE) pte_t entries[MM_PTE_PER_PAGE];
};
static_assert(sizeof(struct mm_page_table) == PAGE_SIZE,
	      "A page table must take exactly one page.");
static_assert(alignof(struct mm_page_table) == PAGE_SIZE,
	      "A page table must be page aligned.");

struct mm_ptable {
	/**
	 * VMID/ASID associated with a page table. ASID 0 is reserved for use
	 * by the hypervisor.
	 */
	mm_asid_t id;
	/**
	 * Address of the root tables.
	 * At stage 1, concatenated tables are not used, so there is only one
	 * root table.
	 * At stage 2, concatenated tables are used, so there are multiple root
	 * tables (given by `arch_mm_root_table_count()`). The Arm ARM says
	 * there can be up to 16 root tables, but we only use 4.
	 */
	struct mm_page_table *root_tables;
};
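
/*
 * Since stage-2 root tables are concatenated, walking a stage-2 table starts
 * from an array of roots rather than a single page. A minimal sketch,
 * assuming `arch_mm_root_table_count()` (referenced above) reports how many
 * roots are in use:
 *
 *	for (size_t i = 0; i < arch_mm_root_table_count(); i++) {
 *		struct mm_page_table *root = &ptable->root_tables[i];
 *		// ... visit root->entries[0 .. MM_PTE_PER_PAGE - 1] ...
 *	}
 */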

/** The type of addresses stored in the page table. */
typedef uintvaddr_t ptable_addr_t;

/** Represents the currently locked stage-1 page table of the hypervisor. */
struct mm_stage1_locked {
	struct mm_ptable *ptable;
};

void mm_vm_enable_invalidation(void);

bool mm_ptable_init(struct mm_ptable *ptable, mm_asid_t id,
		    struct mm_flags flags, struct mpool *ppool);
ptable_addr_t mm_ptable_addr_space_end(struct mm_flags flags);

bool mm_vm_init(struct mm_ptable *ptable, mm_asid_t id, struct mpool *ppool);
void mm_vm_fini(const struct mm_ptable *ptable, struct mpool *ppool);

bool mm_identity_prepare(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
			 mm_mode_t mode, struct mpool *ppool);
void *mm_identity_commit(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
			 mm_mode_t mode, struct mpool *ppool);

bool mm_vm_identity_map(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
			mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa);
bool mm_vm_identity_prepare(struct mm_ptable *ptable, paddr_t begin,
			    paddr_t end, mm_mode_t mode, struct mpool *ppool);
void mm_vm_identity_commit(struct mm_ptable *ptable, paddr_t begin,
			   paddr_t end, mm_mode_t mode, struct mpool *ppool,
			   ipaddr_t *ipa);
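
/*
 * The prepare/commit pairs split mapping into a fallible allocation phase and
 * an infallible update phase (the stage-1 mm_identity_prepare/
 * mm_identity_commit pair follows the same contract). A hedged usage sketch,
 * with `ptable`, `begin`, `end`, `mode` and `ppool` assumed to be in scope:
 *
 *	ipaddr_t ipa;
 *
 *	if (mm_vm_identity_prepare(ptable, begin, end, mode, ppool)) {
 *		mm_vm_identity_commit(ptable, begin, end, mode, ppool, &ipa);
 *	}
 */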
bool mm_vm_unmap(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
		 struct mpool *ppool);
void mm_stage1_defrag(struct mm_ptable *ptable, struct mpool *ppool);
void mm_vm_defrag(struct mm_ptable *ptable, struct mpool *ppool,
		  bool non_secure);
void mm_vm_dump(const struct mm_ptable *ptable);
bool mm_vm_get_mode(const struct mm_ptable *ptable, ipaddr_t begin,
		    ipaddr_t end, mm_mode_t *mode);
bool mm_get_mode(const struct mm_ptable *ptable, vaddr_t begin, vaddr_t end,
		 mm_mode_t *mode);

struct mm_stage1_locked mm_lock_ptable_unsafe(struct mm_ptable *ptable);
struct mm_stage1_locked mm_lock_stage1(void);
void mm_unlock_stage1(struct mm_stage1_locked *lock);
void *mm_identity_map(struct mm_stage1_locked stage1_locked, paddr_t begin,
		      paddr_t end, mm_mode_t mode, struct mpool *ppool);
bool mm_unmap(struct mm_stage1_locked stage1_locked, paddr_t begin,
	      paddr_t end, struct mpool *ppool);
void mm_defrag(struct mm_stage1_locked stage1_locked, struct mpool *ppool);
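
/*
 * Typical stage-1 flow (an illustrative sketch, not a prescribed sequence):
 * lock the hypervisor table, map a physical range, use it, then unlock.
 * `begin`, `end` and `ppool` are assumed to be in scope:
 *
 *	struct mm_stage1_locked locked = mm_lock_stage1();
 *	void *va = mm_identity_map(locked, begin, end,
 *				   MM_MODE_R | MM_MODE_W, ppool);
 *
 *	if (va != NULL) {
 *		// ... access the mapped memory through va ...
 *		mm_unmap(locked, begin, end, ppool);
 *	}
 *	mm_unlock_stage1(&locked);
 */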

bool mm_init(struct mpool *ppool);