/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#pragma once

#include <stddef.h>
#include <stdint.h>

#include "hf/addr.h"
#include "hf/mpool.h"

typedef uint32_t mm_mode_t;
typedef uint64_t mm_attr_t;

/**
 * The level of a page table entry (i.e. how deep into the recursive tree
 * structure it is). See also Arm ARM, table D8-14.
 *
 * - `level == 4`: table entries (root)
 * - `level == 3`: table or block entries
 * - `level == 2`: table or block entries
 * - `level == 1`: table or block entries
 * - `level == 0`: page entries
 *
 * NOTE: The Arm ARM uses levels in the opposite order to our code: in the Arm
 * ARM, levels start at 0 (or -1 if 52 bits of PA are used, but that is not
 * supported by Hafnium) and page entries are at level 3. We go in the opposite
 * direction: levels start at 3 or 4 and page entries are at level 0. This is
 * because it makes the arithmetic and bit manipulation easier.
 */
typedef uint8_t mm_level_t;
typedef uint16_t mm_asid_t;
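
/*
 * Worked example (illustrative only): with a 4 KiB granule and a 48-bit
 * input address, a walk the Arm ARM describes as levels 0 -> 1 -> 2 -> 3
 * corresponds to Hafnium levels 3 -> 2 -> 1 -> 0, i.e. Arm ARM level N is
 * Hafnium level (3 - N) in that configuration. A 1 GiB block descriptor
 * therefore sits at Arm ARM level 1, which is Hafnium level 2.
 */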

/*
 * A page table entry (PTE) will take one of the following forms:
 *
 * 1. absent        : There is no mapping.
 * 2. invalid block : Represents a block that is not in the address space.
 * 3. valid block   : Represents a block that is in the address space.
 * 4. table         : Represents a reference to a table of PTEs.
 * See Arm ARM, D8.3 (Translation table descriptor formats).
 */
enum mm_pte_type {
        PTE_TYPE_ABSENT,
        PTE_TYPE_INVALID_BLOCK,
        PTE_TYPE_VALID_BLOCK,
        PTE_TYPE_TABLE,
};
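
/*
 * Background note (illustrative, not a definition used by this code): in the
 * architectural encoding, both the absent and invalid block forms have the
 * descriptor's valid bit clear, so neither translates accesses; an invalid
 * block differs in that it still carries attributes, which is how stage-2
 * state such as "owner of memory lent out" (see MM_MODE_INVALID below) can
 * be recorded without mapping anything. A valid block is encoded as a block
 * descriptor (or a page descriptor at level 0), and a table entry as a table
 * descriptor.
 */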

/* Keep macro alignment */
/* clang-format off */

#define PAGE_SIZE ((size_t)(1 << PAGE_BITS))
#define MM_PTE_PER_PAGE (PAGE_SIZE / sizeof(pte_t))

/* The following are arch-independent page mapping modes. */
#define MM_MODE_R (1U << 0) /* read */
#define MM_MODE_W (1U << 1) /* write */
#define MM_MODE_X (1U << 2) /* execute */
#define MM_MODE_D (1U << 3) /* device */

/*
 * Memory in stage-1 is either valid (present) or invalid (absent).
 *
 * Memory in stage-2 has more states to track sharing, borrowing and giving of
 * memory. The states are made up of three parts:
 *
 * 1. V = valid/invalid    : Whether the memory is part of the VM's address
 *                           space. A fault will be generated if accessed when
 *                           invalid.
 * 2. O = owned/unowned    : Whether the memory is owned by the VM.
 * 3. X = exclusive/shared : Whether access is exclusive to the VM or shared
 *                           with at most one other.
 *
 * These parts compose to form the following states:
 *
 * -  V  O  X : Owner of memory with exclusive access.
 * -  V  O !X : Owner of memory with access shared with at most one other VM.
 * -  V !O  X : Borrower of memory with exclusive access.
 * -  V !O !X : Borrower of memory where access is shared with the owner.
 * - !V  O  X : Owner of memory lent to a VM that has exclusive access.
 *
 * - !V  O !X : Unused. Owner of shared memory always has access.
 * - !V !O  X : Unused. Next entry is used for invalid memory.
 *
 * - !V !O !X : Invalid memory. Memory is unrelated to the VM.
 *
 * Modes are selected so that the owner of exclusive memory is the default.
 */
#define MM_MODE_INVALID (1U << 4)
#define MM_MODE_UNOWNED (1U << 5)
#define MM_MODE_SHARED  (1U << 6)
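
/*
 * Worked example (illustrative, not an exhaustive list used by the code):
 * combining the bits above, and leaving out the access bits MM_MODE_R/W/X
 * for brevity, the stage-2 states map to modes as follows:
 *
 * -  V  O  X : 0 (the default: owner with exclusive access)
 * -  V  O !X : MM_MODE_SHARED
 * -  V !O  X : MM_MODE_UNOWNED
 * -  V !O !X : MM_MODE_UNOWNED | MM_MODE_SHARED
 * - !V  O  X : MM_MODE_INVALID
 * - !V !O !X : MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED
 */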

/* Map page as non-global. */
#define MM_MODE_NG (1U << 8)

/* Specifies if a mapping will be a user mapping (EL0). */
#define MM_MODE_USER (1U << 9)

/* The mask for a mode that is considered unmapped. */
#define MM_MODE_UNMAPPED_MASK (MM_MODE_INVALID | MM_MODE_UNOWNED)

/* clang-format on */

/**
 * Flags for page table operations.
 * - commit: Commit the given range rather than preparing it.
 * - unmap: Unmap the given range rather than mapping it.
 */
struct mm_flags {
        bool commit : 1;
        bool unmap : 1;
};
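
/*
 * Example (illustrative only): an unmap operation would typically be run as
 * a prepare pass followed by a commit pass, e.g.:
 *
 *      struct mm_flags prepare = { .unmap = true };
 *      struct mm_flags commit = { .unmap = true, .commit = true };
 */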

#define MM_PPOOL_ENTRY_SIZE sizeof(struct mm_page_table)

struct mm_page_table {
        alignas(PAGE_SIZE) pte_t entries[MM_PTE_PER_PAGE];
};
static_assert(sizeof(struct mm_page_table) == PAGE_SIZE,
              "A page table must take exactly one page.");
static_assert(alignof(struct mm_page_table) == PAGE_SIZE,
              "A page table must be page aligned.");

struct mm_ptable {
        /**
         * VMID/ASID associated with a page table. ASID 0 is reserved for use
         * by the hypervisor.
         */
        mm_asid_t id;
        /**
         * Address of the root tables.
         * At stage 1, concatenated tables are not used, so there is only one
         * root table.
         * At stage 2, concatenated tables are used, so there are multiple root
         * tables (given by `arch_mm_root_table_count()`). The Arm ARM says
         * there can be up to 16 root tables, but we only use 4.
         */
        struct mm_page_table *root_tables;
        /** If true, the PT is a stage-1 PT, otherwise it is a stage-2 PT. */
        bool stage1 : 1;
};
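
/*
 * Worked example (illustrative; the exact stage-2 configuration is chosen by
 * the arch code): concatenating root tables widens the input address range by
 * one bit per doubling, so the 4 root tables used here give 2 extra bits of
 * IPA space compared to a single root table, at the cost of 4 contiguous
 * pages for the root level.
 */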

/** The type of addresses stored in the page table. */
typedef uintvaddr_t ptable_addr_t;

/** Represents the currently locked stage-1 page table of the hypervisor. */
struct mm_stage1_locked {
        struct mm_ptable *ptable;
};

void mm_vm_enable_invalidation(void);

bool mm_ptable_init(struct mm_ptable *ptable, mm_asid_t id, bool stage1,
                    struct mpool *ppool);
ptable_addr_t mm_ptable_addr_space_end(const struct mm_ptable *ptable);

bool mm_vm_init(struct mm_ptable *ptable, mm_asid_t id, struct mpool *ppool);
void mm_vm_fini(const struct mm_ptable *ptable, struct mpool *ppool);

bool mm_identity_prepare(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
                         mm_mode_t mode, struct mpool *ppool);
void *mm_identity_commit(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
                         mm_mode_t mode, struct mpool *ppool);
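
/*
 * Illustrative usage sketch (assumed calling pattern, not taken from this
 * header): the prepare/commit split lets a caller reserve all page-table
 * memory from `ppool` up front, so the later commit step cannot fail
 * part-way through an update. The same pattern applies to the
 * mm_vm_identity_prepare()/mm_vm_identity_commit() pair below.
 *
 *      if (mm_identity_prepare(ptable, begin, end, mode, ppool)) {
 *              va = mm_identity_commit(ptable, begin, end, mode, ppool);
 *      }
 */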

bool mm_vm_identity_map(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
                        mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa);
bool mm_vm_identity_prepare(struct mm_ptable *ptable, paddr_t begin,
                            paddr_t end, mm_mode_t mode, struct mpool *ppool);
void mm_vm_identity_commit(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
                           mm_mode_t mode, struct mpool *ppool, ipaddr_t *ipa);
bool mm_vm_unmap(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
                 struct mpool *ppool);
void mm_stage1_defrag(struct mm_ptable *ptable, struct mpool *ppool);
void mm_vm_defrag(struct mm_ptable *ptable, struct mpool *ppool,
                  bool non_secure);
void mm_vm_dump(const struct mm_ptable *ptable);
bool mm_vm_get_mode(const struct mm_ptable *ptable, ipaddr_t begin,
                    ipaddr_t end, mm_mode_t *mode);

bool mm_vm_get_mode_partial(const struct mm_ptable *ptable, ipaddr_t begin,
                            ipaddr_t end, mm_mode_t *mode, ipaddr_t *end_ret);

bool mm_get_mode(const struct mm_ptable *ptable, vaddr_t begin, vaddr_t end,
                 mm_mode_t *mode);

bool mm_get_mode_partial(const struct mm_ptable *ptable, vaddr_t begin,
                         vaddr_t end, mm_mode_t *mode, vaddr_t *end_ret);

struct mm_stage1_locked mm_lock_ptable_unsafe(struct mm_ptable *ptable);
struct mm_stage1_locked mm_lock_stage1(void);
void mm_unlock_stage1(struct mm_stage1_locked *lock);
void *mm_identity_map(struct mm_stage1_locked stage1_locked, paddr_t begin,
                      paddr_t end, mm_mode_t mode, struct mpool *ppool);
bool mm_unmap(struct mm_stage1_locked stage1_locked, paddr_t begin, paddr_t end,
              struct mpool *ppool);
void mm_defrag(struct mm_stage1_locked stage1_locked, struct mpool *ppool);
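
/*
 * Illustrative usage sketch (assumed calling pattern, not taken from this
 * header): stage-1 mappings of the hypervisor are made while holding the
 * stage-1 page table lock, e.g.:
 *
 *      struct mm_stage1_locked stage1_locked = mm_lock_stage1();
 *      void *va = mm_identity_map(stage1_locked, begin, end,
 *                                 MM_MODE_R | MM_MODE_W, ppool);
 *      mm_unlock_stage1(&stage1_locked);
 */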

bool mm_init(struct mpool *ppool);