/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#pragma once

#include <stdalign.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#include "hf/arch/mm.h"

#include "hf/addr.h"
#include "hf/mpool.h"
#include "hf/static_assert.h"

/* Keep macro alignment */
/* clang-format off */

#define PAGE_SIZE ((size_t)(1 << PAGE_BITS))
#define MM_PTE_PER_PAGE (PAGE_SIZE / sizeof(pte_t))

/* The following are arch-independent page mapping modes. */
#define MM_MODE_R UINT32_C(0x0001) /* read */
#define MM_MODE_W UINT32_C(0x0002) /* write */
#define MM_MODE_X UINT32_C(0x0004) /* execute */
#define MM_MODE_D UINT32_C(0x0008) /* device */

/*
 * Memory in stage-1 is either valid (present) or invalid (absent).
 *
 * Memory in stage-2 has more states to track sharing, borrowing and giving of
 * memory. The states are made up of three parts:
 *
 *  1. V = valid/invalid    : Whether the memory is part of the VM's address
 *                            space. A fault will be generated if accessed when
 *                            invalid.
 *  2. O = owned/unowned    : Whether the memory is owned by the VM.
 *  3. X = exclusive/shared : Whether access is exclusive to the VM or shared
 *                            with at most one other.
 *
 * These parts compose to form the following states:
 *
 *  -  V  O  X : Owner of memory with exclusive access.
 *  -  V  O !X : Owner of memory with access shared with at most one other VM.
 *  -  V !O  X : Borrower of memory with exclusive access.
 *  -  V !O !X : Borrower of memory where access is shared with the owner.
 *  - !V  O  X : Owner of memory lent to a VM that has exclusive access.
 *
 *  - !V  O !X : Unused. Owner of shared memory always has access.
 *  - !V !O  X : Unused. Next entry is used for invalid memory.
 *
 *  - !V !O !X : Invalid memory. Memory is unrelated to the VM.
 *
 * Modes are selected so that owner of exclusive memory is the default.
 */
#define MM_MODE_INVALID UINT32_C(0x0010)
#define MM_MODE_UNOWNED UINT32_C(0x0020)
#define MM_MODE_SHARED  UINT32_C(0x0040)

/* Specifies if a mapping will be a user mapping (EL0). */
#define MM_MODE_USER UINT32_C(0x0200)

/* Map page as non-global. */
#define MM_MODE_NG UINT32_C(0x0100) /* non-global */

/* The mask for a mode that is considered unmapped. */
#define MM_MODE_UNMAPPED_MASK (MM_MODE_INVALID | MM_MODE_UNOWNED)

/* clang-format on */
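
/*
 * Illustrative sketch (not part of the upstream header): a mode is built by
 * OR-ing the access bits with the state bits above. For example, memory a VM
 * has borrowed and shares with its owner (the V !O !X state) with read/write
 * access would be described as
 *
 *   uint32_t mode = MM_MODE_R | MM_MODE_W | MM_MODE_UNOWNED | MM_MODE_SHARED;
 *
 * and any mode with both MM_MODE_INVALID and MM_MODE_UNOWNED set, i.e. one
 * where (mode & MM_MODE_UNMAPPED_MASK) == MM_MODE_UNMAPPED_MASK, corresponds
 * to the !V !O rows and is treated as unmapped.
 */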

struct mm_flags {
	bool commit : 1;
	bool unmap : 1;
	bool stage1 : 1;
};

#define MM_PPOOL_ENTRY_SIZE sizeof(struct mm_page_table)

struct mm_page_table {
	alignas(PAGE_SIZE) pte_t entries[MM_PTE_PER_PAGE];
};
static_assert(sizeof(struct mm_page_table) == PAGE_SIZE,
	      "A page table must take exactly one page.");
static_assert(alignof(struct mm_page_table) == PAGE_SIZE,
	      "A page table must be page aligned.");

struct mm_ptable {
	/**
	 * VMID/ASID associated with a page table. ASID 0 is reserved for use by
	 * the hypervisor.
	 */
	uint16_t id;
	/** Address of the root of the page table. */
	paddr_t root;
};

/** The type of addresses stored in the page table. */
typedef uintvaddr_t ptable_addr_t;

/** Represents the currently locked stage-1 page table of the hypervisor. */
struct mm_stage1_locked {
	struct mm_ptable *ptable;
};

void mm_vm_enable_invalidation(void);

bool mm_ptable_init(struct mm_ptable *ptable, uint16_t id,
		    struct mm_flags flags, struct mpool *ppool);
ptable_addr_t mm_ptable_addr_space_end(struct mm_flags flags);
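
/*
 * Illustrative usage sketch (assumes a populated `ppool`; not taken from the
 * upstream documentation): creating the hypervisor's own stage-1 table, using
 * ASID 0, which is reserved for the hypervisor as noted above.
 *
 *   struct mm_ptable ptable;
 *   struct mm_flags flags = {.stage1 = true};
 *
 *   if (!mm_ptable_init(&ptable, 0, flags, ppool)) {
 *           // The page pool could not supply the root table pages.
 *   }
 */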

bool mm_vm_init(struct mm_ptable *ptable, uint16_t id, struct mpool *ppool);
void mm_vm_fini(const struct mm_ptable *ptable, struct mpool *ppool);

bool mm_identity_prepare(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool);
void *mm_identity_commit(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool);
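
/*
 * Illustrative sketch of the prepare/commit split (an interpretation of the
 * naming, not upstream documentation): the prepare step performs the
 * allocations that may fail, so that a later commit for the same range can
 * complete without error, e.g.
 *
 *   if (mm_identity_prepare(ptable, begin, end, mode, ppool)) {
 *           void *va = mm_identity_commit(ptable, begin, end, mode, ppool);
 *           (void)va;
 *   }
 */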

bool mm_vm_identity_map(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
bool mm_vm_identity_prepare(struct mm_ptable *ptable, paddr_t begin,
			    paddr_t end, uint32_t mode, struct mpool *ppool);
void mm_vm_identity_commit(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
			   uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
bool mm_vm_unmap(struct mm_ptable *ptable, paddr_t begin, paddr_t end,
		 struct mpool *ppool);
void mm_stage1_defrag(struct mm_ptable *ptable, struct mpool *ppool);
void mm_vm_defrag(struct mm_ptable *ptable, struct mpool *ppool,
		  bool non_secure);
void mm_vm_dump(const struct mm_ptable *ptable);
bool mm_vm_get_mode(const struct mm_ptable *ptable, ipaddr_t begin,
		    ipaddr_t end, uint32_t *mode);
bool mm_get_mode(const struct mm_ptable *ptable, vaddr_t begin, vaddr_t end,
		 uint32_t *mode);
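
/*
 * Illustrative sketch (assumes an initialised VM page table and pool; not
 * taken from the upstream documentation): identity-mapping a physical range
 * into a VM's stage-2 table and retrieving the resulting IPA.
 *
 *   ipaddr_t ipa;
 *
 *   if (!mm_vm_identity_map(ptable, begin, end,
 *                           MM_MODE_R | MM_MODE_W | MM_MODE_X, ppool, &ipa)) {
 *           // Out of memory in `ppool`; the range was not mapped.
 *   }
 */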

struct mm_stage1_locked mm_lock_ptable_unsafe(struct mm_ptable *ptable);
struct mm_stage1_locked mm_lock_stage1(void);
void mm_unlock_stage1(struct mm_stage1_locked *lock);
void *mm_identity_map(struct mm_stage1_locked stage1_locked, paddr_t begin,
		      paddr_t end, uint32_t mode, struct mpool *ppool);
bool mm_unmap(struct mm_stage1_locked stage1_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool);
void mm_defrag(struct mm_stage1_locked stage1_locked, struct mpool *ppool);
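
/*
 * Illustrative usage sketch for the stage-1 API (assumes a device region
 * [begin, end) and a populated `ppool`; not taken from the upstream
 * documentation):
 *
 *   struct mm_stage1_locked locked = mm_lock_stage1();
 *   void *va = mm_identity_map(locked, begin, end,
 *                              MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool);
 *
 *   if (va == NULL) {
 *           // Mapping failed, e.g. the page pool was exhausted.
 *   }
 *   mm_unlock_stage1(&locked);
 */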

bool mm_init(struct mpool *ppool);