/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <assert.h>
#include <stdalign.h>
#include <stdbool.h>
#include <stdint.h>

#include "hf/arch/mm.h"

#include "hf/addr.h"
#include "hf/mpool.h"

/* Keep macro alignment */
/* clang-format off */

#define PAGE_SIZE (1 << PAGE_BITS)
#define MM_PTE_PER_PAGE (PAGE_SIZE / sizeof(pte_t))
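
/*
 * Worked example (illustrative only, since PAGE_BITS and pte_t are supplied
 * per architecture via the hf/arch/mm.h include above): on AArch64 with 4KiB
 * translation granules, PAGE_BITS is 12 and sizeof(pte_t) is 8, so PAGE_SIZE
 * is 4096 bytes and MM_PTE_PER_PAGE is 4096 / 8 = 512 entries per table.
 */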

/* The following are arch-independent page mapping modes. */
#define MM_MODE_R 0x0001 /* read */
#define MM_MODE_W 0x0002 /* write */
#define MM_MODE_X 0x0004 /* execute */
#define MM_MODE_D 0x0008 /* device */

/*
 * Memory in stage-1 is either valid (present) or invalid (absent).
 *
 * Memory in stage-2 has more states to track sharing, borrowing and giving of
 * memory. The states are made up of three parts:
 *
 *  1. V = valid/invalid    : Whether the memory is part of the VM's address
 *                            space. A fault will be generated if accessed when
 *                            invalid.
 *  2. O = owned/unowned    : Whether the memory is owned by the VM.
 *  3. X = exclusive/shared : Whether access is exclusive to the VM or shared
 *                            with at most one other.
 *
 * These parts compose to form the following states:
 *
 *  -  V  O  X : Owner of memory with exclusive access.
 *  -  V  O !X : Owner of memory with access shared with at most one other VM.
 *  -  V !O  X : Borrower of memory with exclusive access.
 *  -  V !O !X : Borrower of memory where access is shared with the owner.
 *  - !V  O  X : Owner of memory lent to a VM that has exclusive access.
 *
 *  - !V  O !X : Unused. Owner of shared memory always has access.
 *
 *  - !V !O  X : Invalid memory. Memory is unrelated to the VM.
 *  - !V !O !X : Invalid memory. Memory is unrelated to the VM.
 *
 * Modes are selected so that the owner of exclusive memory is the default.
 */
#define MM_MODE_INVALID 0x0010
#define MM_MODE_UNOWNED 0x0020
#define MM_MODE_SHARED  0x0040

/* clang-format on */
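
/*
 * Hypothetical helper, sketched here only to illustrate how the mode bits
 * compose; it is not part of the mm API. Per the table above, a stage-2 mode
 * with none of the three state bits set, e.g. (MM_MODE_R | MM_MODE_W), is the
 * default "valid, owned, exclusive" state; a lender's view of lent memory
 * would add MM_MODE_INVALID, and a borrower of memory shared with the owner
 * would add (MM_MODE_UNOWNED | MM_MODE_SHARED).
 */
static inline bool mm_mode_is_owner_exclusive(int mode)
{
	return (mode &
		(MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED)) == 0;
}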

struct mm_page_table {
	alignas(PAGE_SIZE) pte_t entries[MM_PTE_PER_PAGE];
};
static_assert(sizeof(struct mm_page_table) == PAGE_SIZE,
	      "A page table must take exactly one page.");
static_assert(alignof(struct mm_page_table) == PAGE_SIZE,
	      "A page table must be page aligned.");

struct mm_ptable {
	/** Address of the root of the page table. */
	paddr_t root;
};

void mm_vm_enable_invalidation(void);

bool mm_vm_init(struct mm_ptable *t, struct mpool *ppool);
void mm_vm_fini(struct mm_ptable *t, struct mpool *ppool);
bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
			int mode, ipaddr_t *ipa, struct mpool *ppool);
bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end,
		 struct mpool *ppool);
bool mm_vm_unmap_hypervisor(struct mm_ptable *t, struct mpool *ppool);
void mm_vm_defrag(struct mm_ptable *t, struct mpool *ppool);
void mm_vm_dump(struct mm_ptable *t);
bool mm_vm_get_mode(struct mm_ptable *t, ipaddr_t begin, ipaddr_t end,
		    int *mode);
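
/*
 * Usage sketch (illustrative only; the range, mode and error handling are
 * placeholders, not what the hypervisor actually maps): the typical lifecycle
 * of a VM's stage-2 table using the functions declared above.
 */
static inline bool mm_vm_lifecycle_sketch(struct mm_ptable *t, paddr_t begin,
					  paddr_t end, struct mpool *ppool)
{
	ipaddr_t ipa;

	/* Allocate the root table from the page pool. */
	if (!mm_vm_init(t, ppool)) {
		return false;
	}

	/* Identity map [begin, end) as read/write memory; the IPA
	 * corresponding to `begin` is reported through `ipa`. */
	if (!mm_vm_identity_map(t, begin, end, MM_MODE_R | MM_MODE_W, &ipa,
				ppool)) {
		mm_vm_fini(t, ppool);
		return false;
	}

	/* ... the VM runs with this address space ... */

	/* Remove the mapping and return the table's pages to the pool. */
	mm_vm_unmap(t, begin, end, ppool);
	mm_vm_fini(t, ppool);

	return true;
}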

bool mm_init(struct mpool *ppool);
bool mm_cpu_init(void);
void *mm_identity_map(paddr_t begin, paddr_t end, int mode,
		      struct mpool *ppool);
bool mm_unmap(paddr_t begin, paddr_t end, struct mpool *ppool);
void mm_defrag(struct mpool *ppool);
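
/*
 * Usage sketch (illustrative only; the real boot path lives in the .c files):
 * mm_init() sets up the hypervisor's own stage-1 table once at boot and each
 * CPU then calls mm_cpu_init() (per-CPU setup, judging by its name). After
 * that, ranges such as a device region can be mapped into the hypervisor's
 * address space. This assumes mm_identity_map() returns NULL on failure, as
 * its pointer return type suggests.
 */
static inline void *mm_map_device_sketch(paddr_t begin, paddr_t end,
					 struct mpool *ppool)
{
	/* Identity map the range as read/write device memory; because the
	 * mapping is an identity mapping, the returned pointer corresponds
	 * to the physical base address. */
	return mm_identity_map(begin, end,
			       MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool);
}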