#ifndef _MM_H
#define _MM_H

#include <stdbool.h>

#include "arch_mm.h"

struct mm_ptable {
	struct arch_mm_ptable arch;
	pte_t *table;
};

#define PAGE_SIZE (1 << PAGE_BITS)
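
/*
 * Worked example (illustrative only): assuming the common choice of
 * PAGE_BITS == 12, PAGE_SIZE evaluates to 1 << 12 == 4096 bytes (4 KiB).
 * PAGE_BITS itself is provided by the architecture-specific arch_mm.h.
 */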

/* The following are arch-independent page mapping modes. */
#define MM_MODE_R 0x01 /* read */
#define MM_MODE_W 0x02 /* write */
#define MM_MODE_X 0x04 /* execute */
#define MM_MODE_D 0x08 /* device */

/*
 * This flag indicates that memory allocation must not use locks. This is
 * relevant in systems where interlocked operations are only available after
 * virtual memory is enabled.
 */
#define MM_MODE_NOSYNC 0x10

/*
 * This flag indicates that the mapping is intended to be used in a first
 * stage translation table, which might have different encodings for the
 * attribute bits than the second stage table.
 */
#define MM_MODE_STAGE1 0x20
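
/*
 * Illustrative sketch (not part of the interface): the mode values above are
 * single-bit flags, so a request for a readable, writable stage-1 mapping
 * could combine them with bitwise OR:
 *
 *	int mode = MM_MODE_R | MM_MODE_W | MM_MODE_STAGE1;
 */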

/* The following operate on the page table given as their first argument. */
bool mm_ptable_init(struct mm_ptable *t, int mode);
void mm_ptable_dump(struct mm_ptable *t);
bool mm_ptable_map(struct mm_ptable *t, vaddr_t begin, vaddr_t end,
		   paddr_t paddr, int mode);
bool mm_ptable_map_page(struct mm_ptable *t, vaddr_t va, paddr_t pa, int mode);
bool mm_ptable_unmap(struct mm_ptable *t, vaddr_t begin, vaddr_t end, int mode);
void mm_ptable_defrag(struct mm_ptable *t, int mode);

/* The following operate on the internal page table set up by mm_init(). */
bool mm_init(void);
bool mm_map(vaddr_t begin, vaddr_t end, paddr_t paddr, int mode);
bool mm_unmap(vaddr_t begin, vaddr_t end, int mode);
void mm_defrag(void);
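
/*
 * Usage sketch (hypothetical; the region bounds below are placeholders and
 * not values defined by this interface):
 *
 *	vaddr_t begin = ...;
 *	vaddr_t end = ...;
 *	paddr_t pa = ...;
 *
 *	if (!mm_init()) {
 *		... handle initialisation failure ...
 *	}
 *	if (!mm_map(begin, end, pa, MM_MODE_R | MM_MODE_W)) {
 *		... handle mapping failure ...
 *	}
 */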

#endif /* _MM_H */