blob: 74158a7b5481e930999c0b982bb936235eb01c39 [file] [log] [blame]
Wedson Almeida Filhofed69022018-07-11 15:39:12 +01001#ifndef _MM_H
2#define _MM_H
3
4#include <stdbool.h>
Wedson Almeida Filho84a30a02018-07-23 20:05:05 +01005#include <stdint.h>
Wedson Almeida Filhofed69022018-07-11 15:39:12 +01006
Andrew Scull8dce4982018-08-06 13:02:20 +01007#include "addr.h"
Wedson Almeida Filhofed69022018-07-11 15:39:12 +01008#include "arch_mm.h"
9
/**
 * A page table with the metadata needed to manage and identify it.
 */
struct mm_ptable {
	/* Physical address of the root of the page table. */
	paddr_t table;
	/*
	 * NOTE(review): presumably an address-space/VM identifier used to
	 * tag TLB entries for invalidation — confirm against mm_ptable_init
	 * callers; SOURCE does not show how it is consumed.
	 */
	uint32_t id;
};
14
15#define PAGE_SIZE (1 << PAGE_BITS)
16
17/* The following are arch-independent page mapping modes. */
18#define MM_MODE_R 0x01 /* read */
19#define MM_MODE_W 0x02 /* write */
20#define MM_MODE_X 0x04 /* execute */
21#define MM_MODE_D 0x08 /* device */
22
23/*
24 * This flag indicates that memory allocation must not use locks. This is
25 * relevant in systems where interlocked operations are only available after
26 * virtual memory is enabled.
27 */
28#define MM_MODE_NOSYNC 0x10
29
30/*
31 * This flag indicates that the mapping is intended to be used in a first
32 * stage translation table, which might have different encodings for the
33 * attribute bits than the second stage table.
34 */
35#define MM_MODE_STAGE1 0x20
36
Wedson Almeida Filho84a30a02018-07-23 20:05:05 +010037/*
38 * This flag indicates that no TLB invalidations should be issued for the
39 * changes in the page table.
40 */
41#define MM_MODE_NOINVALIDATE 0x40
42
43bool mm_ptable_init(struct mm_ptable *t, uint32_t id, int mode);
44void mm_ptable_dump(struct mm_ptable *t, int mode);
Andrew Scullfe636b12018-07-30 14:15:54 +010045bool mm_ptable_identity_map(struct mm_ptable *t, vaddr_t begin, vaddr_t end,
46 int mode);
47bool mm_ptable_identity_map_page(struct mm_ptable *t, vaddr_t va, int mode);
Wedson Almeida Filhofed69022018-07-11 15:39:12 +010048bool mm_ptable_unmap(struct mm_ptable *t, vaddr_t begin, vaddr_t end, int mode);
Wedson Almeida Filho2f94ec12018-07-26 16:00:48 +010049bool mm_ptable_is_mapped(struct mm_ptable *t, vaddr_t addr, int mode);
Wedson Almeida Filhofdf4afc2018-07-19 15:45:21 +010050void mm_ptable_defrag(struct mm_ptable *t, int mode);
Wedson Almeida Filho84a30a02018-07-23 20:05:05 +010051bool mm_ptable_unmap_hypervisor(struct mm_ptable *t, int mode);
Wedson Almeida Filhofdf4afc2018-07-19 15:45:21 +010052
53bool mm_init(void);
Wedson Almeida Filho03e767a2018-07-30 15:32:03 +010054bool mm_cpu_init(void);
Andrew Scullfe636b12018-07-30 14:15:54 +010055bool mm_identity_map(vaddr_t begin, vaddr_t end, int mode);
Wedson Almeida Filhofdf4afc2018-07-19 15:45:21 +010056bool mm_unmap(vaddr_t begin, vaddr_t end, int mode);
57void mm_defrag(void);
Wedson Almeida Filhofed69022018-07-11 15:39:12 +010058
Andrew Scull265ada92018-07-30 15:19:01 +010059/**
Andrew Scull265ada92018-07-30 15:19:01 +010060 * Converts an intermediate physical address to a physical address. Addresses
61 * are currently identity mapped so this is a simple type convertion. Returns
62 * true if the address was mapped in the table and the address was converted.
63 */
Andrew Scull8dce4982018-08-06 13:02:20 +010064static inline bool mm_ptable_translate_ipa(struct mm_ptable *t, ipaddr_t ipa,
65 paddr_t *pa)
Andrew Scull265ada92018-07-30 15:19:01 +010066{
67 /* TODO: the ptable functions map physical to virtual addresses but they
68 * should really be mapping to intermediate physical addresses.
69 * It might be better to have different interfaces to the mm functions?
70 * This might also mean ipaddr_t should be used when building the VM
71 * tables too?
72 * */
73 if (mm_ptable_is_mapped(t, va_init(ipa_addr(ipa)), 0)) {
74 *pa = pa_init(ipa_addr(ipa));
75 return true;
76 }
77 return false;
78}
79
Andrew Scull4f170f52018-07-19 12:58:20 +010080#endif /* _MM_H */