/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MMU_H
#define __MMU_H

#include <linux/cpumask.h>
#include <linux/errno.h>

typedef struct {
	spinlock_t lock;
	cpumask_t cpu_attach_mask;
	atomic_t flush_count;
	unsigned int flush_mm;
	struct list_head pgtable_list;
	struct list_head gmap_list;
	unsigned long gmap_asce;
	unsigned long asce;
	unsigned long asce_limit;
	unsigned long vdso_base;
	/*
	 * The following bitfields need a down_write on the mm
	 * semaphore when they are written to. As they are only
	 * written once, they can be read without a lock.
	 *
	 * The mmu context allocates 4K page tables.
	 */
	unsigned int alloc_pgste:1;
	/* The mmu context uses extended page tables. */
	unsigned int has_pgste:1;
	/* The mmu context uses storage keys. */
	unsigned int uses_skeys:1;
	/* The mmu context uses CMM. */
	unsigned int uses_cmm:1;
	/* The gmaps associated with this context are allowed to use huge pages. */
	unsigned int allow_gmap_hpage_1m:1;
} mm_context_t;
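
/*
 * Illustrative sketch, not part of the original header: how the write-once
 * bitfields above are meant to be updated, assuming the "mm semaphore" in
 * the comment is mm->mmap_sem as in this kernel version. A writer sets a
 * flag exactly once with the semaphore held for writing; readers may then
 * test it without any lock:
 *
 *	down_write(&mm->mmap_sem);
 *	mm->context.uses_skeys = 1;
 *	up_write(&mm->mmap_sem);
 *
 *	if (mm->context.uses_skeys)
 *		... storage keys in use; reading the flag needs no lock ...
 */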

#define INIT_MM_CONTEXT(name)						\
	.context.lock = __SPIN_LOCK_UNLOCKED(name.context.lock),	\
	.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
	.context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),

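/*
 * Illustrative sketch, not part of the original header: INIT_MM_CONTEXT()
 * is written to be dropped into a static struct mm_struct initializer, so
 * the context lock and lists are usable before init_new_context() ever
 * runs; the trailing comma lets it compose with the other initializers.
 * Roughly how the generic init_mm definition in mm/init-mm.c uses it
 * (fields abbreviated):
 *
 *	struct mm_struct init_mm = {
 *		.mm_rb		= RB_ROOT,
 *		.pgd		= swapper_pg_dir,
 *		.mm_users	= ATOMIC_INIT(2),
 *		.mm_count	= ATOMIC_INIT(1),
 *		...
 *		INIT_MM_CONTEXT(init_mm)
 *	};
 */
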
/*
 * Test the access permissions of the page at address addr with the
 * TPROT instruction. Returns the resulting condition code (0..3), or
 * -EFAULT if the instruction faults and the exception table fixup
 * is taken.
 */
static inline int tprot(unsigned long addr)
{
	int rc = -EFAULT;

	asm volatile(
		"	tprot	0(%1),0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (rc) : "a" (addr) : "cc");
	return rc;
}
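
/*
 * Illustrative sketch, not part of the original header: per the TPROT
 * condition-code definition (0: fetch and store permitted, 1: fetch
 * permitted, store not, 2: neither permitted, 3: translation not
 * available), a hypothetical caller could probe whether an address is
 * at least readable like this:
 *
 *	static int addr_is_readable(unsigned long addr)
 *	{
 *		int cc = tprot(addr);
 *
 *		return cc == 0 || cc == 1;
 *	}
 */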

#endif