/* SPDX-License-Identifier: GPL-2.0 */
/*
 * IBM System z Huge TLB Page Support for Kernel.
 *
 * Copyright IBM Corp. 2008
 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#ifndef _ASM_S390_HUGETLB_H
#define _ASM_S390_HUGETLB_H

#include <asm/page.h>
#include <asm/pgtable.h>

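/*
 * Huge pages need the enhanced DAT facility: EDAT1 provides 1 MB pages
 * at segment level; 2 GB pages at region-third level additionally
 * require EDAT2.
 */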
#define hugetlb_free_pgd_range		free_pgd_range
#define hugepages_supported()		(MACHINE_HAS_EDAT1)

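/*
 * Implemented out of line: these helpers translate between the pte
 * format and the segment/region table entry formats actually used for
 * huge mappings.
 */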
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte);
pte_t huge_ptep_get(pte_t *ptep);
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep);

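/* s390 has no address ranges that are reserved for huge pages only. */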
static inline bool is_hugepage_only_range(struct mm_struct *mm,
					  unsigned long addr,
					  unsigned long len)
{
	return false;
}

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
					 unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}

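/*
 * PG_arch_1 is an arch-private page flag; on s390 it is used for
 * storage key handling (it flags pages whose storage keys have been
 * initialized), so it must be reset when a page leaves the hugetlb
 * pool.
 */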
static inline void arch_clear_hugepage_flags(struct page *page)
{
	clear_bit(PG_arch_1, &page->flags);
}

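/*
 * A huge "pte" on s390 is really a segment table entry (1 MB page) or
 * a region-third table entry (2 GB page), so the empty value to write
 * depends on the entry type found in *ptep.
 */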
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
				  pte_t *ptep, unsigned long sz)
{
	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pte_val(*ptep) = _REGION3_ENTRY_EMPTY;
	else
		pte_val(*ptep) = _SEGMENT_ENTRY_EMPTY;
}

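/*
 * No separate flush is needed here: huge_ptep_get_and_clear() already
 * invalidates the TLB entry while clearing the table entry.
 */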
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
}

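/*
 * Updating an entry means clearing it and re-inserting the new value
 * through set_huge_pte_at(), which performs the pte-to-entry format
 * conversion.
 */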
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
	int changed = !pte_same(huge_ptep_get(ptep), pte);

	if (changed) {
		huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
		set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
	}
	return changed;
}

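/* Same clear-and-reinsert pattern as above, with the write bit removed. */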
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep);

	set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
}

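/*
 * The remaining helpers simply forward to the generic pte primitives;
 * the conversion to the hardware entry format happens later, in
 * set_huge_pte_at().
 */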
static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
{
	return mk_pte(page, pgprot);
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline int huge_pte_write(pte_t pte)
{
	return pte_write(pte);
}

static inline int huge_pte_dirty(pte_t pte)
{
	return pte_dirty(pte);
}

static inline pte_t huge_pte_mkwrite(pte_t pte)
{
	return pte_mkwrite(pte);
}

static inline pte_t huge_pte_mkdirty(pte_t pte)
{
	return pte_mkdirty(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
{
	return pte_modify(pte, newprot);
}

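/*
 * Gigantic (2 GB) pages can be allocated and freed at runtime; no
 * boot-time-only restriction applies on s390.
 */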
static inline bool gigantic_page_runtime_supported(void)
{
	return true;
}

#endif /* _ASM_S390_HUGETLB_H */