// SPDX-License-Identifier: GPL-2.0
/*
 * PARISC64 Huge TLB page support.
 *
 * This parisc implementation is heavily based on the SPARC and x86 code.
 *
 * Copyright (C) 2015 Helge Deller <deller@gmx.de>
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>


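/* Find a free area for a hugetlbfs mapping.  The length must be a
 * multiple of the huge page size, and any address hint is rounded up
 * to a huge page boundary before falling back to the generic search,
 * which also keeps the cache colouring rules intact.
 */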
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;

	if (addr)
		addr = ALIGN(addr, huge_page_size(h));

	/* we need to make sure the colouring is OK */
	return arch_get_unmapped_area(file, addr, len, pgoff, flags);
}


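/* Allocate any page table levels needed to map a huge page at addr and
 * return the first sub-pte of the range, or NULL if an intermediate
 * level could not be allocated.
 */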
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range.  So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_offset(pgd, addr);
	pud = pud_alloc(mm, p4d, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, addr);
	}
	return pte;
}

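/* Look up the first sub-pte of the huge page mapping addr.  Unlike
 * huge_pte_alloc(), this only walks page tables that already exist and
 * returns NULL as soon as a level is empty.
 */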
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		p4d = p4d_offset(pgd, addr);
		if (!p4d_none(*p4d)) {
			pud = pud_offset(p4d, addr);
			if (!pud_none(*pud)) {
				pmd = pmd_offset(pud, addr);
				if (!pmd_none(*pmd))
					pte = pte_offset_map(pmd, addr);
			}
		}
	}
	return pte;
}

/* Purge data and instruction TLB entries.  Must be called holding
 * the pa_tlb_lock.  The TLB purge instructions are slow on SMP
 * machines since the purge must be broadcast to all CPUs.
 */
static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
{
	int i;

	/* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
	 * Linux standard huge pages (e.g. 2 MB) */
	BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);

	addr &= HPAGE_MASK;
	addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;

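	/* Issue one purge per physical huge page backing this mapping. */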
	for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
		purge_tlb_entries(mm, addr);
		addr += (1UL << REAL_HPAGE_SHIFT);
	}
}

/* __set_huge_pte_at() must be called holding the pa_tlb_lock. */
static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned long addr_start;
	int i;

	addr &= HPAGE_MASK;
	addr_start = addr;

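	/* Write one ordinary pte per base page, stepping the physical
	 * address in the pte along with the virtual address. */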
	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte(ptep, entry);
		ptep++;

		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}

	purge_tlb_entries_huge(mm, addr_start);
}

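/* Locked wrapper around __set_huge_pte_at(): the pgd spinlock is taken
 * with interrupts disabled for the duration of the update.
 */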
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned long flags;

	spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
	__set_huge_pte_at(mm, addr, ptep, entry);
	spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
}


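/* Read the old pte and clear every sub-pte of the huge page range
 * under the pgd spinlock.  Returns the previous pte value.
 */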
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned long flags;
	pte_t entry;

	spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
	entry = *ptep;
	__set_huge_pte_at(mm, addr, ptep, __pte(0));
	spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);

	return entry;
}


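/* Strip write permission from every sub-pte of the huge page range,
 * again under the pgd spinlock.
 */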
void huge_ptep_set_wrprotect(struct mm_struct *mm,
			     unsigned long addr, pte_t *ptep)
{
	unsigned long flags;
	pte_t old_pte;

	spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
	old_pte = *ptep;
	__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
	spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
}

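/* Update the access flags of a huge pte.  The range is only rewritten
 * if the new pte actually differs from the old one, and the return
 * value tells the caller whether anything changed.
 */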
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
	unsigned long flags;
	int changed;
	struct mm_struct *mm = vma->vm_mm;

	spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
	changed = !pte_same(*ptep, pte);
	if (changed) {
		__set_huge_pte_at(mm, addr, ptep, pte);
	}
	spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
	return changed;
}


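/* parisc assembles huge pages from runs of ordinary ptes (see
 * __set_huge_pte_at() above) rather than from single large pmd or pud
 * entries, so there is never a huge entry to report at these levels.
 */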
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}