/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/copro.h>
#include <asm/hugetlb.h>
#include <asm/mmu_context.h>

static DEFINE_SPINLOCK(slice_convert_lock);

#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, const struct slice_mask *mask)
{
	if (!_slice_debug)
		return;
	pr_devel("%s low_slice: %*pbl\n", label,
		 (int)SLICE_NUM_LOW, &mask->low_slices);
	pr_devel("%s high_slice: %*pbl\n", label,
		 (int)SLICE_NUM_HIGH, mask->high_slices);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_devel(fmt); } while (0)

#else

static void slice_print_mask(const char *label, const struct slice_mask *mask) {}
#define slice_dbg(fmt...)

#endif

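/* Return true if @addr falls in the "low" slice range (below SLICE_LOW_TOP). */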
static inline bool slice_addr_is_low(unsigned long addr)
{
	u64 tmp = (u64)addr;

	return tmp < SLICE_LOW_TOP;
}

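/*
 * Build a slice_mask covering every low and high slice touched by the
 * address range [start, start + len).
 */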
static void slice_range_to_mask(unsigned long start, unsigned long len,
				struct slice_mask *ret)
{
	unsigned long end = start + len - 1;

	ret->low_slices = 0;
	if (SLICE_NUM_HIGH)
		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	if (slice_addr_is_low(start)) {
		unsigned long mend = min(end,
					 (unsigned long)(SLICE_LOW_TOP - 1));

		ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
			- (1u << GET_LOW_SLICE_INDEX(start));
	}

	if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;

		bitmap_set(ret->high_slices, start_index, count);
	}
}

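/*
 * Return non-zero if the range [addr, addr + len) is below the address
 * limit and does not overlap any existing VMA (stack guard gap included).
 */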
static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
			      unsigned long len)
{
	struct vm_area_struct *vma;

	if ((mm->context.slb_addr_limit - len) < addr)
		return 0;
	vma = find_vma(mm, addr);
	return (!vma || (addr + len) <= vm_start_gap(vma));
}

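/* Check whether the given low slice contains any VMA. */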
static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
	return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
				   1ul << SLICE_LOW_SHIFT);
}

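/* Check whether the given high slice contains any VMA. */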
static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
	unsigned long start = slice << SLICE_HIGH_SHIFT;
	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

#ifdef CONFIG_PPC64
	/* Hack: so that each address is controlled by exactly one
	 * of the high or low area bitmaps, the first high area starts
	 * at 4GB, not 0. */
	if (start == 0)
		start = SLICE_LOW_TOP;
#endif

	return !slice_area_is_free(mm, start, end - start);
}

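/*
 * Build the mask of slices, up to high_limit, that contain no VMAs and
 * can therefore be converted to a new page size.
 */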
static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
				unsigned long high_limit)
{
	unsigned long i;

	ret->low_slices = 0;
	if (SLICE_NUM_HIGH)
		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (!slice_low_has_vma(mm, i))
			ret->low_slices |= 1u << i;

	if (slice_addr_is_low(high_limit - 1))
		return;

	for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++)
		if (!slice_high_has_vma(mm, i))
			__set_bit(i, ret->high_slices);
}

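/*
 * Return the cached slice_mask of slices currently using the given page
 * size, as maintained in the mm context.
 */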
#ifdef CONFIG_PPC_BOOK3S_64
static struct slice_mask *slice_mask_for_size(struct mm_struct *mm, int psize)
{
#ifdef CONFIG_PPC_64K_PAGES
	if (psize == MMU_PAGE_64K)
		return &mm->context.mask_64k;
#endif
	if (psize == MMU_PAGE_4K)
		return &mm->context.mask_4k;
#ifdef CONFIG_HUGETLB_PAGE
	if (psize == MMU_PAGE_16M)
		return &mm->context.mask_16m;
	if (psize == MMU_PAGE_16G)
		return &mm->context.mask_16g;
#endif
	BUG();
}
#elif defined(CONFIG_PPC_8xx)
static struct slice_mask *slice_mask_for_size(struct mm_struct *mm, int psize)
{
	if (psize == mmu_virtual_psize)
		return &mm->context.mask_base_psize;
#ifdef CONFIG_HUGETLB_PAGE
	if (psize == MMU_PAGE_512K)
		return &mm->context.mask_512k;
	if (psize == MMU_PAGE_8M)
		return &mm->context.mask_8m;
#endif
	BUG();
}
#else
#error "Must define the slice masks for page sizes supported by the platform"
#endif

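/*
 * Return true if every slice covering [start, start + len) is set in the
 * 'available' mask.
 */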
static bool slice_check_range_fits(struct mm_struct *mm,
			   const struct slice_mask *available,
			   unsigned long start, unsigned long len)
{
	unsigned long end = start + len - 1;
	u64 low_slices = 0;

	if (slice_addr_is_low(start)) {
		unsigned long mend = min(end,
					 (unsigned long)(SLICE_LOW_TOP - 1));

		low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
				- (1u << GET_LOW_SLICE_INDEX(start));
	}
	if ((low_slices & available->low_slices) != low_slices)
		return false;

	if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;
		unsigned long i;

		for (i = start_index; i < start_index + count; i++) {
			if (!test_bit(i, available->high_slices))
				return false;
		}
	}

	return true;
}

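/*
 * IPI handler: resync the paca's copy of this mm's slice information and
 * flush/rebolt the SLB so the new slice page sizes take effect.
 */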
static void slice_flush_segments(void *parm)
{
#ifdef CONFIG_PPC64
	struct mm_struct *mm = parm;
	unsigned long flags;

	if (mm != current->active_mm)
		return;

	copy_mm_to_paca(current->active_mm);

	local_irq_save(flags);
	slb_flush_and_rebolt();
	local_irq_restore(flags);
#endif
}

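/*
 * Convert every slice set in @mask to @psize: update the low/high psize
 * arrays and the cached per-size slice masks, then flush copro SLBs.
 */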
static void slice_convert(struct mm_struct *mm,
				const struct slice_mask *mask, int psize)
{
	int index, mask_index;
	/* Write the new slice psize bits */
	unsigned char *hpsizes, *lpsizes;
	struct slice_mask *psize_mask, *old_mask;
	unsigned long i, flags;
	int old_psize;

	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
	slice_print_mask(" mask", mask);

	psize_mask = slice_mask_for_size(mm, psize);

	/* We need to use a spinlock here to protect against
	 * concurrent 64k -> 4k demotion ...
	 */
	spin_lock_irqsave(&slice_convert_lock, flags);

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++) {
		if (!(mask->low_slices & (1u << i)))
			continue;

		mask_index = i & 0x1;
		index = i >> 1;

		/* Update the slice_mask */
		old_psize = (lpsizes[index] >> (mask_index * 4)) & 0xf;
		old_mask = slice_mask_for_size(mm, old_psize);
		old_mask->low_slices &= ~(1u << i);
		psize_mask->low_slices |= 1u << i;

		/* Update the sizes array */
		lpsizes[index] = (lpsizes[index] & ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) {
		if (!test_bit(i, mask->high_slices))
			continue;

		mask_index = i & 0x1;
		index = i >> 1;

		/* Update the slice_mask */
		old_psize = (hpsizes[index] >> (mask_index * 4)) & 0xf;
		old_mask = slice_mask_for_size(mm, old_psize);
		__clear_bit(i, old_mask->high_slices);
		__set_bit(i, psize_mask->high_slices);

		/* Update the sizes array */
		hpsizes[index] = (hpsizes[index] & ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  (unsigned long)mm->context.low_slices_psize,
		  (unsigned long)mm->context.high_slices_psize);

	spin_unlock_irqrestore(&slice_convert_lock, flags);

	copro_flush_all_slbs(mm);
}

/*
 * Compute which slice addr is part of;
 * set *boundary_addr to the start or end boundary of that slice
 * (depending on 'end' parameter);
 * return boolean indicating if the slice is marked as available in the
 * 'available' slice_mask.
 */
static bool slice_scan_available(unsigned long addr,
				 const struct slice_mask *available,
				 int end, unsigned long *boundary_addr)
{
	unsigned long slice;
	if (slice_addr_is_low(addr)) {
		slice = GET_LOW_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
		return !!(available->low_slices & (1u << slice));
	} else {
		slice = GET_HIGH_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) ?
			((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
		return !!test_bit(slice, available->high_slices);
	}
}

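/*
 * Bottom-up search: walk runs of available slices upwards from
 * TASK_UNMAPPED_BASE and let vm_unmapped_area() find a free gap in each run.
 */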
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
					      unsigned long len,
					      const struct slice_mask *available,
					      int psize, unsigned long high_limit)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, next_end;
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = TASK_UNMAPPED_BASE;
	/*
	 * Check up to the maximum address allowed for this mmap request
	 */
	while (addr < high_limit) {
		info.low_limit = addr;
		if (!slice_scan_available(addr, available, 1, &addr))
			continue;

 next_slice:
		/*
		 * At this point [info.low_limit; addr) covers
		 * available slices only and ends at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the next available slice.
		 */
		if (addr >= high_limit)
			addr = high_limit;
		else if (slice_scan_available(addr, available, 1, &next_end)) {
			addr = next_end;
			goto next_slice;
		}
		info.high_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	return -ENOMEM;
}

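/*
 * Top-down search: walk runs of available slices downwards from mmap_base
 * and let vm_unmapped_area() find a free gap in each run.
 */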
static unsigned long slice_find_area_topdown(struct mm_struct *mm,
					     unsigned long len,
					     const struct slice_mask *available,
					     int psize, unsigned long high_limit)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, prev;
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = mm->mmap_base;
	/*
	 * If we are trying to allocate above DEFAULT_MAP_WINDOW,
	 * add the difference to mmap_base. Only do this for requests
	 * whose high_limit is above DEFAULT_MAP_WINDOW.
	 */
	if (high_limit > DEFAULT_MAP_WINDOW)
		addr += mm->context.slb_addr_limit - DEFAULT_MAP_WINDOW;

	while (addr > PAGE_SIZE) {
		info.high_limit = addr;
		if (!slice_scan_available(addr - 1, available, 0, &addr))
			continue;

 prev_slice:
		/*
		 * At this point [addr; info.high_limit) covers
		 * available slices only and starts at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the previous available slice.
		 */
		if (addr < PAGE_SIZE)
			addr = PAGE_SIZE;
		else if (slice_scan_available(addr - 1, available, 0, &prev)) {
			addr = prev;
			goto prev_slice;
		}
		info.low_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return slice_find_area_bottomup(mm, len, available, psize, high_limit);
}


static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
				     const struct slice_mask *mask, int psize,
				     int topdown, unsigned long high_limit)
{
	if (topdown)
		return slice_find_area_topdown(mm, len, mask, psize, high_limit);
	else
		return slice_find_area_bottomup(mm, len, mask, psize, high_limit);
}

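/* Bitwise helpers operating on both the low and high parts of a slice_mask. */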
static inline void slice_copy_mask(struct slice_mask *dst,
					const struct slice_mask *src)
{
	dst->low_slices = src->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_copy(dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
}

static inline void slice_or_mask(struct slice_mask *dst,
					const struct slice_mask *src1,
					const struct slice_mask *src2)
{
	dst->low_slices = src1->low_slices | src2->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_or(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
}

static inline void slice_andnot_mask(struct slice_mask *dst,
					const struct slice_mask *src1,
					const struct slice_mask *src2)
{
	dst->low_slices = src1->low_slices & ~src2->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_andnot(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
}

#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE	MMU_PAGE_64K
#else
#define MMU_PAGE_BASE	MMU_PAGE_4K
#endif

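/*
 * Core allocator: find (or validate) an address range of @len bytes that
 * can be mapped with page size @psize, converting free slices if needed.
 */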
unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
				      unsigned long flags, unsigned int psize,
				      int topdown)
{
	struct slice_mask good_mask;
	struct slice_mask potential_mask;
	const struct slice_mask *maskp;
	const struct slice_mask *compat_maskp = NULL;
	int fixed = (flags & MAP_FIXED);
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long page_size = 1UL << pshift;
	struct mm_struct *mm = current->mm;
	unsigned long newaddr;
	unsigned long high_limit;

	high_limit = DEFAULT_MAP_WINDOW;
	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
		high_limit = TASK_SIZE;

	if (len > high_limit)
		return -ENOMEM;
	if (len & (page_size - 1))
		return -EINVAL;
	if (fixed) {
		if (addr & (page_size - 1))
			return -EINVAL;
		if (addr > high_limit - len)
			return -ENOMEM;
	}

	if (high_limit > mm->context.slb_addr_limit) {
		/*
		 * Increasing the slb_addr_limit does not require
		 * slice mask cache to be recalculated because it should
		 * be already initialised beyond the old address limit.
		 */
		mm->context.slb_addr_limit = high_limit;

		on_each_cpu(slice_flush_segments, mm, 1);
	}

	/* Sanity checks */
	BUG_ON(mm->task_size == 0);
	BUG_ON(mm->context.slb_addr_limit == 0);
	VM_BUG_ON(radix_enabled());

	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
		  addr, len, flags, topdown);

	/* If hint, make sure it matches our alignment restrictions */
	if (!fixed && addr) {
		addr = _ALIGN_UP(addr, page_size);
		slice_dbg(" aligned addr=%lx\n", addr);
		/* Ignore hint if it's too large or overlaps a VMA */
		if (addr > high_limit - len ||
		    !slice_area_is_free(mm, addr, len))
			addr = 0;
	}

	/* First make up a "good" mask of slices that have the right size
	 * already
	 */
	maskp = slice_mask_for_size(mm, psize);

	/*
	 * Here "good" means slices that are already the right page size,
	 * "compat" means slices that have a compatible page size (i.e.
	 * 4k in a 64k pagesize kernel), and "free" means slices without
	 * any VMAs.
	 *
	 * If MAP_FIXED:
	 *	check if fits in good | compat => OK
	 *	check if fits in good | compat | free => convert free
	 *	else bad
	 * If have hint:
	 *	check if hint fits in good => OK
	 *	check if hint fits in good | free => convert free
	 * Otherwise:
	 *	search in good, found => OK
	 *	search in good | free, found => convert free
	 *	search in good | compat | free, found => convert free.
	 */

	/*
	 * If we support combo pages, we can allow 64k pages in 4k slices
	 * The mask copies could be avoided in most cases here if we had
	 * a pointer to good mask for the next code to use.
	 */
	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
		compat_maskp = slice_mask_for_size(mm, MMU_PAGE_4K);
		if (fixed)
			slice_or_mask(&good_mask, maskp, compat_maskp);
		else
			slice_copy_mask(&good_mask, maskp);
	} else {
		slice_copy_mask(&good_mask, maskp);
	}

	slice_print_mask(" good_mask", &good_mask);
	if (compat_maskp)
		slice_print_mask(" compat_mask", compat_maskp);

	/* First check hint if it's valid or if we have MAP_FIXED */
	if (addr != 0 || fixed) {
		/* Check if we fit in the good mask. If we do, we just return,
		 * nothing else to do
		 */
		if (slice_check_range_fits(mm, &good_mask, addr, len)) {
			slice_dbg(" fits good !\n");
			newaddr = addr;
			goto return_addr;
		}
	} else {
		/* Now let's see if we can find something in the existing
		 * slices for that size
		 */
		newaddr = slice_find_area(mm, len, &good_mask,
					  psize, topdown, high_limit);
		if (newaddr != -ENOMEM) {
			/* Found within the good mask, nothing to set up,
			 * so we return directly
			 */
			slice_dbg(" found area at 0x%lx\n", newaddr);
			goto return_addr;
		}
	}
	/*
	 * We don't fit in the good mask, check what other slices are
	 * empty and thus can be converted
	 */
	slice_mask_for_free(mm, &potential_mask, high_limit);
	slice_or_mask(&potential_mask, &potential_mask, &good_mask);
	slice_print_mask(" potential", &potential_mask);

	if (addr != 0 || fixed) {
		if (slice_check_range_fits(mm, &potential_mask, addr, len)) {
			slice_dbg(" fits potential !\n");
			newaddr = addr;
			goto convert;
		}
	}

	/* If we have MAP_FIXED and failed the above steps, then error out */
	if (fixed)
		return -EBUSY;

	slice_dbg(" search...\n");

	/* If we had a hint that didn't work out, see if we can fit
	 * anywhere in the good area.
	 */
	if (addr) {
		newaddr = slice_find_area(mm, len, &good_mask,
					  psize, topdown, high_limit);
		if (newaddr != -ENOMEM) {
			slice_dbg(" found area at 0x%lx\n", newaddr);
			goto return_addr;
		}
	}

	/* Now let's see if we can find something in the existing slices
	 * for that size plus free slices
	 */
	newaddr = slice_find_area(mm, len, &potential_mask,
				  psize, topdown, high_limit);

#ifdef CONFIG_PPC_64K_PAGES
	if (newaddr == -ENOMEM && psize == MMU_PAGE_64K) {
		/* retry the search with 4k-page slices included */
		slice_or_mask(&potential_mask, &potential_mask, compat_maskp);
		newaddr = slice_find_area(mm, len, &potential_mask,
					  psize, topdown, high_limit);
	}
#endif

	if (newaddr == -ENOMEM)
		return -ENOMEM;

	slice_range_to_mask(newaddr, len, &potential_mask);
	slice_dbg(" found potential area at 0x%lx\n", newaddr);
	slice_print_mask(" mask", &potential_mask);

 convert:
	/*
	 * Try to allocate the context before we do slice convert
	 * so that we handle the context allocation failure gracefully.
	 */
	if (need_extra_context(mm, newaddr)) {
		if (alloc_extended_context(mm, newaddr) < 0)
			return -ENOMEM;
	}

	slice_andnot_mask(&potential_mask, &potential_mask, &good_mask);
	if (compat_maskp && !fixed)
		slice_andnot_mask(&potential_mask, &potential_mask, compat_maskp);
	if (potential_mask.low_slices ||
		(SLICE_NUM_HIGH &&
		 !bitmap_empty(potential_mask.high_slices, SLICE_NUM_HIGH))) {
		slice_convert(mm, &potential_mask, psize);
		if (psize > MMU_PAGE_BASE)
			on_each_cpu(slice_flush_segments, mm, 1);
	}
	return newaddr;

return_addr:
	if (need_extra_context(mm, newaddr)) {
		if (alloc_extended_context(mm, newaddr) < 0)
			return -ENOMEM;
	}
	return newaddr;
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);

unsigned long arch_get_unmapped_area(struct file *filp,
				     unsigned long addr,
				     unsigned long len,
				     unsigned long pgoff,
				     unsigned long flags)
{
	return slice_get_unmapped_area(addr, len, flags,
				       current->mm->context.user_psize, 0);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
					     const unsigned long addr0,
					     const unsigned long len,
					     const unsigned long pgoff,
					     const unsigned long flags)
{
	return slice_get_unmapped_area(addr0, len, flags,
				       current->mm->context.user_psize, 1);
}

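/* Return the page size (MMU_PAGE_*) of the slice containing @addr. */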
unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
	unsigned char *psizes;
	int index, mask_index;

	VM_BUG_ON(radix_enabled());

	if (slice_addr_is_low(addr)) {
		psizes = mm->context.low_slices_psize;
		index = GET_LOW_SLICE_INDEX(addr);
	} else {
		psizes = mm->context.high_slices_psize;
		index = GET_HIGH_SLICE_INDEX(addr);
	}
	mask_index = index & 0x1;
	return (psizes[index >> 1] >> (mask_index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);

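/*
 * Called at exec time: reset the address limit, set every slice to the
 * default page size and seed the slice mask cache accordingly.
 */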
void slice_init_new_context_exec(struct mm_struct *mm)
{
	unsigned char *hpsizes, *lpsizes;
	struct slice_mask *mask;
	unsigned int psize = mmu_virtual_psize;

	slice_dbg("slice_init_new_context_exec(mm=%p)\n", mm);

	/*
	 * In the case of exec, use the default limit. In the
	 * case of fork it is just inherited from the mm being
	 * duplicated.
	 */
#ifdef CONFIG_PPC64
	mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64;
#else
	mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW;
#endif

	mm->context.user_psize = psize;

	/*
	 * Set all slice psizes to the default.
	 */
	lpsizes = mm->context.low_slices_psize;
	memset(lpsizes, (psize << 4) | psize, SLICE_NUM_LOW >> 1);

	hpsizes = mm->context.high_slices_psize;
	memset(hpsizes, (psize << 4) | psize, SLICE_NUM_HIGH >> 1);

	/*
	 * Slice mask cache starts zeroed, fill the default size cache.
	 */
	mask = slice_mask_for_size(mm, psize);
	mask->low_slices = ~0UL;
	if (SLICE_NUM_HIGH)
		bitmap_fill(mask->high_slices, SLICE_NUM_HIGH);
}

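/* Convert all slices covering [start, start + len) to @psize. */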
void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
			   unsigned long len, unsigned int psize)
{
	struct slice_mask mask;

	VM_BUG_ON(radix_enabled());

	slice_range_to_mask(start, len, &mask);
	slice_convert(mm, &mask, psize);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * Until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * Because of that generic code limitation, a MAP_FIXED mapping cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is ok
 * for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len)
{
	const struct slice_mask *maskp;
	unsigned int psize = mm->context.user_psize;

	VM_BUG_ON(radix_enabled());

	maskp = slice_mask_for_size(mm, psize);
#ifdef CONFIG_PPC_64K_PAGES
	/* We need to account for 4k slices too */
	if (psize == MMU_PAGE_64K) {
		const struct slice_mask *compat_maskp;
		struct slice_mask available;

		compat_maskp = slice_mask_for_size(mm, MMU_PAGE_4K);
		slice_or_mask(&available, maskp, compat_maskp);
		return !slice_check_range_fits(mm, &available, addr, len);
	}
#endif

	return !slice_check_range_fits(mm, maskp, addr, len);
}
#endif