/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched/signal.h>		/* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

struct hugetlbfs_fs_context {
	struct hstate		*hstate;
	unsigned long long	max_size_opt;
	unsigned long long	min_size_opt;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	enum hugetlbfs_size_type max_val_type;
	enum hugetlbfs_size_type min_val_type;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};

int sysctl_hugetlb_shm_group;

enum hugetlb_param {
	Opt_gid,
	Opt_min_size,
	Opt_mode,
	Opt_nr_inodes,
	Opt_pagesize,
	Opt_size,
	Opt_uid,
};

static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
	fsparam_u32   ("gid",		Opt_gid),
	fsparam_string("min_size",	Opt_min_size),
	fsparam_u32oct("mode",		Opt_mode),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("pagesize",	Opt_pagesize),
	fsparam_string("size",		Opt_size),
	fsparam_u32   ("uid",		Opt_uid),
	{}
};
87
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000088#ifdef CONFIG_NUMA
89static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
90 struct inode *inode, pgoff_t index)
91{
92 vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
93 index);
94}
95
96static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
97{
98 mpol_cond_put(vma->vm_policy);
99}
100#else
101static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
102 struct inode *inode, pgoff_t index)
103{
104}
105
106static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
107{
108}
109#endif
110
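/*
 * Drop the page cache references taken by pagevec_lookup_range() and reset
 * the pagevec so it can be reused for the next lookup.
 */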
111static void huge_pagevec_release(struct pagevec *pvec)
112{
113 int i;
114
115 for (i = 0; i < pagevec_count(pvec); ++i)
116 put_page(pvec->pages[i]);
117
118 pagevec_reinit(pvec);
119}
120
121/*
122 * Mask used when checking the page offset value passed in via system
123 * calls. This value will be converted to a loff_t which is signed.
124 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
125 * value. The extra bit (- 1 in the shift value) is to take the sign
126 * bit into account.
127 */
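/*
 * For example (illustrative values, not from this file): on a 64-bit
 * architecture with 4 KiB base pages (BITS_PER_LONG == 64, PAGE_SHIFT == 12)
 * the mask below covers bits 51..63 of vm_pgoff; if any of those bits is set,
 * vm_pgoff << PAGE_SHIFT would no longer fit in the 63 value bits of a
 * signed loff_t.
 */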
128#define PGOFF_LOFFT_MAX \
129 (((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1)))
130
131static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
132{
133 struct inode *inode = file_inode(file);
Olivier Deprez0e641232021-09-23 10:07:05 +0200134 struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000135 loff_t len, vma_len;
136 int ret;
137 struct hstate *h = hstate_file(file);
138
139 /*
140 * vma address alignment (but not the pgoff alignment) has
141 * already been checked by prepare_hugepage_range. If you add
142 * any error returns here, do so after setting VM_HUGETLB, so
143 * is_vm_hugetlb_page tests below unmap_region go the right
Olivier Deprez157378f2022-04-04 15:47:50 +0200144 * way when do_mmap unwinds (may be important on powerpc
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000145 * and ia64).
146 */
147 vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
148 vma->vm_ops = &hugetlb_vm_ops;
149
Olivier Deprez0e641232021-09-23 10:07:05 +0200150 ret = seal_check_future_write(info->seals, vma);
151 if (ret)
152 return ret;
153
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000154 /*
155 * page based offset in vm_pgoff could be sufficiently large to
156 * overflow a loff_t when converted to byte offset. This can
157 * only happen on architectures where sizeof(loff_t) ==
158 * sizeof(unsigned long). So, only check in those instances.
159 */
160 if (sizeof(unsigned long) == sizeof(loff_t)) {
161 if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
162 return -EINVAL;
163 }
164
165 /* must be huge page aligned */
166 if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
167 return -EINVAL;
168
169 vma_len = (loff_t)(vma->vm_end - vma->vm_start);
170 len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
171 /* check for overflow */
172 if (len < vma_len)
173 return -EINVAL;
174
175 inode_lock(inode);
176 file_accessed(file);
177
178 ret = -ENOMEM;
179 if (hugetlb_reserve_pages(inode,
180 vma->vm_pgoff >> huge_page_order(h),
181 len >> huge_page_shift(h), vma,
182 vma->vm_flags))
183 goto out;
184
185 ret = 0;
186 if (vma->vm_flags & VM_WRITE && inode->i_size < len)
187 i_size_write(inode, len);
188out:
189 inode_unlock(inode);
190
191 return ret;
192}
193
194/*
Olivier Deprez157378f2022-04-04 15:47:50 +0200195 * Called under mmap_write_lock(mm).
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000196 */
197
198#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
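/*
 * Generic search for a free, huge-page-aligned mapping range, used when the
 * architecture does not provide its own hugetlb_get_unmapped_area().
 */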
199static unsigned long
Olivier Deprez157378f2022-04-04 15:47:50 +0200200hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
201 unsigned long len, unsigned long pgoff, unsigned long flags)
202{
203 struct hstate *h = hstate_file(file);
204 struct vm_unmapped_area_info info;
205
206 info.flags = 0;
207 info.length = len;
208 info.low_limit = current->mm->mmap_base;
Olivier Deprez92d4c212022-12-06 15:05:30 +0100209 info.high_limit = arch_get_mmap_end(addr);
Olivier Deprez157378f2022-04-04 15:47:50 +0200210 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
211 info.align_offset = 0;
212 return vm_unmapped_area(&info);
213}
214
215static unsigned long
216hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
217 unsigned long len, unsigned long pgoff, unsigned long flags)
218{
219 struct hstate *h = hstate_file(file);
220 struct vm_unmapped_area_info info;
221
222 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
223 info.length = len;
224 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
Olivier Deprez92d4c212022-12-06 15:05:30 +0100225 info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
Olivier Deprez157378f2022-04-04 15:47:50 +0200226 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
227 info.align_offset = 0;
228 addr = vm_unmapped_area(&info);
229
230 /*
231 * A failed mmap() very likely causes application failure,
232 * so fall back to the bottom-up function here. This scenario
233 * can happen with large stack limits and large mmap()
234 * allocations.
235 */
236 if (unlikely(offset_in_page(addr))) {
237 VM_BUG_ON(addr != -ENOMEM);
238 info.flags = 0;
239 info.low_limit = current->mm->mmap_base;
Olivier Deprez92d4c212022-12-06 15:05:30 +0100240 info.high_limit = arch_get_mmap_end(addr);
Olivier Deprez157378f2022-04-04 15:47:50 +0200241 addr = vm_unmapped_area(&info);
242 }
243
244 return addr;
245}
246
247static unsigned long
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000248hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
249 unsigned long len, unsigned long pgoff, unsigned long flags)
250{
251 struct mm_struct *mm = current->mm;
252 struct vm_area_struct *vma;
253 struct hstate *h = hstate_file(file);
Olivier Deprez92d4c212022-12-06 15:05:30 +0100254 const unsigned long mmap_end = arch_get_mmap_end(addr);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000255
256 if (len & ~huge_page_mask(h))
257 return -EINVAL;
258 if (len > TASK_SIZE)
259 return -ENOMEM;
260
261 if (flags & MAP_FIXED) {
262 if (prepare_hugepage_range(file, addr, len))
263 return -EINVAL;
264 return addr;
265 }
266
267 if (addr) {
268 addr = ALIGN(addr, huge_page_size(h));
269 vma = find_vma(mm, addr);
Olivier Deprez92d4c212022-12-06 15:05:30 +0100270 if (mmap_end - len >= addr &&
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000271 (!vma || addr + len <= vm_start_gap(vma)))
272 return addr;
273 }
274
Olivier Deprez157378f2022-04-04 15:47:50 +0200275 /*
276 * Use mm->get_unmapped_area value as a hint to use topdown routine.
277 * If architectures have special needs, they should define their own
278 * version of hugetlb_get_unmapped_area.
279 */
280 if (mm->get_unmapped_area == arch_get_unmapped_area_topdown)
281 return hugetlb_get_unmapped_area_topdown(file, addr, len,
282 pgoff, flags);
283 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
284 pgoff, flags);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000285}
286#endif
287
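/*
 * Copy up to @size bytes starting at @offset within the huge page @page into
 * the iov_iter @to.  The copy is done in base-page-sized chunks because
 * copy_page_to_iter() operates on individual pages of the compound page.
 * Returns the number of bytes actually copied.
 */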
288static size_t
289hugetlbfs_read_actor(struct page *page, unsigned long offset,
290 struct iov_iter *to, unsigned long size)
291{
292 size_t copied = 0;
293 int i, chunksize;
294
	/* Find which 4k chunk and offset within that chunk */
296 i = offset >> PAGE_SHIFT;
297 offset = offset & ~PAGE_MASK;
298
299 while (size) {
300 size_t n;
301 chunksize = PAGE_SIZE;
302 if (offset)
303 chunksize -= offset;
304 if (chunksize > size)
305 chunksize = size;
306 n = copy_page_to_iter(&page[i], offset, chunksize, to);
307 copied += n;
308 if (n != chunksize)
309 return copied;
310 offset = 0;
311 size -= chunksize;
312 i++;
313 }
314 return copied;
315}
316
317/*
318 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  It's *very* similar to do_generic_mapping_read(), but we can't use
 * that
320 * since it has PAGE_SIZE assumptions.
321 */
322static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
323{
324 struct file *file = iocb->ki_filp;
325 struct hstate *h = hstate_file(file);
326 struct address_space *mapping = file->f_mapping;
327 struct inode *inode = mapping->host;
328 unsigned long index = iocb->ki_pos >> huge_page_shift(h);
329 unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
330 unsigned long end_index;
331 loff_t isize;
332 ssize_t retval = 0;
333
334 while (iov_iter_count(to)) {
335 struct page *page;
336 size_t nr, copied;
337
338 /* nr is the maximum number of bytes to copy from this page */
339 nr = huge_page_size(h);
340 isize = i_size_read(inode);
341 if (!isize)
342 break;
343 end_index = (isize - 1) >> huge_page_shift(h);
344 if (index > end_index)
345 break;
346 if (index == end_index) {
347 nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
348 if (nr <= offset)
349 break;
350 }
351 nr = nr - offset;
352
353 /* Find the page */
354 page = find_lock_page(mapping, index);
355 if (unlikely(page == NULL)) {
356 /*
357 * We have a HOLE, zero out the user-buffer for the
358 * length of the hole or request.
359 */
360 copied = iov_iter_zero(nr, to);
361 } else {
362 unlock_page(page);
363
364 /*
365 * We have the page, copy it to user space buffer.
366 */
367 copied = hugetlbfs_read_actor(page, offset, to, nr);
368 put_page(page);
369 }
370 offset += copied;
371 retval += copied;
372 if (copied != nr && iov_iter_count(to)) {
373 if (!retval)
374 retval = -EFAULT;
375 break;
376 }
377 index += offset >> huge_page_shift(h);
378 offset &= ~huge_page_mask(h);
379 }
380 iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
381 return retval;
382}
383
384static int hugetlbfs_write_begin(struct file *file,
385 struct address_space *mapping,
386 loff_t pos, unsigned len, unsigned flags,
387 struct page **pagep, void **fsdata)
388{
389 return -EINVAL;
390}
391
392static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
393 loff_t pos, unsigned len, unsigned copied,
394 struct page *page, void *fsdata)
395{
396 BUG();
397 return -EINVAL;
398}
399
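/*
 * Remove a huge page from the page cache.  The page must be locked, as
 * required by delete_from_page_cache().
 */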
400static void remove_huge_page(struct page *page)
401{
402 ClearPageDirty(page);
403 ClearPageUptodate(page);
404 delete_from_page_cache(page);
405}
406
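/*
 * Unmap the range [start, end) (offsets in base-page units) from every vma
 * mapping this file.  end == 0 means "unmap everything from start onward".
 * Callers hold i_mmap_rwsem for writing.
 */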
407static void
408hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
409{
410 struct vm_area_struct *vma;
411
412 /*
413 * end == 0 indicates that the entire range after
414 * start should be unmapped.
415 */
416 vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
417 unsigned long v_offset;
418 unsigned long v_end;
419
420 /*
421 * Can the expression below overflow on 32-bit arches?
422 * No, because the interval tree returns us only those vmas
423 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond 4GB.
425 */
426 if (vma->vm_pgoff < start)
427 v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
428 else
429 v_offset = 0;
430
431 if (!end)
432 v_end = vma->vm_end;
433 else {
434 v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
435 + vma->vm_start;
436 if (v_end > vma->vm_end)
437 v_end = vma->vm_end;
438 }
439
440 unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
441 NULL);
442 }
443}
444
445/*
446 * remove_inode_hugepages handles two distinct cases: truncation and hole
447 * punch. There are subtle differences in operation for each case.
448 *
449 * truncation is indicated by end of range being LLONG_MAX
450 * In this case, we first scan the range and release found pages.
451 * After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
452 * maps and global counts. Page faults can not race with truncation
Olivier Deprez157378f2022-04-04 15:47:50 +0200453 * in this routine. hugetlb_no_page() holds i_mmap_rwsem and prevents
454 * page faults in the truncated range by checking i_size. i_size is
455 * modified while holding i_mmap_rwsem.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000456 * hole punch is indicated if end is not LLONG_MAX
457 * In the hole punch case we scan the range and release found pages.
458 * Only when releasing a page is the associated region/reserv map
459 * deleted. The region/reserv map for ranges without associated
460 * pages are not modified. Page faults can race with hole punch.
461 * This is indicated if we find a mapped page.
462 * Note: If the passed end of range value is beyond the end of file, but
463 * not LLONG_MAX this routine still performs a hole punch operation.
464 */
465static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
466 loff_t lend)
467{
468 struct hstate *h = hstate_inode(inode);
469 struct address_space *mapping = &inode->i_data;
470 const pgoff_t start = lstart >> huge_page_shift(h);
471 const pgoff_t end = lend >> huge_page_shift(h);
472 struct vm_area_struct pseudo_vma;
473 struct pagevec pvec;
474 pgoff_t next, index;
475 int i, freed = 0;
476 bool truncate_op = (lend == LLONG_MAX);
477
478 vma_init(&pseudo_vma, current->mm);
479 pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
480 pagevec_init(&pvec);
481 next = start;
482 while (next < end) {
483 /*
484 * When no more pages are found, we are done.
485 */
486 if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
487 break;
488
489 for (i = 0; i < pagevec_count(&pvec); ++i) {
490 struct page *page = pvec.pages[i];
491 u32 hash;
492
493 index = page->index;
Olivier Deprez157378f2022-04-04 15:47:50 +0200494 hash = hugetlb_fault_mutex_hash(mapping, index);
495 if (!truncate_op) {
496 /*
497 * Only need to hold the fault mutex in the
498 * hole punch case. This prevents races with
499 * page faults. Races are not possible in the
500 * case of truncation.
501 */
502 mutex_lock(&hugetlb_fault_mutex_table[hash]);
503 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000504
505 /*
506 * If page is mapped, it was faulted in after being
507 * unmapped in caller. Unmap (again) now after taking
508 * the fault mutex. The mutex will prevent faults
509 * until we finish removing the page.
510 *
511 * This race can only happen in the hole punch case.
512 * Getting here in a truncate operation is a bug.
513 */
514 if (unlikely(page_mapped(page))) {
515 BUG_ON(truncate_op);
516
Olivier Deprez157378f2022-04-04 15:47:50 +0200517 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000518 i_mmap_lock_write(mapping);
Olivier Deprez157378f2022-04-04 15:47:50 +0200519 mutex_lock(&hugetlb_fault_mutex_table[hash]);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000520 hugetlb_vmdelete_list(&mapping->i_mmap,
521 index * pages_per_huge_page(h),
522 (index + 1) * pages_per_huge_page(h));
523 i_mmap_unlock_write(mapping);
524 }
525
526 lock_page(page);
527 /*
528 * We must free the huge page and remove from page
529 * cache (remove_huge_page) BEFORE removing the
530 * region/reserve map (hugetlb_unreserve_pages). In
531 * rare out of memory conditions, removal of the
532 * region/reserve map could fail. Correspondingly,
533 * the subpool and global reserve usage count can need
534 * to be adjusted.
535 */
536 VM_BUG_ON(PagePrivate(page));
537 remove_huge_page(page);
538 freed++;
539 if (!truncate_op) {
540 if (unlikely(hugetlb_unreserve_pages(inode,
541 index, index + 1, 1)))
542 hugetlb_fix_reserve_counts(inode);
543 }
544
545 unlock_page(page);
Olivier Deprez157378f2022-04-04 15:47:50 +0200546 if (!truncate_op)
547 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000548 }
549 huge_pagevec_release(&pvec);
550 cond_resched();
551 }
552
553 if (truncate_op)
554 (void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
555}
556
557static void hugetlbfs_evict_inode(struct inode *inode)
558{
559 struct resv_map *resv_map;
560
561 remove_inode_hugepages(inode, 0, LLONG_MAX);
David Brazdil0f672f62019-12-10 10:32:29 +0000562
563 /*
564 * Get the resv_map from the address space embedded in the inode.
565 * This is the address space which points to any resv_map allocated
566 * at inode creation time. If this is a device special inode,
567 * i_mapping may not point to the original address space.
568 */
569 resv_map = (struct resv_map *)(&inode->i_data)->private_data;
570 /* Only regular and link inodes have associated reserve maps */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000571 if (resv_map)
572 resv_map_release(&resv_map->refs);
573 clear_inode(inode);
574}
575
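/*
 * Truncate the file to @offset (which must be huge page aligned): update
 * i_size, unmap everything beyond the new size, then free the pages and
 * release their reservations.
 */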
576static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
577{
578 pgoff_t pgoff;
579 struct address_space *mapping = inode->i_mapping;
580 struct hstate *h = hstate_inode(inode);
581
582 BUG_ON(offset & ~huge_page_mask(h));
583 pgoff = offset >> PAGE_SHIFT;
584
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000585 i_mmap_lock_write(mapping);
Olivier Deprez157378f2022-04-04 15:47:50 +0200586 i_size_write(inode, offset);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000587 if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
588 hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
589 i_mmap_unlock_write(mapping);
590 remove_inode_hugepages(inode, offset, LLONG_MAX);
591 return 0;
592}
593
594static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
595{
596 struct hstate *h = hstate_inode(inode);
597 loff_t hpage_size = huge_page_size(h);
598 loff_t hole_start, hole_end;
599
600 /*
601 * For hole punch round up the beginning offset of the hole and
602 * round down the end.
603 */
604 hole_start = round_up(offset, hpage_size);
605 hole_end = round_down(offset + len, hpage_size);
606
607 if (hole_end > hole_start) {
608 struct address_space *mapping = inode->i_mapping;
609 struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
610
611 inode_lock(inode);
612
613 /* protected by i_mutex */
David Brazdil0f672f62019-12-10 10:32:29 +0000614 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000615 inode_unlock(inode);
616 return -EPERM;
617 }
618
619 i_mmap_lock_write(mapping);
620 if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
621 hugetlb_vmdelete_list(&mapping->i_mmap,
622 hole_start >> PAGE_SHIFT,
623 hole_end >> PAGE_SHIFT);
624 i_mmap_unlock_write(mapping);
625 remove_inode_hugepages(inode, hole_start, hole_end);
626 inode_unlock(inode);
627 }
628
629 return 0;
630}
631
632static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
633 loff_t len)
634{
635 struct inode *inode = file_inode(file);
636 struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
637 struct address_space *mapping = inode->i_mapping;
638 struct hstate *h = hstate_inode(inode);
639 struct vm_area_struct pseudo_vma;
640 struct mm_struct *mm = current->mm;
641 loff_t hpage_size = huge_page_size(h);
642 unsigned long hpage_shift = huge_page_shift(h);
643 pgoff_t start, index, end;
644 int error;
645 u32 hash;
646
647 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
648 return -EOPNOTSUPP;
649
650 if (mode & FALLOC_FL_PUNCH_HOLE)
651 return hugetlbfs_punch_hole(inode, offset, len);
652
653 /*
654 * Default preallocate case.
655 * For this range, start is rounded down and end is rounded up
656 * as well as being converted to page offsets.
657 */
658 start = offset >> hpage_shift;
659 end = (offset + len + hpage_size - 1) >> hpage_shift;
660
661 inode_lock(inode);
662
663 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
664 error = inode_newsize_ok(inode, offset + len);
665 if (error)
666 goto out;
667
668 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
669 error = -EPERM;
670 goto out;
671 }
672
673 /*
674 * Initialize a pseudo vma as this is required by the huge page
675 * allocation routines. If NUMA is configured, use page index
676 * as input to create an allocation policy.
677 */
678 vma_init(&pseudo_vma, mm);
679 pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
680 pseudo_vma.vm_file = file;
681
682 for (index = start; index < end; index++) {
683 /*
684 * This is supposed to be the vaddr where the page is being
685 * faulted in, but we have no vaddr here.
686 */
687 struct page *page;
688 unsigned long addr;
689 int avoid_reserve = 0;
690
691 cond_resched();
692
693 /*
694 * fallocate(2) manpage permits EINTR; we may have been
695 * interrupted because we are using up too much memory.
696 */
697 if (signal_pending(current)) {
698 error = -EINTR;
699 break;
700 }
701
702 /* Set numa allocation policy based on index */
703 hugetlb_set_vma_policy(&pseudo_vma, inode, index);
704
705 /* addr is the offset within the file (zero based) */
706 addr = index * hpage_size;
707
		/*
		 * The fault mutex taken here protects against the fault path
		 * and hole punch.  The inode_lock taken earlier protects
		 * against truncation.
		 */
713 hash = hugetlb_fault_mutex_hash(mapping, index);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000714 mutex_lock(&hugetlb_fault_mutex_table[hash]);
715
716 /* See if already present in mapping to avoid alloc/free */
717 page = find_get_page(mapping, index);
718 if (page) {
719 put_page(page);
720 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
721 hugetlb_drop_vma_policy(&pseudo_vma);
722 continue;
723 }
724
725 /* Allocate page and add to page cache */
726 page = alloc_huge_page(&pseudo_vma, addr, avoid_reserve);
727 hugetlb_drop_vma_policy(&pseudo_vma);
728 if (IS_ERR(page)) {
729 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
730 error = PTR_ERR(page);
731 goto out;
732 }
733 clear_huge_page(page, addr, pages_per_huge_page(h));
734 __SetPageUptodate(page);
735 error = huge_add_to_page_cache(page, mapping, index);
736 if (unlikely(error)) {
737 put_page(page);
738 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
739 goto out;
740 }
741
742 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
743
Olivier Deprez0e641232021-09-23 10:07:05 +0200744 set_page_huge_active(page);
		/*
		 * unlock_page() because the page was locked by
		 * add_to_page_cache(); put_page() drops the reference taken
		 * by alloc_huge_page().
		 */
749 unlock_page(page);
750 put_page(page);
751 }
752
753 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
754 i_size_write(inode, offset + len);
755 inode->i_ctime = current_time(inode);
756out:
757 inode_unlock(inode);
758 return error;
759}
760
761static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
762{
763 struct inode *inode = d_inode(dentry);
764 struct hstate *h = hstate_inode(inode);
765 int error;
766 unsigned int ia_valid = attr->ia_valid;
767 struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
768
769 BUG_ON(!inode);
770
771 error = setattr_prepare(dentry, attr);
772 if (error)
773 return error;
774
775 if (ia_valid & ATTR_SIZE) {
776 loff_t oldsize = inode->i_size;
777 loff_t newsize = attr->ia_size;
778
779 if (newsize & ~huge_page_mask(h))
780 return -EINVAL;
781 /* protected by i_mutex */
782 if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
783 (newsize > oldsize && (info->seals & F_SEAL_GROW)))
784 return -EPERM;
785 error = hugetlb_vmtruncate(inode, newsize);
786 if (error)
787 return error;
788 }
789
790 setattr_copy(inode, attr);
791 mark_inode_dirty(inode);
792 return 0;
793}
794
795static struct inode *hugetlbfs_get_root(struct super_block *sb,
David Brazdil0f672f62019-12-10 10:32:29 +0000796 struct hugetlbfs_fs_context *ctx)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000797{
798 struct inode *inode;
799
800 inode = new_inode(sb);
801 if (inode) {
802 inode->i_ino = get_next_ino();
David Brazdil0f672f62019-12-10 10:32:29 +0000803 inode->i_mode = S_IFDIR | ctx->mode;
804 inode->i_uid = ctx->uid;
805 inode->i_gid = ctx->gid;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000806 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
807 inode->i_op = &hugetlbfs_dir_inode_operations;
808 inode->i_fop = &simple_dir_operations;
809 /* directory inodes start off with i_nlink == 2 (for "." entry) */
810 inc_nlink(inode);
811 lockdep_annotate_inode_mutex_key(inode);
812 }
813 return inode;
814}
815
816/*
817 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
818 * be taken from reclaim -- unlike regular filesystems. This needs an
819 * annotation because huge_pmd_share() does an allocation under hugetlb's
820 * i_mmap_rwsem.
821 */
822static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;
823
824static struct inode *hugetlbfs_get_inode(struct super_block *sb,
825 struct inode *dir,
826 umode_t mode, dev_t dev)
827{
828 struct inode *inode;
David Brazdil0f672f62019-12-10 10:32:29 +0000829 struct resv_map *resv_map = NULL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000830
David Brazdil0f672f62019-12-10 10:32:29 +0000831 /*
832 * Reserve maps are only needed for inodes that can have associated
833 * page allocations.
834 */
835 if (S_ISREG(mode) || S_ISLNK(mode)) {
836 resv_map = resv_map_alloc();
837 if (!resv_map)
838 return NULL;
839 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000840
841 inode = new_inode(sb);
842 if (inode) {
843 struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
844
845 inode->i_ino = get_next_ino();
846 inode_init_owner(inode, dir, mode);
847 lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
848 &hugetlbfs_i_mmap_rwsem_key);
849 inode->i_mapping->a_ops = &hugetlbfs_aops;
850 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
851 inode->i_mapping->private_data = resv_map;
852 info->seals = F_SEAL_SEAL;
853 switch (mode & S_IFMT) {
854 default:
855 init_special_inode(inode, mode, dev);
856 break;
857 case S_IFREG:
858 inode->i_op = &hugetlbfs_inode_operations;
859 inode->i_fop = &hugetlbfs_file_operations;
860 break;
861 case S_IFDIR:
862 inode->i_op = &hugetlbfs_dir_inode_operations;
863 inode->i_fop = &simple_dir_operations;
864
865 /* directory inodes start off with i_nlink == 2 (for "." entry) */
866 inc_nlink(inode);
867 break;
868 case S_IFLNK:
869 inode->i_op = &page_symlink_inode_operations;
870 inode_nohighmem(inode);
871 break;
872 }
873 lockdep_annotate_inode_mutex_key(inode);
David Brazdil0f672f62019-12-10 10:32:29 +0000874 } else {
875 if (resv_map)
876 kref_put(&resv_map->refs, resv_map_release);
877 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000878
879 return inode;
880}
881
882/*
883 * File creation. Allocate an inode, and we're done..
884 */
Olivier Deprez157378f2022-04-04 15:47:50 +0200885static int do_hugetlbfs_mknod(struct inode *dir,
886 struct dentry *dentry,
887 umode_t mode,
888 dev_t dev,
889 bool tmpfile)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000890{
891 struct inode *inode;
892 int error = -ENOSPC;
893
894 inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
895 if (inode) {
896 dir->i_ctime = dir->i_mtime = current_time(dir);
Olivier Deprez157378f2022-04-04 15:47:50 +0200897 if (tmpfile) {
898 d_tmpfile(dentry, inode);
899 } else {
900 d_instantiate(dentry, inode);
901 dget(dentry);/* Extra count - pin the dentry in core */
902 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000903 error = 0;
904 }
905 return error;
906}
907
Olivier Deprez157378f2022-04-04 15:47:50 +0200908static int hugetlbfs_mknod(struct inode *dir,
909 struct dentry *dentry, umode_t mode, dev_t dev)
910{
911 return do_hugetlbfs_mknod(dir, dentry, mode, dev, false);
912}
913
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000914static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
915{
916 int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
917 if (!retval)
918 inc_nlink(dir);
919 return retval;
920}
921
922static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
923{
924 return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
925}
926
Olivier Deprez157378f2022-04-04 15:47:50 +0200927static int hugetlbfs_tmpfile(struct inode *dir,
928 struct dentry *dentry, umode_t mode)
929{
930 return do_hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0, true);
931}
932
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000933static int hugetlbfs_symlink(struct inode *dir,
934 struct dentry *dentry, const char *symname)
935{
936 struct inode *inode;
937 int error = -ENOSPC;
938
939 inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
940 if (inode) {
941 int l = strlen(symname)+1;
942 error = page_symlink(inode, symname, l);
943 if (!error) {
944 d_instantiate(dentry, inode);
945 dget(dentry);
946 } else
947 iput(inode);
948 }
949 dir->i_ctime = dir->i_mtime = current_time(dir);
950
951 return error;
952}
953
954/*
955 * mark the head page dirty
956 */
957static int hugetlbfs_set_page_dirty(struct page *page)
958{
959 struct page *head = compound_head(page);
960
961 SetPageDirty(head);
962 return 0;
963}
964
965static int hugetlbfs_migrate_page(struct address_space *mapping,
966 struct page *newpage, struct page *page,
967 enum migrate_mode mode)
968{
969 int rc;
970
971 rc = migrate_huge_page_move_mapping(mapping, newpage, page);
972 if (rc != MIGRATEPAGE_SUCCESS)
973 return rc;
David Brazdil0f672f62019-12-10 10:32:29 +0000974
975 /*
	 * For hugetlb pages, page_private holds the subpool pointer; transfer
	 * it to the new page.  PagePrivate is not associated with page_private
	 * for hugetlb pages and cannot be set here, as only page_huge_active
	 * pages can be migrated.
980 */
981 if (page_private(page)) {
982 set_page_private(newpage, page_private(page));
983 set_page_private(page, 0);
984 }
985
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000986 if (mode != MIGRATE_SYNC_NO_COPY)
987 migrate_page_copy(newpage, page);
988 else
989 migrate_page_states(newpage, page);
990
991 return MIGRATEPAGE_SUCCESS;
992}
993
994static int hugetlbfs_error_remove_page(struct address_space *mapping,
995 struct page *page)
996{
997 struct inode *inode = mapping->host;
998 pgoff_t index = page->index;
999
1000 remove_huge_page(page);
1001 if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
1002 hugetlb_fix_reserve_counts(inode);
1003
1004 return 0;
1005}
1006
1007/*
1008 * Display the mount options in /proc/mounts.
1009 */
1010static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
1011{
1012 struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
1013 struct hugepage_subpool *spool = sbinfo->spool;
1014 unsigned long hpage_size = huge_page_size(sbinfo->hstate);
1015 unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
1016 char mod;
1017
1018 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
1019 seq_printf(m, ",uid=%u",
1020 from_kuid_munged(&init_user_ns, sbinfo->uid));
1021 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
1022 seq_printf(m, ",gid=%u",
1023 from_kgid_munged(&init_user_ns, sbinfo->gid));
1024 if (sbinfo->mode != 0755)
1025 seq_printf(m, ",mode=%o", sbinfo->mode);
1026 if (sbinfo->max_inodes != -1)
1027 seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);
1028
1029 hpage_size /= 1024;
1030 mod = 'K';
1031 if (hpage_size >= 1024) {
1032 hpage_size /= 1024;
1033 mod = 'M';
1034 }
1035 seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
1036 if (spool) {
1037 if (spool->max_hpages != -1)
1038 seq_printf(m, ",size=%llu",
1039 (unsigned long long)spool->max_hpages << hpage_shift);
1040 if (spool->min_hpages != -1)
1041 seq_printf(m, ",min_size=%llu",
1042 (unsigned long long)spool->min_hpages << hpage_shift);
1043 }
1044 return 0;
1045}
1046
1047static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
1048{
1049 struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
1050 struct hstate *h = hstate_inode(d_inode(dentry));
1051
1052 buf->f_type = HUGETLBFS_MAGIC;
1053 buf->f_bsize = huge_page_size(h);
1054 if (sbinfo) {
1055 spin_lock(&sbinfo->stat_lock);
1056 /* If no limits set, just report 0 for max/free/used
1057 * blocks, like simple_statfs() */
1058 if (sbinfo->spool) {
1059 long free_pages;
1060
1061 spin_lock(&sbinfo->spool->lock);
1062 buf->f_blocks = sbinfo->spool->max_hpages;
1063 free_pages = sbinfo->spool->max_hpages
1064 - sbinfo->spool->used_hpages;
1065 buf->f_bavail = buf->f_bfree = free_pages;
1066 spin_unlock(&sbinfo->spool->lock);
1067 buf->f_files = sbinfo->max_inodes;
1068 buf->f_ffree = sbinfo->free_inodes;
1069 }
1070 spin_unlock(&sbinfo->stat_lock);
1071 }
1072 buf->f_namelen = NAME_MAX;
1073 return 0;
1074}
1075
1076static void hugetlbfs_put_super(struct super_block *sb)
1077{
1078 struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);
1079
1080 if (sbi) {
1081 sb->s_fs_info = NULL;
1082
1083 if (sbi->spool)
1084 hugepage_put_subpool(sbi->spool);
1085
1086 kfree(sbi);
1087 }
1088}
1089
1090static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
1091{
1092 if (sbinfo->free_inodes >= 0) {
1093 spin_lock(&sbinfo->stat_lock);
1094 if (unlikely(!sbinfo->free_inodes)) {
1095 spin_unlock(&sbinfo->stat_lock);
1096 return 0;
1097 }
1098 sbinfo->free_inodes--;
1099 spin_unlock(&sbinfo->stat_lock);
1100 }
1101
1102 return 1;
1103}
1104
1105static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
1106{
1107 if (sbinfo->free_inodes >= 0) {
1108 spin_lock(&sbinfo->stat_lock);
1109 sbinfo->free_inodes++;
1110 spin_unlock(&sbinfo->stat_lock);
1111 }
1112}
1113
1114
1115static struct kmem_cache *hugetlbfs_inode_cachep;
1116
1117static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
1118{
1119 struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
1120 struct hugetlbfs_inode_info *p;
1121
1122 if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
1123 return NULL;
1124 p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
1125 if (unlikely(!p)) {
1126 hugetlbfs_inc_free_inodes(sbinfo);
1127 return NULL;
1128 }
1129
1130 /*
1131 * Any time after allocation, hugetlbfs_destroy_inode can be called
1132 * for the inode. mpol_free_shared_policy is unconditionally called
1133 * as part of hugetlbfs_destroy_inode. So, initialize policy here
1134 * in case of a quick call to destroy.
1135 *
1136 * Note that the policy is initialized even if we are creating a
1137 * private inode. This simplifies hugetlbfs_destroy_inode.
1138 */
1139 mpol_shared_policy_init(&p->policy, NULL);
1140
1141 return &p->vfs_inode;
1142}
1143
David Brazdil0f672f62019-12-10 10:32:29 +00001144static void hugetlbfs_free_inode(struct inode *inode)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001145{
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001146 kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
1147}
1148
1149static void hugetlbfs_destroy_inode(struct inode *inode)
1150{
1151 hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
1152 mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001153}
1154
1155static const struct address_space_operations hugetlbfs_aops = {
1156 .write_begin = hugetlbfs_write_begin,
1157 .write_end = hugetlbfs_write_end,
1158 .set_page_dirty = hugetlbfs_set_page_dirty,
1159 .migratepage = hugetlbfs_migrate_page,
1160 .error_remove_page = hugetlbfs_error_remove_page,
1161};
1162
1163
1164static void init_once(void *foo)
1165{
1166 struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;
1167
1168 inode_init_once(&ei->vfs_inode);
1169}
1170
1171const struct file_operations hugetlbfs_file_operations = {
1172 .read_iter = hugetlbfs_read_iter,
1173 .mmap = hugetlbfs_file_mmap,
1174 .fsync = noop_fsync,
1175 .get_unmapped_area = hugetlb_get_unmapped_area,
1176 .llseek = default_llseek,
1177 .fallocate = hugetlbfs_fallocate,
1178};
1179
1180static const struct inode_operations hugetlbfs_dir_inode_operations = {
1181 .create = hugetlbfs_create,
1182 .lookup = simple_lookup,
1183 .link = simple_link,
1184 .unlink = simple_unlink,
1185 .symlink = hugetlbfs_symlink,
1186 .mkdir = hugetlbfs_mkdir,
1187 .rmdir = simple_rmdir,
1188 .mknod = hugetlbfs_mknod,
1189 .rename = simple_rename,
1190 .setattr = hugetlbfs_setattr,
Olivier Deprez157378f2022-04-04 15:47:50 +02001191 .tmpfile = hugetlbfs_tmpfile,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001192};
1193
1194static const struct inode_operations hugetlbfs_inode_operations = {
1195 .setattr = hugetlbfs_setattr,
1196};
1197
1198static const struct super_operations hugetlbfs_ops = {
1199 .alloc_inode = hugetlbfs_alloc_inode,
David Brazdil0f672f62019-12-10 10:32:29 +00001200 .free_inode = hugetlbfs_free_inode,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001201 .destroy_inode = hugetlbfs_destroy_inode,
1202 .evict_inode = hugetlbfs_evict_inode,
1203 .statfs = hugetlbfs_statfs,
1204 .put_super = hugetlbfs_put_super,
1205 .show_options = hugetlbfs_show_options,
1206};
1207
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001208/*
1209 * Convert size option passed from command line to number of huge pages
1210 * in the pool specified by hstate. Size option could be in bytes
1211 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
1212 */
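/*
 * For example (illustrative numbers): with 2 MiB huge pages
 * (huge_page_shift == 21) and a pool of 512 pages, "size=50%" converts to
 * (50 << 21) * 512 / 100 >> 21 = 256 pages, while "size=1G" converts to
 * 1G >> 21 = 512 pages.
 */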
1213static long
1214hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
1215 enum hugetlbfs_size_type val_type)
1216{
1217 if (val_type == NO_SIZE)
1218 return -1;
1219
1220 if (val_type == SIZE_PERCENT) {
1221 size_opt <<= huge_page_shift(h);
1222 size_opt *= h->max_huge_pages;
1223 do_div(size_opt, 100);
1224 }
1225
1226 size_opt >>= huge_page_shift(h);
1227 return size_opt;
1228}
1229
David Brazdil0f672f62019-12-10 10:32:29 +00001230/*
1231 * Parse one mount parameter.
1232 */
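/*
 * A typical mount using these options might look like (hypothetical values):
 *
 *   mount -t hugetlbfs -o uid=1000,gid=1000,mode=1770,pagesize=2M,size=50%,min_size=128M,nr_inodes=64 none /mnt/huge
 */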
1233static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001234{
David Brazdil0f672f62019-12-10 10:32:29 +00001235 struct hugetlbfs_fs_context *ctx = fc->fs_private;
1236 struct fs_parse_result result;
1237 char *rest;
1238 unsigned long ps;
1239 int opt;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001240
Olivier Deprez157378f2022-04-04 15:47:50 +02001241 opt = fs_parse(fc, hugetlb_fs_parameters, param, &result);
David Brazdil0f672f62019-12-10 10:32:29 +00001242 if (opt < 0)
1243 return opt;
1244
1245 switch (opt) {
1246 case Opt_uid:
1247 ctx->uid = make_kuid(current_user_ns(), result.uint_32);
1248 if (!uid_valid(ctx->uid))
1249 goto bad_val;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001250 return 0;
1251
David Brazdil0f672f62019-12-10 10:32:29 +00001252 case Opt_gid:
1253 ctx->gid = make_kgid(current_user_ns(), result.uint_32);
1254 if (!gid_valid(ctx->gid))
1255 goto bad_val;
1256 return 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001257
David Brazdil0f672f62019-12-10 10:32:29 +00001258 case Opt_mode:
1259 ctx->mode = result.uint_32 & 01777U;
1260 return 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001261
David Brazdil0f672f62019-12-10 10:32:29 +00001262 case Opt_size:
1263 /* memparse() will accept a K/M/G without a digit */
1264 if (!isdigit(param->string[0]))
1265 goto bad_val;
1266 ctx->max_size_opt = memparse(param->string, &rest);
1267 ctx->max_val_type = SIZE_STD;
1268 if (*rest == '%')
1269 ctx->max_val_type = SIZE_PERCENT;
1270 return 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001271
David Brazdil0f672f62019-12-10 10:32:29 +00001272 case Opt_nr_inodes:
1273 /* memparse() will accept a K/M/G without a digit */
1274 if (!isdigit(param->string[0]))
1275 goto bad_val;
1276 ctx->nr_inodes = memparse(param->string, &rest);
1277 return 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001278
David Brazdil0f672f62019-12-10 10:32:29 +00001279 case Opt_pagesize:
1280 ps = memparse(param->string, &rest);
1281 ctx->hstate = size_to_hstate(ps);
1282 if (!ctx->hstate) {
1283 pr_err("Unsupported page size %lu MB\n", ps >> 20);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001284 return -EINVAL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001285 }
David Brazdil0f672f62019-12-10 10:32:29 +00001286 return 0;
1287
1288 case Opt_min_size:
1289 /* memparse() will accept a K/M/G without a digit */
1290 if (!isdigit(param->string[0]))
1291 goto bad_val;
1292 ctx->min_size_opt = memparse(param->string, &rest);
1293 ctx->min_val_type = SIZE_STD;
1294 if (*rest == '%')
1295 ctx->min_val_type = SIZE_PERCENT;
1296 return 0;
1297
1298 default:
1299 return -EINVAL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001300 }
1301
David Brazdil0f672f62019-12-10 10:32:29 +00001302bad_val:
Olivier Deprez157378f2022-04-04 15:47:50 +02001303 return invalfc(fc, "Bad value '%s' for mount option '%s'\n",
David Brazdil0f672f62019-12-10 10:32:29 +00001304 param->string, param->key);
1305}
1306
1307/*
1308 * Validate the parsed options.
1309 */
1310static int hugetlbfs_validate(struct fs_context *fc)
1311{
1312 struct hugetlbfs_fs_context *ctx = fc->fs_private;
1313
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001314 /*
1315 * Use huge page pool size (in hstate) to convert the size
1316 * options to number of huge pages. If NO_SIZE, -1 is returned.
1317 */
David Brazdil0f672f62019-12-10 10:32:29 +00001318 ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
1319 ctx->max_size_opt,
1320 ctx->max_val_type);
1321 ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
1322 ctx->min_size_opt,
1323 ctx->min_val_type);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001324
1325 /*
1326 * If max_size was specified, then min_size must be smaller
1327 */
David Brazdil0f672f62019-12-10 10:32:29 +00001328 if (ctx->max_val_type > NO_SIZE &&
1329 ctx->min_hpages > ctx->max_hpages) {
1330 pr_err("Minimum size can not be greater than maximum size\n");
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001331 return -EINVAL;
1332 }
1333
1334 return 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001335}
1336
1337static int
David Brazdil0f672f62019-12-10 10:32:29 +00001338hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001339{
David Brazdil0f672f62019-12-10 10:32:29 +00001340 struct hugetlbfs_fs_context *ctx = fc->fs_private;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001341 struct hugetlbfs_sb_info *sbinfo;
1342
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001343 sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
1344 if (!sbinfo)
1345 return -ENOMEM;
1346 sb->s_fs_info = sbinfo;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001347 spin_lock_init(&sbinfo->stat_lock);
David Brazdil0f672f62019-12-10 10:32:29 +00001348 sbinfo->hstate = ctx->hstate;
1349 sbinfo->max_inodes = ctx->nr_inodes;
1350 sbinfo->free_inodes = ctx->nr_inodes;
1351 sbinfo->spool = NULL;
1352 sbinfo->uid = ctx->uid;
1353 sbinfo->gid = ctx->gid;
1354 sbinfo->mode = ctx->mode;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001355
1356 /*
1357 * Allocate and initialize subpool if maximum or minimum size is
	 * specified.  Any needed reservations (for minimum size) are
	 * taken when the subpool is created.
1360 */
David Brazdil0f672f62019-12-10 10:32:29 +00001361 if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
1362 sbinfo->spool = hugepage_new_subpool(ctx->hstate,
1363 ctx->max_hpages,
1364 ctx->min_hpages);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001365 if (!sbinfo->spool)
1366 goto out_free;
1367 }
1368 sb->s_maxbytes = MAX_LFS_FILESIZE;
David Brazdil0f672f62019-12-10 10:32:29 +00001369 sb->s_blocksize = huge_page_size(ctx->hstate);
1370 sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001371 sb->s_magic = HUGETLBFS_MAGIC;
1372 sb->s_op = &hugetlbfs_ops;
1373 sb->s_time_gran = 1;
Olivier Deprez0e641232021-09-23 10:07:05 +02001374
1375 /*
1376 * Due to the special and limited functionality of hugetlbfs, it does
1377 * not work well as a stacking filesystem.
1378 */
1379 sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
David Brazdil0f672f62019-12-10 10:32:29 +00001380 sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001381 if (!sb->s_root)
1382 goto out_free;
1383 return 0;
1384out_free:
1385 kfree(sbinfo->spool);
1386 kfree(sbinfo);
1387 return -ENOMEM;
1388}
1389
David Brazdil0f672f62019-12-10 10:32:29 +00001390static int hugetlbfs_get_tree(struct fs_context *fc)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001391{
David Brazdil0f672f62019-12-10 10:32:29 +00001392 int err = hugetlbfs_validate(fc);
1393 if (err)
1394 return err;
1395 return get_tree_nodev(fc, hugetlbfs_fill_super);
1396}
1397
1398static void hugetlbfs_fs_context_free(struct fs_context *fc)
1399{
1400 kfree(fc->fs_private);
1401}
1402
1403static const struct fs_context_operations hugetlbfs_fs_context_ops = {
1404 .free = hugetlbfs_fs_context_free,
1405 .parse_param = hugetlbfs_parse_param,
1406 .get_tree = hugetlbfs_get_tree,
1407};
1408
1409static int hugetlbfs_init_fs_context(struct fs_context *fc)
1410{
1411 struct hugetlbfs_fs_context *ctx;
1412
1413 ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
1414 if (!ctx)
1415 return -ENOMEM;
1416
1417 ctx->max_hpages = -1; /* No limit on size by default */
1418 ctx->nr_inodes = -1; /* No limit on number of inodes by default */
1419 ctx->uid = current_fsuid();
1420 ctx->gid = current_fsgid();
1421 ctx->mode = 0755;
1422 ctx->hstate = &default_hstate;
1423 ctx->min_hpages = -1; /* No default minimum size */
1424 ctx->max_val_type = NO_SIZE;
1425 ctx->min_val_type = NO_SIZE;
1426 fc->fs_private = ctx;
1427 fc->ops = &hugetlbfs_fs_context_ops;
1428 return 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001429}
1430
1431static struct file_system_type hugetlbfs_fs_type = {
David Brazdil0f672f62019-12-10 10:32:29 +00001432 .name = "hugetlbfs",
1433 .init_fs_context = hugetlbfs_init_fs_context,
Olivier Deprez157378f2022-04-04 15:47:50 +02001434 .parameters = hugetlb_fs_parameters,
David Brazdil0f672f62019-12-10 10:32:29 +00001435 .kill_sb = kill_litter_super,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001436};
1437
1438static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
1439
1440static int can_do_hugetlb_shm(void)
1441{
1442 kgid_t shm_group;
1443 shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
1444 return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
1445}
1446
1447static int get_hstate_idx(int page_size_log)
1448{
1449 struct hstate *h = hstate_sizelog(page_size_log);
1450
1451 if (!h)
1452 return -1;
1453 return h - hstates;
1454}
1455
1456/*
 * Note that size should be aligned to the proper hugepage size by the caller,
 * otherwise hugetlb_reserve_pages() reserves one fewer huge page than intended.
1459 */
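/*
 * hugetlb_file_setup() is the entry point for in-kernel users that need an
 * unlinked hugetlbfs file, e.g. SHM_HUGETLB segments and MAP_HUGETLB
 * mappings, backed by the internal mounts set up in init_hugetlbfs_fs().
 */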
1460struct file *hugetlb_file_setup(const char *name, size_t size,
1461 vm_flags_t acctflag, struct user_struct **user,
1462 int creat_flags, int page_size_log)
1463{
1464 struct inode *inode;
1465 struct vfsmount *mnt;
1466 int hstate_idx;
1467 struct file *file;
1468
1469 hstate_idx = get_hstate_idx(page_size_log);
1470 if (hstate_idx < 0)
1471 return ERR_PTR(-ENODEV);
1472
1473 *user = NULL;
1474 mnt = hugetlbfs_vfsmount[hstate_idx];
1475 if (!mnt)
1476 return ERR_PTR(-ENOENT);
1477
1478 if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
1479 *user = current_user();
1480 if (user_shm_lock(size, *user)) {
1481 task_lock(current);
1482 pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
1483 current->comm, current->pid);
1484 task_unlock(current);
1485 } else {
1486 *user = NULL;
1487 return ERR_PTR(-EPERM);
1488 }
1489 }
1490
1491 file = ERR_PTR(-ENOSPC);
1492 inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0);
1493 if (!inode)
1494 goto out;
1495 if (creat_flags == HUGETLB_SHMFS_INODE)
1496 inode->i_flags |= S_PRIVATE;
1497
1498 inode->i_size = size;
1499 clear_nlink(inode);
1500
1501 if (hugetlb_reserve_pages(inode, 0,
1502 size >> huge_page_shift(hstate_inode(inode)), NULL,
1503 acctflag))
1504 file = ERR_PTR(-ENOMEM);
1505 else
1506 file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
1507 &hugetlbfs_file_operations);
1508 if (!IS_ERR(file))
1509 return file;
1510
1511 iput(inode);
1512out:
1513 if (*user) {
1514 user_shm_unlock(size, *user);
1515 *user = NULL;
1516 }
1517 return file;
1518}
1519
David Brazdil0f672f62019-12-10 10:32:29 +00001520static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
1521{
1522 struct fs_context *fc;
1523 struct vfsmount *mnt;
1524
1525 fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
1526 if (IS_ERR(fc)) {
1527 mnt = ERR_CAST(fc);
1528 } else {
1529 struct hugetlbfs_fs_context *ctx = fc->fs_private;
1530 ctx->hstate = h;
1531 mnt = fc_mount(fc);
1532 put_fs_context(fc);
1533 }
1534 if (IS_ERR(mnt))
1535 pr_err("Cannot mount internal hugetlbfs for page size %uK",
1536 1U << (h->order + PAGE_SHIFT - 10));
1537 return mnt;
1538}
1539
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001540static int __init init_hugetlbfs_fs(void)
1541{
David Brazdil0f672f62019-12-10 10:32:29 +00001542 struct vfsmount *mnt;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001543 struct hstate *h;
1544 int error;
1545 int i;
1546
1547 if (!hugepages_supported()) {
1548 pr_info("disabling because there are no supported hugepage sizes\n");
1549 return -ENOTSUPP;
1550 }
1551
1552 error = -ENOMEM;
1553 hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
1554 sizeof(struct hugetlbfs_inode_info),
1555 0, SLAB_ACCOUNT, init_once);
1556 if (hugetlbfs_inode_cachep == NULL)
Olivier Deprez0e641232021-09-23 10:07:05 +02001557 goto out;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001558
1559 error = register_filesystem(&hugetlbfs_fs_type);
1560 if (error)
Olivier Deprez0e641232021-09-23 10:07:05 +02001561 goto out_free;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001562
Olivier Deprez0e641232021-09-23 10:07:05 +02001563 /* default hstate mount is required */
1564 mnt = mount_one_hugetlbfs(&hstates[default_hstate_idx]);
1565 if (IS_ERR(mnt)) {
1566 error = PTR_ERR(mnt);
1567 goto out_unreg;
1568 }
1569 hugetlbfs_vfsmount[default_hstate_idx] = mnt;
1570
1571 /* other hstates are optional */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001572 i = 0;
1573 for_each_hstate(h) {
Olivier Deprez0e641232021-09-23 10:07:05 +02001574 if (i == default_hstate_idx) {
1575 i++;
1576 continue;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001577 }
Olivier Deprez0e641232021-09-23 10:07:05 +02001578
1579 mnt = mount_one_hugetlbfs(h);
1580 if (IS_ERR(mnt))
1581 hugetlbfs_vfsmount[i] = NULL;
1582 else
1583 hugetlbfs_vfsmount[i] = mnt;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001584 i++;
1585 }
David Brazdil0f672f62019-12-10 10:32:29 +00001586
1587 return 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001588
Olivier Deprez0e641232021-09-23 10:07:05 +02001589 out_unreg:
1590 (void)unregister_filesystem(&hugetlbfs_fs_type);
1591 out_free:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001592 kmem_cache_destroy(hugetlbfs_inode_cachep);
Olivier Deprez0e641232021-09-23 10:07:05 +02001593 out:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001594 return error;
1595}
1596fs_initcall(init_hugetlbfs_fs)