/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched/signal.h>		/* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

struct hugetlbfs_fs_context {
	struct hstate		*hstate;
	unsigned long long	max_size_opt;
	unsigned long long	min_size_opt;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	enum hugetlbfs_size_type max_val_type;
	enum hugetlbfs_size_type min_val_type;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};

int sysctl_hugetlb_shm_group;

enum hugetlb_param {
	Opt_gid,
	Opt_min_size,
	Opt_mode,
	Opt_nr_inodes,
	Opt_pagesize,
	Opt_size,
	Opt_uid,
};

static const struct fs_parameter_spec hugetlb_param_specs[] = {
	fsparam_u32   ("gid",		Opt_gid),
	fsparam_string("min_size",	Opt_min_size),
	fsparam_u32oct("mode",		Opt_mode),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("pagesize",	Opt_pagesize),
	fsparam_string("size",		Opt_size),
	fsparam_u32   ("uid",		Opt_uid),
	{}
};

static const struct fs_parameter_description hugetlb_fs_parameters = {
	.name		= "hugetlbfs",
	.specs		= hugetlb_param_specs,
};
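
/*
 * For illustration, a mount using the parameters above might look like
 * this (all option values here are hypothetical):
 *
 *   mount -t hugetlbfs -o pagesize=2M,size=2G,min_size=1G,nr_inodes=64, \
 *         uid=1000,gid=1000,mode=0770 none /mnt/huge
 */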

#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif

static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}

/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) <<  (BITS_PER_LONG - (PAGE_SHIFT + 1)))
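
/*
 * Worked example (assuming BITS_PER_LONG == 64 and PAGE_SHIFT == 12):
 * the mask is ((1UL << 13) - 1) << 51 == 0xfff8000000000000, i.e. the
 * top PAGE_SHIFT + 1 bits.  A vm_pgoff with any of these bits set would
 * overflow the sign bit of a loff_t once shifted left by PAGE_SHIFT.
 */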

static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * that the is_vm_hugetlb_page() tests below unmap_region() go
	 * the right way when do_mmap_pgoff unwinds (may be important
	 * on powerpc and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	ret = seal_check_future_write(info->seals, vma);
	if (ret)
		return ret;

	/*
	 * page based offset in vm_pgoff could be sufficiently large to
	 * overflow a loff_t when converted to byte offset.  This can
	 * only happen on architectures where sizeof(loff_t) ==
	 * sizeof(unsigned long).  So, only check in those instances.
	 */
	if (sizeof(unsigned long) == sizeof(loff_t)) {
		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
			return -EINVAL;
	}

	/* must be huge page aligned */
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;
	if (hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}

/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
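	/*
	 * A sketch of the alignment math (assuming 2MB huge pages on a
	 * 4KB base-page system): PAGE_MASK & ~huge_page_mask(h) ==
	 * 0x1ff000, so vm_unmapped_area() must return an address with
	 * the nine page-offset bits between PAGE_SHIFT and the huge
	 * page shift all clear, i.e. a 2MB aligned address.
	 */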
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
#endif

static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			struct iov_iter *to, unsigned long size)
{
	size_t copied = 0;
	int i, chunksize;

	/* Find which 4k chunk and offset within that chunk */
	i = offset >> PAGE_SHIFT;
	offset = offset & ~PAGE_MASK;

	while (size) {
		size_t n;
		chunksize = PAGE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		n = copy_page_to_iter(&page[i], offset, chunksize, to);
		copied += n;
		if (n != chunksize)
			return copied;
		offset = 0;
		size -= chunksize;
		i++;
	}
	return copied;
}

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  It's *very* similar to do_generic_mapping_read(), but we can't use
 * that since it has PAGE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
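		/*
		 * Example (assuming 2MB huge pages): for a 3MB file,
		 * end_index is 1 and, on that final page,
		 * nr = ((3MB - 1) & ~huge_page_mask(h)) + 1 == 1MB,
		 * so only the bytes that actually exist are copied out.
		 */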
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = hugetlbfs_read_actor(page, offset, to, nr);
			put_page(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void remove_huge_page(struct page *page)
{
	ClearPageDirty(page);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}

static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after
	 * start should be unmapped.
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
		unsigned long v_offset;
		unsigned long v_end;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond 4GB.
		 */
		if (vma->vm_pgoff < start)
			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		if (!end)
			v_end = vma->vm_end;
		else {
			v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
							+ vma->vm_start;
			if (v_end > vma->vm_end)
				v_end = vma->vm_end;
		}

		unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
									NULL);
	}
}

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
 *	maps and global counts.  Page faults cannot race with truncation
 *	in this routine.  hugetlb_no_page() prevents page faults in the
 *	truncated range.  It checks i_size before allocation, and again after
 *	with the page table lock for the page held.  The same lock must be
 *	acquired to unmap a page.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserv map
 *	deleted.  The region/reserv map for ranges without associated
 *	pages are not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	const pgoff_t end = lend >> huge_page_shift(h);
	struct vm_area_struct pseudo_vma;
	struct pagevec pvec;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	vma_init(&pseudo_vma, current->mm);
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pagevec_init(&pvec);
	next = start;
	while (next < end) {
		/*
		 * When no more pages are found, we are done.
		 */
		if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
			break;

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];
			u32 hash;

			index = page->index;
			hash = hugetlb_fault_mutex_hash(h, mapping, index);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);

			/*
			 * If page is mapped, it was faulted in after being
			 * unmapped in caller.  Unmap (again) now after taking
			 * the fault mutex.  The mutex will prevent faults
			 * until we finish removing the page.
			 *
			 * This race can only happen in the hole punch case.
			 * Getting here in a truncate operation is a bug.
			 */
			if (unlikely(page_mapped(page))) {
				BUG_ON(truncate_op);

				i_mmap_lock_write(mapping);
				hugetlb_vmdelete_list(&mapping->i_mmap,
					index * pages_per_huge_page(h),
					(index + 1) * pages_per_huge_page(h));
				i_mmap_unlock_write(mapping);
			}

			lock_page(page);
			/*
			 * We must free the huge page and remove from page
			 * cache (remove_huge_page) BEFORE removing the
			 * region/reserve map (hugetlb_unreserve_pages).  In
			 * rare out of memory conditions, removal of the
			 * region/reserve map could fail.  Correspondingly,
			 * the subpool and global reserve usage count may
			 * need to be adjusted.
			 */
			VM_BUG_ON(PagePrivate(page));
			remove_huge_page(page);
			freed++;
			if (!truncate_op) {
				if (unlikely(hugetlb_unreserve_pages(inode,
							index, index + 1, 1)))
					hugetlb_fix_reserve_counts(inode);
			}

			unlock_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		huge_pagevec_release(&pvec);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);

	/*
	 * Get the resv_map from the address space embedded in the inode.
	 * This is the address space which points to any resv_map allocated
	 * at inode creation time.  If this is a device special inode,
	 * i_mapping may not point to the original address space.
	 */
	resv_map = (struct resv_map *)(&inode->i_data)->private_data;
	/* Only regular and link inodes have associated reserve maps */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	i_mmap_lock_write(mapping);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
	return 0;
}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * For hole punch, round up the beginning offset of the hole and
	 * round down the end.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);
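	/*
	 * Example (assuming 2MB huge pages): punching offset = 1MB,
	 * len = 4MB gives hole_start = 2MB and hole_end = 4MB, so only
	 * the one fully covered huge page is removed; the partially
	 * covered pages at either end are left intact.
	 */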

	if (hole_end > hole_start) {
		struct address_space *mapping = inode->i_mapping;
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode_lock(inode);

		/* protected by i_mutex */
		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
			inode_unlock(inode);
			return -EPERM;
		}

		i_mmap_lock_write(mapping);
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
						hole_start >> PAGE_SHIFT,
						hole_end  >> PAGE_SHIFT);
		i_mmap_unlock_write(mapping);
		remove_inode_hugepages(inode, hole_start, hole_end);
		inode_unlock(inode);
	}

	return 0;
}

static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;
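	/*
	 * Example (assuming 2MB huge pages): offset = 1MB, len = 2MB
	 * gives start = 0 and end = 2, so indices 0 and 1 are both
	 * preallocated and the range is widened to cover [0, 4MB).
	 */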

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.  If NUMA is configured, use page index
	 * as input to create an allocation policy.
	 */
	vma_init(&pseudo_vma, mm);
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct page *page;
		unsigned long addr;
		int avoid_reserve = 0;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Set numa allocation policy based on index */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/* mutex taken here, fault path and hole punch */
		hash = hugetlb_fault_mutex_hash(h, mapping, index);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		page = find_get_page(mapping, index);
		if (page) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}

		/* Allocate page and add to page cache */
		page = alloc_huge_page(&pseudo_vma, addr, avoid_reserve);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(page)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, addr, pages_per_huge_page(h));
		__SetPageUptodate(page);
		error = huge_add_to_page_cache(page, mapping, index);
		if (unlikely(error)) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		set_page_huge_active(page);
		/*
		 * unlock_page because it was locked by add_to_page_cache();
		 * put_page() drops the reference taken in alloc_huge_page().
		 */
		unlock_page(page);
		put_page(page);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
out:
	inode_unlock(inode);
	return error;
}

static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

	BUG_ON(!inode);

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize & ~huge_page_mask(h))
			return -EINVAL;
		/* protected by i_mutex */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;
		error = hugetlb_vmtruncate(inode, newsize);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_fs_context *ctx)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | ctx->mode;
		inode->i_uid = ctx->uid;
		inode->i_gid = ctx->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems.  This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map = NULL;

	/*
	 * Reserve maps are only needed for inodes that can have associated
	 * page allocations.
	 */
	if (S_ISREG(mode) || S_ISLNK(mode)) {
		resv_map = resv_map_alloc();
		if (!resv_map)
			return NULL;
	}

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_mapping->private_data = resv_map;
		info->seals = F_SEAL_SEAL;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else {
		if (resv_map)
			kref_put(&resv_map->refs, resv_map_release);
	}

	return inode;
}

/*
 * File creation.  Allocate an inode, and we're done.
 */
static int hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = current_time(dir);
		d_instantiate(dentry, inode);
		dget(dentry);	/* Extra count - pin the dentry in core */
		error = 0;
	}
	return error;
}

static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
	return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = current_time(dir);

	return error;
}

/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
	struct page *head = compound_head(page);

	SetPageDirty(head);
	return 0;
}

static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	/*
	 * page_private is the subpool pointer in hugetlb pages.  Transfer it
	 * to the new page.  PagePrivate is not associated with page_private
	 * for hugetlb pages and cannot be set here as only page_huge_active
	 * pages can be migrated.
	 */
	if (page_private(page)) {
		set_page_private(newpage, page_private(page));
		set_page_private(page, 0);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}

static int hugetlbfs_error_remove_page(struct address_space *mapping,
				struct page *page)
{
	struct inode *inode = mapping->host;
	pgoff_t index = page->index;

	remove_huge_page(page);
	if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
		hugetlb_fix_reserve_counts(inode);

	return 0;
}

/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		hpage_size /= 1024;
		mod = 'M';
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
	}
	return 0;
}

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}


static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}

	/*
	 * Any time after allocation, hugetlbfs_destroy_inode can be called
	 * for the inode.  mpol_free_shared_policy is unconditionally called
	 * as part of hugetlbfs_destroy_inode.  So, initialize policy here
	 * in case of a quick call to destroy.
	 *
	 * Note that the policy is initialized even if we are creating a
	 * private inode.  This simplifies hugetlbfs_destroy_inode.
	 */
	mpol_shared_policy_init(&p->policy, NULL);

	return &p->vfs_inode;
}

static void hugetlbfs_free_inode(struct inode *inode)
{
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
}

static const struct address_space_operations hugetlbfs_aops = {
	.write_begin	= hugetlbfs_write_begin,
	.write_end	= hugetlbfs_write_end,
	.set_page_dirty	= hugetlbfs_set_page_dirty,
	.migratepage	= hugetlbfs_migrate_page,
	.error_remove_page	= hugetlbfs_error_remove_page,
};


static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

	inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
	.read_iter		= hugetlbfs_read_iter,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
	.fallocate		= hugetlbfs_fallocate,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode	= hugetlbfs_alloc_inode,
	.free_inode	= hugetlbfs_free_inode,
	.destroy_inode	= hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= hugetlbfs_show_options,
};

/*
 * Convert the size option passed on the command line to a number of huge
 * pages in the pool specified by hstate.  The size option can be in bytes
 * (val_type == SIZE_STD) or a percentage of the pool (val_type == SIZE_PERCENT).
 */
static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
			 enum hugetlbfs_size_type val_type)
{
	if (val_type == NO_SIZE)
		return -1;

	if (val_type == SIZE_PERCENT) {
		size_opt <<= huge_page_shift(h);
		size_opt *= h->max_huge_pages;
		do_div(size_opt, 100);
	}

	size_opt >>= huge_page_shift(h);
	return size_opt;
}
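
/*
 * Worked example for hugetlbfs_size_to_hpages() above, assuming 2MB huge
 * pages: "size=1G" yields 512 huge pages (SIZE_STD), while "size=50%"
 * against a 512-page pool yields 256 (SIZE_PERCENT).
 */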

/*
 * Parse one mount parameter.
 */
static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	char *rest;
	unsigned long ps;
	int opt;

	opt = fs_parse(fc, &hugetlb_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_uid:
		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
		if (!uid_valid(ctx->uid))
			goto bad_val;
		return 0;

	case Opt_gid:
		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
		if (!gid_valid(ctx->gid))
			goto bad_val;
		return 0;

	case Opt_mode:
		ctx->mode = result.uint_32 & 01777U;
		return 0;

	case Opt_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!isdigit(param->string[0]))
			goto bad_val;
		ctx->max_size_opt = memparse(param->string, &rest);
		ctx->max_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->max_val_type = SIZE_PERCENT;
		return 0;

	case Opt_nr_inodes:
		/* memparse() will accept a K/M/G without a digit */
		if (!isdigit(param->string[0]))
			goto bad_val;
		ctx->nr_inodes = memparse(param->string, &rest);
		return 0;

	case Opt_pagesize:
		ps = memparse(param->string, &rest);
		ctx->hstate = size_to_hstate(ps);
		if (!ctx->hstate) {
			pr_err("Unsupported page size %lu MB\n", ps >> 20);
			return -EINVAL;
		}
		return 0;

	case Opt_min_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!isdigit(param->string[0]))
			goto bad_val;
		ctx->min_size_opt = memparse(param->string, &rest);
		ctx->min_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->min_val_type = SIZE_PERCENT;
		return 0;

	default:
		return -EINVAL;
	}

bad_val:
	return invalf(fc, "hugetlbfs: Bad value '%s' for mount option '%s'\n",
		      param->string, param->key);
}

/*
 * Validate the parsed options.
 */
static int hugetlbfs_validate(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;

	/*
	 * Use huge page pool size (in hstate) to convert the size
	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
	 */
	ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->max_size_opt,
						   ctx->max_val_type);
	ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->min_size_opt,
						   ctx->min_val_type);

	/*
	 * If max_size was specified, then min_size must not be larger.
	 */
	if (ctx->max_val_type > NO_SIZE &&
	    ctx->min_hpages > ctx->max_hpages) {
		pr_err("Minimum size can not be greater than maximum size\n");
		return -EINVAL;
	}

	return 0;
}

static int
hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct hugetlbfs_sb_info *sbinfo;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->hstate		= ctx->hstate;
	sbinfo->max_inodes	= ctx->nr_inodes;
	sbinfo->free_inodes	= ctx->nr_inodes;
	sbinfo->spool		= NULL;
	sbinfo->uid		= ctx->uid;
	sbinfo->gid		= ctx->gid;
	sbinfo->mode		= ctx->mode;

	/*
	 * Allocate and initialize subpool if maximum or minimum size is
	 * specified.  Any needed reservations (for minimum size) are taken
	 * when the subpool is created.
	 */
	if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
		sbinfo->spool = hugepage_new_subpool(ctx->hstate,
						     ctx->max_hpages,
						     ctx->min_hpages);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(ctx->hstate);
	sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;

	/*
	 * Due to the special and limited functionality of hugetlbfs, it does
	 * not work well as a stacking filesystem.
	 */
	sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}

static int hugetlbfs_get_tree(struct fs_context *fc)
{
	int err = hugetlbfs_validate(fc);
	if (err)
		return err;
	return get_tree_nodev(fc, hugetlbfs_fill_super);
}

static void hugetlbfs_fs_context_free(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static const struct fs_context_operations hugetlbfs_fs_context_ops = {
	.free		= hugetlbfs_fs_context_free,
	.parse_param	= hugetlbfs_parse_param,
	.get_tree	= hugetlbfs_get_tree,
};

static int hugetlbfs_init_fs_context(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx;

	ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->max_hpages	= -1; /* No limit on size by default */
	ctx->nr_inodes	= -1; /* No limit on number of inodes by default */
	ctx->uid	= current_fsuid();
	ctx->gid	= current_fsgid();
	ctx->mode	= 0755;
	ctx->hstate	= &default_hstate;
	ctx->min_hpages	= -1; /* No default minimum size */
	ctx->max_val_type = NO_SIZE;
	ctx->min_val_type = NO_SIZE;
	fc->fs_private = ctx;
	fc->ops	= &hugetlbfs_fs_context_ops;
	return 0;
}

static struct file_system_type hugetlbfs_fs_type = {
	.name			= "hugetlbfs",
	.init_fs_context	= hugetlbfs_init_fs_context,
	.parameters		= &hugetlb_fs_parameters,
	.kill_sb		= kill_litter_super,
};

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return h - hstates;
}
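
/*
 * For instance, a page_size_log of 21 selects the 2MB hstate (assuming
 * one is configured) in get_hstate_idx() above, a page_size_log of 0
 * falls back to the default hstate, and a size with no configured
 * hstate yields -1.
 */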

/*
 * Note that size should be aligned to the proper hugepage size by the
 * caller; otherwise hugetlb_reserve_pages() reserves one fewer hugepage
 * than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, struct user_struct **user,
				int creat_flags, int page_size_log)
{
	struct inode *inode;
	struct vfsmount *mnt;
	int hstate_idx;
	struct file *file;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	*user = NULL;
	mnt = hugetlbfs_vfsmount[hstate_idx];
	if (!mnt)
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		*user = current_user();
		if (user_shm_lock(size, *user)) {
			task_lock(current);
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
				current->comm, current->pid);
			task_unlock(current);
		} else {
			*user = NULL;
			return ERR_PTR(-EPERM);
		}
	}

	file = ERR_PTR(-ENOSPC);
	inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out;
	if (creat_flags == HUGETLB_SHMFS_INODE)
		inode->i_flags |= S_PRIVATE;

	inode->i_size = size;
	clear_nlink(inode);

	if (hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		file = ERR_PTR(-ENOMEM);
	else
		file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
					&hugetlbfs_file_operations);
	if (!IS_ERR(file))
		return file;

	iput(inode);
out:
	if (*user) {
		user_shm_unlock(size, *user);
		*user = NULL;
	}
	return file;
}

static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
{
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc)) {
		mnt = ERR_CAST(fc);
	} else {
		struct hugetlbfs_fs_context *ctx = fc->fs_private;
		ctx->hstate = h;
		mnt = fc_mount(fc);
		put_fs_context(fc);
	}
	if (IS_ERR(mnt))
		pr_err("Cannot mount internal hugetlbfs for page size %uK",
		       1U << (h->order + PAGE_SHIFT - 10));
	return mnt;
}

static int __init init_hugetlbfs_fs(void)
{
	struct vfsmount *mnt;
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, SLAB_ACCOUNT, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out_free;

	/* default hstate mount is required */
	mnt = mount_one_hugetlbfs(&hstates[default_hstate_idx]);
	if (IS_ERR(mnt)) {
		error = PTR_ERR(mnt);
		goto out_unreg;
	}
	hugetlbfs_vfsmount[default_hstate_idx] = mnt;

	/* other hstates are optional */
	i = 0;
	for_each_hstate(h) {
		if (i == default_hstate_idx) {
			i++;
			continue;
		}

		mnt = mount_one_hugetlbfs(h);
		if (IS_ERR(mnt))
			hugetlbfs_vfsmount[i] = NULL;
		else
			hugetlbfs_vfsmount[i] = mnt;
		i++;
	}

	return 0;

 out_unreg:
	(void)unregister_filesystem(&hugetlbfs_fs_type);
 out_free:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
 out:
	return error;
}
fs_initcall(init_hugetlbfs_fs)