1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/fs/ext4/super.c
4 *
5 * Copyright (C) 1992, 1993, 1994, 1995
6 * Remy Card (card@masi.ibp.fr)
7 * Laboratoire MASI - Institut Blaise Pascal
8 * Universite Pierre et Marie Curie (Paris VI)
9 *
10 * from
11 *
12 * linux/fs/minix/inode.c
13 *
14 * Copyright (C) 1991, 1992 Linus Torvalds
15 *
16 * Big-endian to little-endian byte-swapping/bitmaps by
17 * David S. Miller (davem@caip.rutgers.edu), 1995
18 */
19
20#include <linux/module.h>
21#include <linux/string.h>
22#include <linux/fs.h>
23#include <linux/time.h>
24#include <linux/vmalloc.h>
25#include <linux/slab.h>
26#include <linux/init.h>
27#include <linux/blkdev.h>
28#include <linux/backing-dev.h>
29#include <linux/parser.h>
30#include <linux/buffer_head.h>
31#include <linux/exportfs.h>
32#include <linux/vfs.h>
33#include <linux/random.h>
34#include <linux/mount.h>
35#include <linux/namei.h>
36#include <linux/quotaops.h>
37#include <linux/seq_file.h>
38#include <linux/ctype.h>
39#include <linux/log2.h>
40#include <linux/crc16.h>
41#include <linux/dax.h>
42#include <linux/cleancache.h>
43#include <linux/uaccess.h>
44#include <linux/iversion.h>
45#include <linux/unicode.h>
46
47#include <linux/kthread.h>
48#include <linux/freezer.h>
49
50#include "ext4.h"
51#include "ext4_extents.h" /* Needed for trace points definition */
52#include "ext4_jbd2.h"
53#include "xattr.h"
54#include "acl.h"
55#include "mballoc.h"
56#include "fsmap.h"
57
58#define CREATE_TRACE_POINTS
59#include <trace/events/ext4.h>
60
61static struct ext4_lazy_init *ext4_li_info;
62static struct mutex ext4_li_mtx;
63static struct ratelimit_state ext4_mount_msg_ratelimit;
64
65static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
66 unsigned long journal_devnum);
67static int ext4_show_options(struct seq_file *seq, struct dentry *root);
68static int ext4_commit_super(struct super_block *sb, int sync);
69static int ext4_mark_recovery_complete(struct super_block *sb,
70 struct ext4_super_block *es);
71static int ext4_clear_journal_err(struct super_block *sb,
72 struct ext4_super_block *es);
73static int ext4_sync_fs(struct super_block *sb, int wait);
74static int ext4_remount(struct super_block *sb, int *flags, char *data);
75static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
76static int ext4_unfreeze(struct super_block *sb);
77static int ext4_freeze(struct super_block *sb);
78static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
79 const char *dev_name, void *data);
80static inline int ext2_feature_set_ok(struct super_block *sb);
81static inline int ext3_feature_set_ok(struct super_block *sb);
82static int ext4_feature_set_ok(struct super_block *sb, int readonly);
83static void ext4_destroy_lazyinit_thread(void);
84static void ext4_unregister_li_request(struct super_block *sb);
85static void ext4_clear_request_list(void);
86static struct inode *ext4_get_journal_inode(struct super_block *sb,
87 unsigned int journal_inum);
88
89/*
90 * Lock ordering
91 *
92 * Note the difference between i_mmap_sem (EXT4_I(inode)->i_mmap_sem) and
93 * i_mmap_rwsem (inode->i_mmap_rwsem)!
94 *
95 * page fault path:
96 * mmap_sem -> sb_start_pagefault -> i_mmap_sem (r) -> transaction start ->
97 * page lock -> i_data_sem (rw)
98 *
99 * buffered write path:
100 * sb_start_write -> i_mutex -> mmap_sem
101 * sb_start_write -> i_mutex -> transaction start -> page lock ->
102 * i_data_sem (rw)
103 *
104 * truncate:
105 * sb_start_write -> i_mutex -> i_mmap_sem (w) -> i_mmap_rwsem (w) -> page lock
106 * sb_start_write -> i_mutex -> i_mmap_sem (w) -> transaction start ->
107 * i_data_sem (rw)
108 *
109 * direct IO:
110 * sb_start_write -> i_mutex -> mmap_sem
111 * sb_start_write -> i_mutex -> transaction start -> i_data_sem (rw)
112 *
113 * writepages:
114 * transaction start -> page lock(s) -> i_data_sem (rw)
115 */
116
117#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
118static struct file_system_type ext2_fs_type = {
119 .owner = THIS_MODULE,
120 .name = "ext2",
121 .mount = ext4_mount,
122 .kill_sb = kill_block_super,
123 .fs_flags = FS_REQUIRES_DEV,
124};
125MODULE_ALIAS_FS("ext2");
126MODULE_ALIAS("ext2");
127#define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
128#else
129#define IS_EXT2_SB(sb) (0)
130#endif
131
132
133static struct file_system_type ext3_fs_type = {
134 .owner = THIS_MODULE,
135 .name = "ext3",
136 .mount = ext4_mount,
137 .kill_sb = kill_block_super,
138 .fs_flags = FS_REQUIRES_DEV,
139};
140MODULE_ALIAS_FS("ext3");
141MODULE_ALIAS("ext3");
142#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)
143
144/*
145 * This works like sb_bread() except it uses ERR_PTR for error
146 * returns. Currently with sb_bread it's impossible to distinguish
147 * between ENOMEM and EIO situations (since both result in a NULL
148 * return).
149 */
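/*
 * A typical caller checks the result with IS_ERR()/PTR_ERR() rather than
 * for NULL, roughly like this (illustrative sketch only, not taken from
 * this file):
 *
 *	bh = ext4_sb_bread(sb, block, 0);
 *	if (IS_ERR(bh))
 *		return PTR_ERR(bh);
 */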
150struct buffer_head *
151ext4_sb_bread(struct super_block *sb, sector_t block, int op_flags)
152{
153 struct buffer_head *bh = sb_getblk(sb, block);
154
155 if (bh == NULL)
156 return ERR_PTR(-ENOMEM);
157 if (buffer_uptodate(bh))
158 return bh;
159 ll_rw_block(REQ_OP_READ, REQ_META | op_flags, 1, &bh);
160 wait_on_buffer(bh);
161 if (buffer_uptodate(bh))
162 return bh;
163 put_bh(bh);
164 return ERR_PTR(-EIO);
165}
166
167static int ext4_verify_csum_type(struct super_block *sb,
168 struct ext4_super_block *es)
169{
170 if (!ext4_has_feature_metadata_csum(sb))
171 return 1;
172
173 return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
174}
175
176static __le32 ext4_superblock_csum(struct super_block *sb,
177 struct ext4_super_block *es)
178{
179 struct ext4_sb_info *sbi = EXT4_SB(sb);
180 int offset = offsetof(struct ext4_super_block, s_checksum);
181 __u32 csum;
182
183 csum = ext4_chksum(sbi, ~0, (char *)es, offset);
184
185 return cpu_to_le32(csum);
186}
187
188static int ext4_superblock_csum_verify(struct super_block *sb,
189 struct ext4_super_block *es)
190{
191 if (!ext4_has_metadata_csum(sb))
192 return 1;
193
194 return es->s_checksum == ext4_superblock_csum(sb, es);
195}
196
197void ext4_superblock_csum_set(struct super_block *sb)
198{
199 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
200
201 if (!ext4_has_metadata_csum(sb))
202 return;
203
204 es->s_checksum = ext4_superblock_csum(sb, es);
205}
206
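/*
 * Allocation helpers below: try a physically contiguous kmalloc()/kzalloc()
 * first (with __GFP_NOWARN so a failure stays quiet), then fall back to
 * vmalloc() for allocations that are too large or too fragmented.
 */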
207void *ext4_kvmalloc(size_t size, gfp_t flags)
208{
209 void *ret;
210
211 ret = kmalloc(size, flags | __GFP_NOWARN);
212 if (!ret)
213 ret = __vmalloc(size, flags, PAGE_KERNEL);
214 return ret;
215}
216
217void *ext4_kvzalloc(size_t size, gfp_t flags)
218{
219 void *ret;
220
221 ret = kzalloc(size, flags | __GFP_NOWARN);
222 if (!ret)
223 ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
224 return ret;
225}
226
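/*
 * Group descriptor accessors: on filesystems with 64-bit descriptors
 * (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) the *_hi fields extend
 * the 32-bit/16-bit *_lo values, so each accessor below stitches lo and hi
 * back together; otherwise only the lo field is used.
 */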
227ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
228 struct ext4_group_desc *bg)
229{
230 return le32_to_cpu(bg->bg_block_bitmap_lo) |
231 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
232 (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
233}
234
235ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
236 struct ext4_group_desc *bg)
237{
238 return le32_to_cpu(bg->bg_inode_bitmap_lo) |
239 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
240 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
241}
242
243ext4_fsblk_t ext4_inode_table(struct super_block *sb,
244 struct ext4_group_desc *bg)
245{
246 return le32_to_cpu(bg->bg_inode_table_lo) |
247 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
248 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
249}
250
251__u32 ext4_free_group_clusters(struct super_block *sb,
252 struct ext4_group_desc *bg)
253{
254 return le16_to_cpu(bg->bg_free_blocks_count_lo) |
255 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
256 (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
257}
258
259__u32 ext4_free_inodes_count(struct super_block *sb,
260 struct ext4_group_desc *bg)
261{
262 return le16_to_cpu(bg->bg_free_inodes_count_lo) |
263 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
264 (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
265}
266
267__u32 ext4_used_dirs_count(struct super_block *sb,
268 struct ext4_group_desc *bg)
269{
270 return le16_to_cpu(bg->bg_used_dirs_count_lo) |
271 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
272 (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
273}
274
275__u32 ext4_itable_unused_count(struct super_block *sb,
276 struct ext4_group_desc *bg)
277{
278 return le16_to_cpu(bg->bg_itable_unused_lo) |
279 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
280 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
281}
282
283void ext4_block_bitmap_set(struct super_block *sb,
284 struct ext4_group_desc *bg, ext4_fsblk_t blk)
285{
286 bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
287 if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
288 bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
289}
290
291void ext4_inode_bitmap_set(struct super_block *sb,
292 struct ext4_group_desc *bg, ext4_fsblk_t blk)
293{
294 bg->bg_inode_bitmap_lo = cpu_to_le32((u32)blk);
295 if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
296 bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
297}
298
299void ext4_inode_table_set(struct super_block *sb,
300 struct ext4_group_desc *bg, ext4_fsblk_t blk)
301{
302 bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
303 if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
304 bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
305}
306
307void ext4_free_group_clusters_set(struct super_block *sb,
308 struct ext4_group_desc *bg, __u32 count)
309{
310 bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
311 if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
312 bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
313}
314
315void ext4_free_inodes_set(struct super_block *sb,
316 struct ext4_group_desc *bg, __u32 count)
317{
318 bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
319 if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
320 bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
321}
322
323void ext4_used_dirs_set(struct super_block *sb,
324 struct ext4_group_desc *bg, __u32 count)
325{
326 bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
327 if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
328 bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
329}
330
331void ext4_itable_unused_set(struct super_block *sb,
332 struct ext4_group_desc *bg, __u32 count)
333{
334 bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
335 if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
336 bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
337}
338
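/*
 * Superblock timestamps are stored as 40-bit values split across a 32-bit
 * little-endian "lo" field and an 8-bit "hi" extension; the helpers below
 * clamp the value to (1 << 40) - 1 seconds on write and reassemble it on
 * read.
 */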
339static void __ext4_update_tstamp(__le32 *lo, __u8 *hi)
340{
341 time64_t now = ktime_get_real_seconds();
342
343 now = clamp_val(now, 0, (1ull << 40) - 1);
344
345 *lo = cpu_to_le32(lower_32_bits(now));
346 *hi = upper_32_bits(now);
347}
348
349static time64_t __ext4_get_tstamp(__le32 *lo, __u8 *hi)
350{
351 return ((time64_t)(*hi) << 32) + le32_to_cpu(*lo);
352}
353#define ext4_update_tstamp(es, tstamp) \
354 __ext4_update_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi)
355#define ext4_get_tstamp(es, tstamp) \
356 __ext4_get_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi)
357
358static void __save_error_info(struct super_block *sb, const char *func,
359 unsigned int line)
360{
361 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
362
363 EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
364 if (bdev_read_only(sb->s_bdev))
365 return;
366 es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
367 ext4_update_tstamp(es, s_last_error_time);
368 strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
369 es->s_last_error_line = cpu_to_le32(line);
370 if (!es->s_first_error_time) {
371 es->s_first_error_time = es->s_last_error_time;
372 es->s_first_error_time_hi = es->s_last_error_time_hi;
373 strncpy(es->s_first_error_func, func,
374 sizeof(es->s_first_error_func));
375 es->s_first_error_line = cpu_to_le32(line);
376 es->s_first_error_ino = es->s_last_error_ino;
377 es->s_first_error_block = es->s_last_error_block;
378 }
379 /*
380 * Start the daily error reporting function if it hasn't been
381 * started already
382 */
383 if (!es->s_error_count)
384 mod_timer(&EXT4_SB(sb)->s_err_report, jiffies + 24*60*60*HZ);
385 le32_add_cpu(&es->s_error_count, 1);
386}
387
388static void save_error_info(struct super_block *sb, const char *func,
389 unsigned int line)
390{
391 __save_error_info(sb, func, line);
392 if (!bdev_read_only(sb->s_bdev))
393 ext4_commit_super(sb, 1);
394}
395
396/*
397 * The del_gendisk() function uninitializes the disk-specific data
398 * structures, including the bdi structure, without telling anyone
399 * else. Once this happens, any attempt to call mark_buffer_dirty()
400 * (for example, by ext4_commit_super), will cause a kernel OOPS.
401 * This is a kludge to prevent these oops until we can put in a proper
402 * hook in del_gendisk() to inform the VFS and file system layers.
403 */
404static int block_device_ejected(struct super_block *sb)
405{
406 struct inode *bd_inode = sb->s_bdev->bd_inode;
407 struct backing_dev_info *bdi = inode_to_bdi(bd_inode);
408
409 return bdi->dev == NULL;
410}
411
412static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
413{
414 struct super_block *sb = journal->j_private;
415 struct ext4_sb_info *sbi = EXT4_SB(sb);
416 int error = is_journal_aborted(journal);
417 struct ext4_journal_cb_entry *jce;
418
419 BUG_ON(txn->t_state == T_FINISHED);
420
421 ext4_process_freed_data(sb, txn->t_tid);
422
423 spin_lock(&sbi->s_md_lock);
424 while (!list_empty(&txn->t_private_list)) {
425 jce = list_entry(txn->t_private_list.next,
426 struct ext4_journal_cb_entry, jce_list);
427 list_del_init(&jce->jce_list);
428 spin_unlock(&sbi->s_md_lock);
429 jce->jce_func(sb, jce, error);
430 spin_lock(&sbi->s_md_lock);
431 }
432 spin_unlock(&sbi->s_md_lock);
433}
434
435static bool system_going_down(void)
436{
437 return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
438 || system_state == SYSTEM_RESTART;
439}
440
441/* Deal with the reporting of failure conditions on a filesystem such as
442 * inconsistencies detected or read IO failures.
443 *
444 * On ext2, we can store the error state of the filesystem in the
445 * superblock. That is not possible on ext4, because we may have other
446 * write ordering constraints on the superblock which prevent us from
447 * writing it out straight away; and given that the journal is about to
448 * be aborted, we can't rely on the current, or future, transactions to
449 * write out the superblock safely.
450 *
451 * We'll just use the jbd2_journal_abort() error code to record an error in
452 * the journal instead. On recovery, the journal will complain about
453 * that error until we've noted it down and cleared it.
454 */
455
456static void ext4_handle_error(struct super_block *sb)
457{
458 journal_t *journal = EXT4_SB(sb)->s_journal;
459
460 if (test_opt(sb, WARN_ON_ERROR))
461 WARN_ON_ONCE(1);
462
463 if (sb_rdonly(sb) || test_opt(sb, ERRORS_CONT))
464 return;
465
466 EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
467 if (journal)
468 jbd2_journal_abort(journal, -EIO);
469 /*
470 * We force ERRORS_RO behavior when system is rebooting. Otherwise we
471 * could panic during 'reboot -f' as the underlying device got already
472 * disabled.
473 */
474 if (test_opt(sb, ERRORS_RO) || system_going_down()) {
475 ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
476 /*
477 * Make sure updated value of ->s_mount_flags will be visible
478 * before ->s_flags update
479 */
480 smp_wmb();
481 sb->s_flags |= SB_RDONLY;
482 } else if (test_opt(sb, ERRORS_PANIC)) {
483 if (EXT4_SB(sb)->s_journal &&
484 !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
485 return;
486 panic("EXT4-fs (device %s): panic forced after error\n",
487 sb->s_id);
488 }
489}
490
491#define ext4_error_ratelimit(sb) \
492 ___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state), \
493 "EXT4-fs error")
494
495void __ext4_error(struct super_block *sb, const char *function,
496 unsigned int line, const char *fmt, ...)
497{
498 struct va_format vaf;
499 va_list args;
500
501 if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
502 return;
503
504 trace_ext4_error(sb, function, line);
505 if (ext4_error_ratelimit(sb)) {
506 va_start(args, fmt);
507 vaf.fmt = fmt;
508 vaf.va = &args;
509 printk(KERN_CRIT
510 "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
511 sb->s_id, function, line, current->comm, &vaf);
512 va_end(args);
513 }
514 save_error_info(sb, function, line);
515 ext4_handle_error(sb);
516}
517
518void __ext4_error_inode(struct inode *inode, const char *function,
519 unsigned int line, ext4_fsblk_t block,
520 const char *fmt, ...)
521{
522 va_list args;
523 struct va_format vaf;
524 struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
525
526 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
527 return;
528
529 trace_ext4_error(inode->i_sb, function, line);
530 es->s_last_error_ino = cpu_to_le32(inode->i_ino);
531 es->s_last_error_block = cpu_to_le64(block);
532 if (ext4_error_ratelimit(inode->i_sb)) {
533 va_start(args, fmt);
534 vaf.fmt = fmt;
535 vaf.va = &args;
536 if (block)
537 printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
538 "inode #%lu: block %llu: comm %s: %pV\n",
539 inode->i_sb->s_id, function, line, inode->i_ino,
540 block, current->comm, &vaf);
541 else
542 printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
543 "inode #%lu: comm %s: %pV\n",
544 inode->i_sb->s_id, function, line, inode->i_ino,
545 current->comm, &vaf);
546 va_end(args);
547 }
548 save_error_info(inode->i_sb, function, line);
549 ext4_handle_error(inode->i_sb);
550}
551
552void __ext4_error_file(struct file *file, const char *function,
553 unsigned int line, ext4_fsblk_t block,
554 const char *fmt, ...)
555{
556 va_list args;
557 struct va_format vaf;
558 struct ext4_super_block *es;
559 struct inode *inode = file_inode(file);
560 char pathname[80], *path;
561
562 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
563 return;
564
565 trace_ext4_error(inode->i_sb, function, line);
566 es = EXT4_SB(inode->i_sb)->s_es;
567 es->s_last_error_ino = cpu_to_le32(inode->i_ino);
568 if (ext4_error_ratelimit(inode->i_sb)) {
569 path = file_path(file, pathname, sizeof(pathname));
570 if (IS_ERR(path))
571 path = "(unknown)";
572 va_start(args, fmt);
573 vaf.fmt = fmt;
574 vaf.va = &args;
575 if (block)
576 printk(KERN_CRIT
577 "EXT4-fs error (device %s): %s:%d: inode #%lu: "
578 "block %llu: comm %s: path %s: %pV\n",
579 inode->i_sb->s_id, function, line, inode->i_ino,
580 block, current->comm, path, &vaf);
581 else
582 printk(KERN_CRIT
583 "EXT4-fs error (device %s): %s:%d: inode #%lu: "
584 "comm %s: path %s: %pV\n",
585 inode->i_sb->s_id, function, line, inode->i_ino,
586 current->comm, path, &vaf);
587 va_end(args);
588 }
589 save_error_info(inode->i_sb, function, line);
590 ext4_handle_error(inode->i_sb);
591}
592
593const char *ext4_decode_error(struct super_block *sb, int errno,
594 char nbuf[16])
595{
596 char *errstr = NULL;
597
598 switch (errno) {
599 case -EFSCORRUPTED:
600 errstr = "Corrupt filesystem";
601 break;
602 case -EFSBADCRC:
603 errstr = "Filesystem failed CRC";
604 break;
605 case -EIO:
606 errstr = "IO failure";
607 break;
608 case -ENOMEM:
609 errstr = "Out of memory";
610 break;
611 case -EROFS:
612 if (!sb || (EXT4_SB(sb)->s_journal &&
613 EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
614 errstr = "Journal has aborted";
615 else
616 errstr = "Readonly filesystem";
617 break;
618 default:
619 /* If the caller passed in an extra buffer for unknown
620 * errors, textualise them now. Else we just return
621 * NULL. */
622 if (nbuf) {
623 /* Check for truncated error codes... */
624 if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
625 errstr = nbuf;
626 }
627 break;
628 }
629
630 return errstr;
631}
632
633/* __ext4_std_error decodes expected errors from journaling functions
634 * automatically and invokes the appropriate error response. */
635
636void __ext4_std_error(struct super_block *sb, const char *function,
637 unsigned int line, int errno)
638{
639 char nbuf[16];
640 const char *errstr;
641
642 if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
643 return;
644
645 /* Special case: if the error is EROFS, and we're not already
646 * inside a transaction, then there's really no point in logging
647 * an error. */
648 if (errno == -EROFS && journal_current_handle() == NULL && sb_rdonly(sb))
649 return;
650
651 if (ext4_error_ratelimit(sb)) {
652 errstr = ext4_decode_error(sb, errno, nbuf);
653 printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
654 sb->s_id, function, line, errstr);
655 }
656
657 save_error_info(sb, function, line);
658 ext4_handle_error(sb);
659}
660
661/*
662 * ext4_abort is a much stronger failure handler than ext4_error. The
663 * abort function may be used to deal with unrecoverable failures such
664 * as journal IO errors or ENOMEM at a critical moment in log management.
665 *
666 * We unconditionally force the filesystem into an ABORT|READONLY state,
667 * unless the error response on the fs has been set to panic in which
668 * case we take the easy way out and panic immediately.
669 */
670
671void __ext4_abort(struct super_block *sb, const char *function,
672 unsigned int line, const char *fmt, ...)
673{
674 struct va_format vaf;
675 va_list args;
676
677 if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
678 return;
679
680 save_error_info(sb, function, line);
681 va_start(args, fmt);
682 vaf.fmt = fmt;
683 vaf.va = &args;
684 printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: %pV\n",
685 sb->s_id, function, line, &vaf);
686 va_end(args);
687
688 if (sb_rdonly(sb) == 0) {
689 ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
690 EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
691 /*
692 * Make sure updated value of ->s_mount_flags will be visible
693 * before ->s_flags update
694 */
695 smp_wmb();
696 sb->s_flags |= SB_RDONLY;
697 if (EXT4_SB(sb)->s_journal)
698 jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
699 save_error_info(sb, function, line);
700 }
701 if (test_opt(sb, ERRORS_PANIC) && !system_going_down()) {
702 if (EXT4_SB(sb)->s_journal &&
703 !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
704 return;
705 panic("EXT4-fs panic from previous error\n");
706 }
707}
708
709void __ext4_msg(struct super_block *sb,
710 const char *prefix, const char *fmt, ...)
711{
712 struct va_format vaf;
713 va_list args;
714
715 if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs"))
716 return;
717
718 va_start(args, fmt);
719 vaf.fmt = fmt;
720 vaf.va = &args;
721 printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
722 va_end(args);
723}
724
725#define ext4_warning_ratelimit(sb) \
726 ___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state), \
727 "EXT4-fs warning")
728
729void __ext4_warning(struct super_block *sb, const char *function,
730 unsigned int line, const char *fmt, ...)
731{
732 struct va_format vaf;
733 va_list args;
734
735 if (!ext4_warning_ratelimit(sb))
736 return;
737
738 va_start(args, fmt);
739 vaf.fmt = fmt;
740 vaf.va = &args;
741 printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
742 sb->s_id, function, line, &vaf);
743 va_end(args);
744}
745
746void __ext4_warning_inode(const struct inode *inode, const char *function,
747 unsigned int line, const char *fmt, ...)
748{
749 struct va_format vaf;
750 va_list args;
751
752 if (!ext4_warning_ratelimit(inode->i_sb))
753 return;
754
755 va_start(args, fmt);
756 vaf.fmt = fmt;
757 vaf.va = &args;
758 printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: "
759 "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id,
760 function, line, inode->i_ino, current->comm, &vaf);
761 va_end(args);
762}
763
764void __ext4_grp_locked_error(const char *function, unsigned int line,
765 struct super_block *sb, ext4_group_t grp,
766 unsigned long ino, ext4_fsblk_t block,
767 const char *fmt, ...)
768__releases(bitlock)
769__acquires(bitlock)
770{
771 struct va_format vaf;
772 va_list args;
773 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
774
775 if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
776 return;
777
778 trace_ext4_error(sb, function, line);
779 es->s_last_error_ino = cpu_to_le32(ino);
780 es->s_last_error_block = cpu_to_le64(block);
781 __save_error_info(sb, function, line);
782
783 if (ext4_error_ratelimit(sb)) {
784 va_start(args, fmt);
785 vaf.fmt = fmt;
786 vaf.va = &args;
787 printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
788 sb->s_id, function, line, grp);
789 if (ino)
790 printk(KERN_CONT "inode %lu: ", ino);
791 if (block)
792 printk(KERN_CONT "block %llu:",
793 (unsigned long long) block);
794 printk(KERN_CONT "%pV\n", &vaf);
795 va_end(args);
796 }
797
798 if (test_opt(sb, WARN_ON_ERROR))
799 WARN_ON_ONCE(1);
800
801 if (test_opt(sb, ERRORS_CONT)) {
802 ext4_commit_super(sb, 0);
803 return;
804 }
805
806 ext4_unlock_group(sb, grp);
807 ext4_commit_super(sb, 1);
808 ext4_handle_error(sb);
809 /*
810 * We only get here in the ERRORS_RO case; relocking the group
811 * may be dangerous, but nothing bad will happen since the
812 * filesystem will have already been marked read/only and the
813 * journal has been aborted. We return 1 as a hint to callers
814 * who might want to use the return value from
815 * ext4_grp_locked_error() to distinguish between the
816 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
817 * aggressively from the ext4 function in question, with a
818 * more appropriate error code.
819 */
820 ext4_lock_group(sb, grp);
821 return;
822}
823
824void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
825 ext4_group_t group,
826 unsigned int flags)
827{
828 struct ext4_sb_info *sbi = EXT4_SB(sb);
829 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
830 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
831 int ret;
832
833 if (flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) {
834 ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
835 &grp->bb_state);
836 if (!ret)
837 percpu_counter_sub(&sbi->s_freeclusters_counter,
838 grp->bb_free);
839 }
840
841 if (flags & EXT4_GROUP_INFO_IBITMAP_CORRUPT) {
842 ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT,
843 &grp->bb_state);
844 if (!ret && gdp) {
845 int count;
846
847 count = ext4_free_inodes_count(sb, gdp);
848 percpu_counter_sub(&sbi->s_freeinodes_counter,
849 count);
850 }
851 }
852}
853
854void ext4_update_dynamic_rev(struct super_block *sb)
855{
856 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
857
858 if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
859 return;
860
861 ext4_warning(sb,
862 "updating to rev %d because of new feature flag, "
863 "running e2fsck is recommended",
864 EXT4_DYNAMIC_REV);
865
866 es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
867 es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
868 es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
869 /* leave es->s_feature_*compat flags alone */
870 /* es->s_uuid will be set by e2fsck if empty */
871
872 /*
873 * The rest of the superblock fields should be zero, and if not it
874 * means they are likely already in use, so leave them alone. We
875 * can leave it up to e2fsck to clean up any inconsistencies there.
876 */
877}
878
879/*
880 * Open the external journal device
881 */
882static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
883{
884 struct block_device *bdev;
885 char b[BDEVNAME_SIZE];
886
887 bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
888 if (IS_ERR(bdev))
889 goto fail;
890 return bdev;
891
892fail:
893 ext4_msg(sb, KERN_ERR, "failed to open journal device %s: %ld",
894 __bdevname(dev, b), PTR_ERR(bdev));
895 return NULL;
896}
897
898/*
899 * Release the journal device
900 */
901static void ext4_blkdev_put(struct block_device *bdev)
902{
903 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
904}
905
906static void ext4_blkdev_remove(struct ext4_sb_info *sbi)
907{
908 struct block_device *bdev;
909 bdev = sbi->journal_bdev;
910 if (bdev) {
911 ext4_blkdev_put(bdev);
912 sbi->journal_bdev = NULL;
913 }
914}
915
916static inline struct inode *orphan_list_entry(struct list_head *l)
917{
918 return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
919}
920
921static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
922{
923 struct list_head *l;
924
925 ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
926 le32_to_cpu(sbi->s_es->s_last_orphan));
927
928 printk(KERN_ERR "sb_info orphan list:\n");
929 list_for_each(l, &sbi->s_orphan) {
930 struct inode *inode = orphan_list_entry(l);
931 printk(KERN_ERR " "
932 "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
933 inode->i_sb->s_id, inode->i_ino, inode,
934 inode->i_mode, inode->i_nlink,
935 NEXT_ORPHAN(inode));
936 }
937}
938
939#ifdef CONFIG_QUOTA
940static int ext4_quota_off(struct super_block *sb, int type);
941
942static inline void ext4_quota_off_umount(struct super_block *sb)
943{
944 int type;
945
946 /* Use our quota_off function to clear inode flags etc. */
947 for (type = 0; type < EXT4_MAXQUOTAS; type++)
948 ext4_quota_off(sb, type);
949}
950
951/*
952 * This is a helper function which is used in the mount/remount
953 * codepaths (which holds s_umount) to fetch the quota file name.
954 */
955static inline char *get_qf_name(struct super_block *sb,
956 struct ext4_sb_info *sbi,
957 int type)
958{
959 return rcu_dereference_protected(sbi->s_qf_names[type],
960 lockdep_is_held(&sb->s_umount));
961}
962#else
963static inline void ext4_quota_off_umount(struct super_block *sb)
964{
965}
966#endif
967
968static void ext4_put_super(struct super_block *sb)
969{
970 struct ext4_sb_info *sbi = EXT4_SB(sb);
971 struct ext4_super_block *es = sbi->s_es;
972 struct buffer_head **group_desc;
973 struct flex_groups **flex_groups;
974 int aborted = 0;
975 int i, err;
976
977 ext4_unregister_li_request(sb);
978 ext4_quota_off_umount(sb);
979
980 destroy_workqueue(sbi->rsv_conversion_wq);
981
982 if (sbi->s_journal) {
983 aborted = is_journal_aborted(sbi->s_journal);
984 err = jbd2_journal_destroy(sbi->s_journal);
985 sbi->s_journal = NULL;
986 if ((err < 0) && !aborted)
987 ext4_abort(sb, "Couldn't clean up the journal");
988 }
989
990 ext4_unregister_sysfs(sb);
991 ext4_es_unregister_shrinker(sbi);
992 del_timer_sync(&sbi->s_err_report);
993 ext4_release_system_zone(sb);
994 ext4_mb_release(sb);
995 ext4_ext_release(sb);
996
997 if (!sb_rdonly(sb) && !aborted) {
998 ext4_clear_feature_journal_needs_recovery(sb);
999 es->s_state = cpu_to_le16(sbi->s_mount_state);
1000 }
1001 if (!sb_rdonly(sb))
1002 ext4_commit_super(sb, 1);
1003
1004 rcu_read_lock();
1005 group_desc = rcu_dereference(sbi->s_group_desc);
1006 for (i = 0; i < sbi->s_gdb_count; i++)
1007 brelse(group_desc[i]);
1008 kvfree(group_desc);
1009 flex_groups = rcu_dereference(sbi->s_flex_groups);
1010 if (flex_groups) {
1011 for (i = 0; i < sbi->s_flex_groups_allocated; i++)
1012 kvfree(flex_groups[i]);
1013 kvfree(flex_groups);
1014 }
1015 rcu_read_unlock();
1016 percpu_counter_destroy(&sbi->s_freeclusters_counter);
1017 percpu_counter_destroy(&sbi->s_freeinodes_counter);
1018 percpu_counter_destroy(&sbi->s_dirs_counter);
1019 percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
1020 percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);
1021 percpu_free_rwsem(&sbi->s_writepages_rwsem);
1022#ifdef CONFIG_QUOTA
1023 for (i = 0; i < EXT4_MAXQUOTAS; i++)
1024 kfree(get_qf_name(sb, sbi, i));
1025#endif
1026
1027 /* Debugging code just in case the in-memory inode orphan list
1028 * isn't empty. The on-disk one can be non-empty if we've
1029 * detected an error and taken the fs readonly, but the
1030 * in-memory list had better be clean by this point. */
1031 if (!list_empty(&sbi->s_orphan))
1032 dump_orphan_list(sb, sbi);
1033 J_ASSERT(list_empty(&sbi->s_orphan));
1034
1035 sync_blockdev(sb->s_bdev);
1036 invalidate_bdev(sb->s_bdev);
1037 if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
1038 /*
1039 * Invalidate the journal device's buffers. We don't want them
1040 * floating about in memory - the physical journal device may be
1041 * hotswapped, and it breaks the `ro-after' testing code.
1042 */
1043 sync_blockdev(sbi->journal_bdev);
1044 invalidate_bdev(sbi->journal_bdev);
1045 ext4_blkdev_remove(sbi);
1046 }
1047
1048 ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
1049 sbi->s_ea_inode_cache = NULL;
1050
1051 ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
1052 sbi->s_ea_block_cache = NULL;
1053
1054 if (sbi->s_mmp_tsk)
1055 kthread_stop(sbi->s_mmp_tsk);
1056 brelse(sbi->s_sbh);
1057 sb->s_fs_info = NULL;
1058 /*
1059 * Now that we are completely done shutting down the
1060 * superblock, we need to actually destroy the kobject.
1061 */
1062 kobject_put(&sbi->s_kobj);
1063 wait_for_completion(&sbi->s_kobj_unregister);
1064 if (sbi->s_chksum_driver)
1065 crypto_free_shash(sbi->s_chksum_driver);
1066 kfree(sbi->s_blockgroup_lock);
1067 fs_put_dax(sbi->s_daxdev);
1068#ifdef CONFIG_UNICODE
1069 utf8_unload(sbi->s_encoding);
1070#endif
1071 kfree(sbi);
1072}
1073
1074static struct kmem_cache *ext4_inode_cachep;
1075
1076/*
1077 * Called inside transaction, so use GFP_NOFS
1078 */
1079static struct inode *ext4_alloc_inode(struct super_block *sb)
1080{
1081 struct ext4_inode_info *ei;
1082
1083 ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
1084 if (!ei)
1085 return NULL;
1086
1087 inode_set_iversion(&ei->vfs_inode, 1);
1088 spin_lock_init(&ei->i_raw_lock);
1089 INIT_LIST_HEAD(&ei->i_prealloc_list);
1090 spin_lock_init(&ei->i_prealloc_lock);
1091 ext4_es_init_tree(&ei->i_es_tree);
1092 rwlock_init(&ei->i_es_lock);
1093 INIT_LIST_HEAD(&ei->i_es_list);
1094 ei->i_es_all_nr = 0;
1095 ei->i_es_shk_nr = 0;
1096 ei->i_es_shrink_lblk = 0;
1097 ei->i_reserved_data_blocks = 0;
1098 ei->i_da_metadata_calc_len = 0;
1099 ei->i_da_metadata_calc_last_lblock = 0;
1100 spin_lock_init(&(ei->i_block_reservation_lock));
1101 ext4_init_pending_tree(&ei->i_pending_tree);
1102#ifdef CONFIG_QUOTA
1103 ei->i_reserved_quota = 0;
1104 memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
1105#endif
1106 ei->jinode = NULL;
1107 INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
1108 spin_lock_init(&ei->i_completed_io_lock);
1109 ei->i_sync_tid = 0;
1110 ei->i_datasync_tid = 0;
1111 atomic_set(&ei->i_unwritten, 0);
1112 INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
1113 return &ei->vfs_inode;
1114}
1115
1116static int ext4_drop_inode(struct inode *inode)
1117{
1118 int drop = generic_drop_inode(inode);
1119
1120 if (!drop)
1121 drop = fscrypt_drop_inode(inode);
1122
1123 trace_ext4_drop_inode(inode, drop);
1124 return drop;
1125}
1126
1127static void ext4_free_in_core_inode(struct inode *inode)
1128{
1129 fscrypt_free_inode(inode);
1130 kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
1131}
1132
1133static void ext4_destroy_inode(struct inode *inode)
1134{
1135 if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
1136 ext4_msg(inode->i_sb, KERN_ERR,
1137 "Inode %lu (%p): orphan list check failed!",
1138 inode->i_ino, EXT4_I(inode));
1139 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
1140 EXT4_I(inode), sizeof(struct ext4_inode_info),
1141 true);
1142 dump_stack();
1143 }
1144}
1145
1146static void init_once(void *foo)
1147{
1148 struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;
1149
1150 INIT_LIST_HEAD(&ei->i_orphan);
1151 init_rwsem(&ei->xattr_sem);
1152 init_rwsem(&ei->i_data_sem);
1153 init_rwsem(&ei->i_mmap_sem);
1154 inode_init_once(&ei->vfs_inode);
1155}
1156
1157static int __init init_inodecache(void)
1158{
1159 ext4_inode_cachep = kmem_cache_create_usercopy("ext4_inode_cache",
1160 sizeof(struct ext4_inode_info), 0,
1161 (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
1162 SLAB_ACCOUNT),
1163 offsetof(struct ext4_inode_info, i_data),
1164 sizeof_field(struct ext4_inode_info, i_data),
1165 init_once);
1166 if (ext4_inode_cachep == NULL)
1167 return -ENOMEM;
1168 return 0;
1169}
1170
1171static void destroy_inodecache(void)
1172{
1173 /*
1174 * Make sure all delayed rcu free inodes are flushed before we
1175 * destroy cache.
1176 */
1177 rcu_barrier();
1178 kmem_cache_destroy(ext4_inode_cachep);
1179}
1180
1181void ext4_clear_inode(struct inode *inode)
1182{
1183 invalidate_inode_buffers(inode);
1184 clear_inode(inode);
1185 ext4_discard_preallocations(inode);
1186 ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
1187 dquot_drop(inode);
1188 if (EXT4_I(inode)->jinode) {
1189 jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
1190 EXT4_I(inode)->jinode);
1191 jbd2_free_inode(EXT4_I(inode)->jinode);
1192 EXT4_I(inode)->jinode = NULL;
1193 }
1194 fscrypt_put_encryption_info(inode);
1195 fsverity_cleanup_inode(inode);
1196}
1197
1198static struct inode *ext4_nfs_get_inode(struct super_block *sb,
1199 u64 ino, u32 generation)
1200{
1201 struct inode *inode;
1202
1203 /*
1204 * Currently we don't know the generation for parent directory, so
1205 * a generation of 0 means "accept any"
1206 */
1207 inode = ext4_iget(sb, ino, EXT4_IGET_HANDLE);
1208 if (IS_ERR(inode))
1209 return ERR_CAST(inode);
1210 if (generation && inode->i_generation != generation) {
1211 iput(inode);
1212 return ERR_PTR(-ESTALE);
1213 }
1214
1215 return inode;
1216}
1217
1218static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
1219 int fh_len, int fh_type)
1220{
1221 return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
1222 ext4_nfs_get_inode);
1223}
1224
1225static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
1226 int fh_len, int fh_type)
1227{
1228 return generic_fh_to_parent(sb, fid, fh_len, fh_type,
1229 ext4_nfs_get_inode);
1230}
1231
1232static int ext4_nfs_commit_metadata(struct inode *inode)
1233{
1234 struct writeback_control wbc = {
1235 .sync_mode = WB_SYNC_ALL
1236 };
1237
1238 trace_ext4_nfs_commit_metadata(inode);
1239 return ext4_write_inode(inode, &wbc);
1240}
1241
1242/*
1243 * Try to release metadata pages (indirect blocks, directories) which are
1244 * mapped via the block device. Since these pages could have journal heads
1245 * which would prevent try_to_free_buffers() from freeing them, we must use
1246 * jbd2 layer's try_to_free_buffers() function to release them.
1247 */
1248static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
1249 gfp_t wait)
1250{
1251 journal_t *journal = EXT4_SB(sb)->s_journal;
1252
1253 WARN_ON(PageChecked(page));
1254 if (!page_has_buffers(page))
1255 return 0;
1256 if (journal)
1257 return jbd2_journal_try_to_free_buffers(journal, page,
1258 wait & ~__GFP_DIRECT_RECLAIM);
1259 return try_to_free_buffers(page);
1260}
1261
1262#ifdef CONFIG_FS_ENCRYPTION
1263static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
1264{
1265 return ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
1266 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len);
1267}
1268
1269static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
1270 void *fs_data)
1271{
1272 handle_t *handle = fs_data;
1273 int res, res2, credits, retries = 0;
1274
1275 /*
1276 * Encrypting the root directory is not allowed because e2fsck expects
1277 * lost+found to exist and be unencrypted, and encrypting the root
1278 * directory would imply encrypting the lost+found directory as well as
1279 * the filename "lost+found" itself.
1280 */
1281 if (inode->i_ino == EXT4_ROOT_INO)
1282 return -EPERM;
1283
1284 if (WARN_ON_ONCE(IS_DAX(inode) && i_size_read(inode)))
1285 return -EINVAL;
1286
1287 res = ext4_convert_inline_data(inode);
1288 if (res)
1289 return res;
1290
1291 /*
1292 * If a journal handle was specified, then the encryption context is
1293 * being set on a new inode via inheritance and is part of a larger
1294 * transaction to create the inode. Otherwise the encryption context is
1295 * being set on an existing inode in its own transaction. Only in the
1296 * latter case should the "retry on ENOSPC" logic be used.
1297 */
1298
1299 if (handle) {
1300 res = ext4_xattr_set_handle(handle, inode,
1301 EXT4_XATTR_INDEX_ENCRYPTION,
1302 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
1303 ctx, len, 0);
1304 if (!res) {
1305 ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
1306 ext4_clear_inode_state(inode,
1307 EXT4_STATE_MAY_INLINE_DATA);
1308 /*
1309 * Update inode->i_flags - S_ENCRYPTED will be enabled,
1310 * S_DAX may be disabled
1311 */
1312 ext4_set_inode_flags(inode);
1313 }
1314 return res;
1315 }
1316
1317 res = dquot_initialize(inode);
1318 if (res)
1319 return res;
1320retry:
1321 res = ext4_xattr_set_credits(inode, len, false /* is_create */,
1322 &credits);
1323 if (res)
1324 return res;
1325
1326 handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
1327 if (IS_ERR(handle))
1328 return PTR_ERR(handle);
1329
1330 res = ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_ENCRYPTION,
1331 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
1332 ctx, len, 0);
1333 if (!res) {
1334 ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
1335 /*
1336 * Update inode->i_flags - S_ENCRYPTED will be enabled,
1337 * S_DAX may be disabled
1338 */
1339 ext4_set_inode_flags(inode);
1340 res = ext4_mark_inode_dirty(handle, inode);
1341 if (res)
1342 EXT4_ERROR_INODE(inode, "Failed to mark inode dirty");
1343 }
1344 res2 = ext4_journal_stop(handle);
1345
1346 if (res == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
1347 goto retry;
1348 if (!res)
1349 res = res2;
1350 return res;
1351}
1352
1353static bool ext4_dummy_context(struct inode *inode)
1354{
1355 return DUMMY_ENCRYPTION_ENABLED(EXT4_SB(inode->i_sb));
1356}
1357
1358static const struct fscrypt_operations ext4_cryptops = {
1359 .key_prefix = "ext4:",
1360 .get_context = ext4_get_context,
1361 .set_context = ext4_set_context,
1362 .dummy_context = ext4_dummy_context,
1363 .empty_dir = ext4_empty_dir,
1364 .max_namelen = EXT4_NAME_LEN,
1365};
1366#endif
1367
1368#ifdef CONFIG_QUOTA
1369static const char * const quotatypes[] = INITQFNAMES;
1370#define QTYPE2NAME(t) (quotatypes[t])
1371
1372static int ext4_write_dquot(struct dquot *dquot);
1373static int ext4_acquire_dquot(struct dquot *dquot);
1374static int ext4_release_dquot(struct dquot *dquot);
1375static int ext4_mark_dquot_dirty(struct dquot *dquot);
1376static int ext4_write_info(struct super_block *sb, int type);
1377static int ext4_quota_on(struct super_block *sb, int type, int format_id,
1378 const struct path *path);
1379static int ext4_quota_on_mount(struct super_block *sb, int type);
1380static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
1381 size_t len, loff_t off);
1382static ssize_t ext4_quota_write(struct super_block *sb, int type,
1383 const char *data, size_t len, loff_t off);
1384static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
1385 unsigned int flags);
1386static int ext4_enable_quotas(struct super_block *sb);
1387static int ext4_get_next_id(struct super_block *sb, struct kqid *qid);
1388
1389static struct dquot **ext4_get_dquots(struct inode *inode)
1390{
1391 return EXT4_I(inode)->i_dquot;
1392}
1393
1394static const struct dquot_operations ext4_quota_operations = {
1395 .get_reserved_space = ext4_get_reserved_space,
1396 .write_dquot = ext4_write_dquot,
1397 .acquire_dquot = ext4_acquire_dquot,
1398 .release_dquot = ext4_release_dquot,
1399 .mark_dirty = ext4_mark_dquot_dirty,
1400 .write_info = ext4_write_info,
1401 .alloc_dquot = dquot_alloc,
1402 .destroy_dquot = dquot_destroy,
1403 .get_projid = ext4_get_projid,
1404 .get_inode_usage = ext4_get_inode_usage,
1405 .get_next_id = ext4_get_next_id,
1406};
1407
1408static const struct quotactl_ops ext4_qctl_operations = {
1409 .quota_on = ext4_quota_on,
1410 .quota_off = ext4_quota_off,
1411 .quota_sync = dquot_quota_sync,
1412 .get_state = dquot_get_state,
1413 .set_info = dquot_set_dqinfo,
1414 .get_dqblk = dquot_get_dqblk,
1415 .set_dqblk = dquot_set_dqblk,
1416 .get_nextdqblk = dquot_get_next_dqblk,
1417};
1418#endif
1419
1420static const struct super_operations ext4_sops = {
1421 .alloc_inode = ext4_alloc_inode,
1422 .free_inode = ext4_free_in_core_inode,
1423 .destroy_inode = ext4_destroy_inode,
1424 .write_inode = ext4_write_inode,
1425 .dirty_inode = ext4_dirty_inode,
1426 .drop_inode = ext4_drop_inode,
1427 .evict_inode = ext4_evict_inode,
1428 .put_super = ext4_put_super,
1429 .sync_fs = ext4_sync_fs,
1430 .freeze_fs = ext4_freeze,
1431 .unfreeze_fs = ext4_unfreeze,
1432 .statfs = ext4_statfs,
1433 .remount_fs = ext4_remount,
1434 .show_options = ext4_show_options,
1435#ifdef CONFIG_QUOTA
1436 .quota_read = ext4_quota_read,
1437 .quota_write = ext4_quota_write,
1438 .get_dquots = ext4_get_dquots,
1439#endif
1440 .bdev_try_to_free_page = bdev_try_to_free_page,
1441};
1442
1443static const struct export_operations ext4_export_ops = {
1444 .fh_to_dentry = ext4_fh_to_dentry,
1445 .fh_to_parent = ext4_fh_to_parent,
1446 .get_parent = ext4_get_parent,
1447 .commit_metadata = ext4_nfs_commit_metadata,
1448};
1449
1450enum {
1451 Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
1452 Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
1453 Opt_nouid32, Opt_debug, Opt_removed,
1454 Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
1455 Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
1456 Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
1457 Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
1458 Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
1459 Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
1460 Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
1461 Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
1462 Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
1463 Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax,
1464 Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_warn_on_error,
1465 Opt_nowarn_on_error, Opt_mblk_io_submit,
1466 Opt_lazytime, Opt_nolazytime, Opt_debug_want_extra_isize,
1467 Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
1468 Opt_inode_readahead_blks, Opt_journal_ioprio,
1469 Opt_dioread_nolock, Opt_dioread_lock,
1470 Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
1471 Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache,
1472};
1473
1474static const match_table_t tokens = {
1475 {Opt_bsd_df, "bsddf"},
1476 {Opt_minix_df, "minixdf"},
1477 {Opt_grpid, "grpid"},
1478 {Opt_grpid, "bsdgroups"},
1479 {Opt_nogrpid, "nogrpid"},
1480 {Opt_nogrpid, "sysvgroups"},
1481 {Opt_resgid, "resgid=%u"},
1482 {Opt_resuid, "resuid=%u"},
1483 {Opt_sb, "sb=%u"},
1484 {Opt_err_cont, "errors=continue"},
1485 {Opt_err_panic, "errors=panic"},
1486 {Opt_err_ro, "errors=remount-ro"},
1487 {Opt_nouid32, "nouid32"},
1488 {Opt_debug, "debug"},
1489 {Opt_removed, "oldalloc"},
1490 {Opt_removed, "orlov"},
1491 {Opt_user_xattr, "user_xattr"},
1492 {Opt_nouser_xattr, "nouser_xattr"},
1493 {Opt_acl, "acl"},
1494 {Opt_noacl, "noacl"},
1495 {Opt_noload, "norecovery"},
1496 {Opt_noload, "noload"},
1497 {Opt_removed, "nobh"},
1498 {Opt_removed, "bh"},
1499 {Opt_commit, "commit=%u"},
1500 {Opt_min_batch_time, "min_batch_time=%u"},
1501 {Opt_max_batch_time, "max_batch_time=%u"},
1502 {Opt_journal_dev, "journal_dev=%u"},
1503 {Opt_journal_path, "journal_path=%s"},
1504 {Opt_journal_checksum, "journal_checksum"},
1505 {Opt_nojournal_checksum, "nojournal_checksum"},
1506 {Opt_journal_async_commit, "journal_async_commit"},
1507 {Opt_abort, "abort"},
1508 {Opt_data_journal, "data=journal"},
1509 {Opt_data_ordered, "data=ordered"},
1510 {Opt_data_writeback, "data=writeback"},
1511 {Opt_data_err_abort, "data_err=abort"},
1512 {Opt_data_err_ignore, "data_err=ignore"},
1513 {Opt_offusrjquota, "usrjquota="},
1514 {Opt_usrjquota, "usrjquota=%s"},
1515 {Opt_offgrpjquota, "grpjquota="},
1516 {Opt_grpjquota, "grpjquota=%s"},
1517 {Opt_jqfmt_vfsold, "jqfmt=vfsold"},
1518 {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
1519 {Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
1520 {Opt_grpquota, "grpquota"},
1521 {Opt_noquota, "noquota"},
1522 {Opt_quota, "quota"},
1523 {Opt_usrquota, "usrquota"},
1524 {Opt_prjquota, "prjquota"},
1525 {Opt_barrier, "barrier=%u"},
1526 {Opt_barrier, "barrier"},
1527 {Opt_nobarrier, "nobarrier"},
1528 {Opt_i_version, "i_version"},
1529 {Opt_dax, "dax"},
1530 {Opt_stripe, "stripe=%u"},
1531 {Opt_delalloc, "delalloc"},
1532 {Opt_warn_on_error, "warn_on_error"},
1533 {Opt_nowarn_on_error, "nowarn_on_error"},
1534 {Opt_lazytime, "lazytime"},
1535 {Opt_nolazytime, "nolazytime"},
1536 {Opt_debug_want_extra_isize, "debug_want_extra_isize=%u"},
1537 {Opt_nodelalloc, "nodelalloc"},
1538 {Opt_removed, "mblk_io_submit"},
1539 {Opt_removed, "nomblk_io_submit"},
1540 {Opt_block_validity, "block_validity"},
1541 {Opt_noblock_validity, "noblock_validity"},
1542 {Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
1543 {Opt_journal_ioprio, "journal_ioprio=%u"},
1544 {Opt_auto_da_alloc, "auto_da_alloc=%u"},
1545 {Opt_auto_da_alloc, "auto_da_alloc"},
1546 {Opt_noauto_da_alloc, "noauto_da_alloc"},
1547 {Opt_dioread_nolock, "dioread_nolock"},
1548 {Opt_dioread_lock, "dioread_lock"},
1549 {Opt_discard, "discard"},
1550 {Opt_nodiscard, "nodiscard"},
1551 {Opt_init_itable, "init_itable=%u"},
1552 {Opt_init_itable, "init_itable"},
1553 {Opt_noinit_itable, "noinit_itable"},
1554 {Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
1555 {Opt_test_dummy_encryption, "test_dummy_encryption"},
1556 {Opt_nombcache, "nombcache"},
1557 {Opt_nombcache, "no_mbcache"}, /* for backward compatibility */
1558 {Opt_removed, "check=none"}, /* mount option from ext2/3 */
1559 {Opt_removed, "nocheck"}, /* mount option from ext2/3 */
1560 {Opt_removed, "reservation"}, /* mount option from ext2/3 */
1561 {Opt_removed, "noreservation"}, /* mount option from ext2/3 */
1562 {Opt_removed, "journal=%u"}, /* mount option from ext2/3 */
1563 {Opt_err, NULL},
1564};
1565
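/*
 * The helper below parses a leading "sb=<block>" mount option, which selects
 * an alternate superblock location (for example, roughly, "mount -o sb=131072"
 * to use logical block 32768 on a 4k-block filesystem); any remaining option
 * text is handed back to the regular option parser.
 */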
1566static ext4_fsblk_t get_sb_block(void **data)
1567{
1568 ext4_fsblk_t sb_block;
1569 char *options = (char *) *data;
1570
1571 if (!options || strncmp(options, "sb=", 3) != 0)
1572 return 1; /* Default location */
1573
1574 options += 3;
1575 /* TODO: use simple_strtoll with >32bit ext4 */
1576 sb_block = simple_strtoul(options, &options, 0);
1577 if (*options && *options != ',') {
1578 printk(KERN_ERR "EXT4-fs: Invalid sb specification: %s\n",
1579 (char *) *data);
1580 return 1;
1581 }
1582 if (*options == ',')
1583 options++;
1584 *data = (void *) options;
1585
1586 return sb_block;
1587}
1588
1589#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
1590static const char deprecated_msg[] =
1591 "Mount option \"%s\" will be removed by %s\n"
1592 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
1593
1594#ifdef CONFIG_QUOTA
1595static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
1596{
1597 struct ext4_sb_info *sbi = EXT4_SB(sb);
1598 char *qname, *old_qname = get_qf_name(sb, sbi, qtype);
1599 int ret = -1;
1600
1601 if (sb_any_quota_loaded(sb) && !old_qname) {
1602 ext4_msg(sb, KERN_ERR,
1603 "Cannot change journaled "
1604 "quota options when quota turned on");
1605 return -1;
1606 }
1607 if (ext4_has_feature_quota(sb)) {
1608 ext4_msg(sb, KERN_INFO, "Journaled quota options "
1609 "ignored when QUOTA feature is enabled");
1610 return 1;
1611 }
1612 qname = match_strdup(args);
1613 if (!qname) {
1614 ext4_msg(sb, KERN_ERR,
1615 "Not enough memory for storing quotafile name");
1616 return -1;
1617 }
1618 if (old_qname) {
1619 if (strcmp(old_qname, qname) == 0)
1620 ret = 1;
1621 else
1622 ext4_msg(sb, KERN_ERR,
1623 "%s quota file already specified",
1624 QTYPE2NAME(qtype));
1625 goto errout;
1626 }
1627 if (strchr(qname, '/')) {
1628 ext4_msg(sb, KERN_ERR,
1629 "quotafile must be on filesystem root");
1630 goto errout;
1631 }
1632 rcu_assign_pointer(sbi->s_qf_names[qtype], qname);
1633 set_opt(sb, QUOTA);
1634 return 1;
1635errout:
1636 kfree(qname);
1637 return ret;
1638}
1639
1640static int clear_qf_name(struct super_block *sb, int qtype)
1641{
1642
1643 struct ext4_sb_info *sbi = EXT4_SB(sb);
1644 char *old_qname = get_qf_name(sb, sbi, qtype);
1645
1646 if (sb_any_quota_loaded(sb) && old_qname) {
1647 ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
1648 " when quota turned on");
1649 return -1;
1650 }
1651 rcu_assign_pointer(sbi->s_qf_names[qtype], NULL);
1652 synchronize_rcu();
1653 kfree(old_qname);
1654 return 1;
1655}
1656#endif
1657
1658#define MOPT_SET 0x0001
1659#define MOPT_CLEAR 0x0002
1660#define MOPT_NOSUPPORT 0x0004
1661#define MOPT_EXPLICIT 0x0008
1662#define MOPT_CLEAR_ERR 0x0010
1663#define MOPT_GTE0 0x0020
1664#ifdef CONFIG_QUOTA
1665#define MOPT_Q 0
1666#define MOPT_QFMT 0x0040
1667#else
1668#define MOPT_Q MOPT_NOSUPPORT
1669#define MOPT_QFMT MOPT_NOSUPPORT
1670#endif
1671#define MOPT_DATAJ 0x0080
1672#define MOPT_NO_EXT2 0x0100
1673#define MOPT_NO_EXT3 0x0200
1674#define MOPT_EXT4_ONLY (MOPT_NO_EXT2 | MOPT_NO_EXT3)
1675#define MOPT_STRING 0x0400
1676
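/*
 * Each entry in the table below maps a parsed mount option token to the
 * EXT4_MOUNT_* bit(s) it sets or clears, plus MOPT_* flags describing how
 * the option is handled (set/clear, ext2/ext3 restrictions, quota, string
 * argument, etc.).
 */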
1677static const struct mount_opts {
1678 int token;
1679 int mount_opt;
1680 int flags;
1681} ext4_mount_opts[] = {
1682 {Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET},
1683 {Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR},
1684 {Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET},
1685 {Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR},
1686 {Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET},
1687 {Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR},
1688 {Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK,
1689 MOPT_EXT4_ONLY | MOPT_SET},
1690 {Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK,
1691 MOPT_EXT4_ONLY | MOPT_CLEAR},
1692 {Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET},
1693 {Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR},
1694 {Opt_delalloc, EXT4_MOUNT_DELALLOC,
1695 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
1696 {Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
1697 MOPT_EXT4_ONLY | MOPT_CLEAR},
1698 {Opt_warn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_SET},
1699 {Opt_nowarn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_CLEAR},
1700 {Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
1701 MOPT_EXT4_ONLY | MOPT_CLEAR},
1702 {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
1703 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
1704 {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
1705 EXT4_MOUNT_JOURNAL_CHECKSUM),
1706 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
1707 {Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET},
1708 {Opt_err_panic, EXT4_MOUNT_ERRORS_PANIC, MOPT_SET | MOPT_CLEAR_ERR},
1709 {Opt_err_ro, EXT4_MOUNT_ERRORS_RO, MOPT_SET | MOPT_CLEAR_ERR},
1710 {Opt_err_cont, EXT4_MOUNT_ERRORS_CONT, MOPT_SET | MOPT_CLEAR_ERR},
1711 {Opt_data_err_abort, EXT4_MOUNT_DATA_ERR_ABORT,
1712 MOPT_NO_EXT2},
1713 {Opt_data_err_ignore, EXT4_MOUNT_DATA_ERR_ABORT,
1714 MOPT_NO_EXT2},
1715 {Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET},
1716 {Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR},
1717 {Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
1718 {Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR},
1719 {Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR},
1720 {Opt_commit, 0, MOPT_GTE0},
1721 {Opt_max_batch_time, 0, MOPT_GTE0},
1722 {Opt_min_batch_time, 0, MOPT_GTE0},
1723 {Opt_inode_readahead_blks, 0, MOPT_GTE0},
1724 {Opt_init_itable, 0, MOPT_GTE0},
1725 {Opt_dax, EXT4_MOUNT_DAX, MOPT_SET},
1726 {Opt_stripe, 0, MOPT_GTE0},
1727 {Opt_resuid, 0, MOPT_GTE0},
1728 {Opt_resgid, 0, MOPT_GTE0},
1729 {Opt_journal_dev, 0, MOPT_NO_EXT2 | MOPT_GTE0},
1730 {Opt_journal_path, 0, MOPT_NO_EXT2 | MOPT_STRING},
1731 {Opt_journal_ioprio, 0, MOPT_NO_EXT2 | MOPT_GTE0},
1732 {Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
1733 {Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
1734 {Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA,
1735 MOPT_NO_EXT2 | MOPT_DATAJ},
1736 {Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET},
1737 {Opt_nouser_xattr, EXT4_MOUNT_XATTR_USER, MOPT_CLEAR},
1738#ifdef CONFIG_EXT4_FS_POSIX_ACL
1739 {Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET},
1740 {Opt_noacl, EXT4_MOUNT_POSIX_ACL, MOPT_CLEAR},
1741#else
1742 {Opt_acl, 0, MOPT_NOSUPPORT},
1743 {Opt_noacl, 0, MOPT_NOSUPPORT},
1744#endif
1745 {Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET},
1746 {Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET},
1747 {Opt_debug_want_extra_isize, 0, MOPT_GTE0},
1748 {Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q},
1749 {Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA,
1750 MOPT_SET | MOPT_Q},
1751 {Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA,
1752 MOPT_SET | MOPT_Q},
1753 {Opt_prjquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_PRJQUOTA,
1754 MOPT_SET | MOPT_Q},
1755 {Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
1756 EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
1757 MOPT_CLEAR | MOPT_Q},
1758	{Opt_usrjquota, 0, MOPT_Q | MOPT_STRING},
1759 {Opt_grpjquota, 0, MOPT_Q | MOPT_STRING},
1760	{Opt_offusrjquota, 0, MOPT_Q},
1761 {Opt_offgrpjquota, 0, MOPT_Q},
1762 {Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
1763 {Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
1764 {Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
1765 {Opt_max_dir_size_kb, 0, MOPT_GTE0},
1766 {Opt_test_dummy_encryption, 0, MOPT_GTE0},
1767 {Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
1768 {Opt_err, 0, 0}
1769};
1770
1771#ifdef CONFIG_UNICODE
1772static const struct ext4_sb_encodings {
1773 __u16 magic;
1774 char *name;
1775 char *version;
1776} ext4_sb_encoding_map[] = {
1777 {EXT4_ENC_UTF8_12_1, "utf8", "12.1.0"},
1778};
1779
1780static int ext4_sb_read_encoding(const struct ext4_super_block *es,
1781 const struct ext4_sb_encodings **encoding,
1782 __u16 *flags)
1783{
1784 __u16 magic = le16_to_cpu(es->s_encoding);
1785 int i;
1786
1787 for (i = 0; i < ARRAY_SIZE(ext4_sb_encoding_map); i++)
1788 if (magic == ext4_sb_encoding_map[i].magic)
1789 break;
1790
1791 if (i >= ARRAY_SIZE(ext4_sb_encoding_map))
1792 return -EINVAL;
1793
1794 *encoding = &ext4_sb_encoding_map[i];
1795 *flags = le16_to_cpu(es->s_encoding_flags);
1796
1797 return 0;
1798}
1799#endif
1800
1801static int handle_mount_opt(struct super_block *sb, char *opt, int token,
1802 substring_t *args, unsigned long *journal_devnum,
1803 unsigned int *journal_ioprio, int is_remount)
1804{
1805 struct ext4_sb_info *sbi = EXT4_SB(sb);
1806 const struct mount_opts *m;
1807 kuid_t uid;
1808 kgid_t gid;
1809 int arg = 0;
1810
1811#ifdef CONFIG_QUOTA
1812 if (token == Opt_usrjquota)
1813 return set_qf_name(sb, USRQUOTA, &args[0]);
1814 else if (token == Opt_grpjquota)
1815 return set_qf_name(sb, GRPQUOTA, &args[0]);
1816 else if (token == Opt_offusrjquota)
1817 return clear_qf_name(sb, USRQUOTA);
1818 else if (token == Opt_offgrpjquota)
1819 return clear_qf_name(sb, GRPQUOTA);
1820#endif
1821 switch (token) {
1822 case Opt_noacl:
1823 case Opt_nouser_xattr:
1824 ext4_msg(sb, KERN_WARNING, deprecated_msg, opt, "3.5");
1825 break;
1826 case Opt_sb:
1827 return 1; /* handled by get_sb_block() */
1828 case Opt_removed:
1829 ext4_msg(sb, KERN_WARNING, "Ignoring removed %s option", opt);
1830 return 1;
1831 case Opt_abort:
1832 sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
1833 return 1;
1834 case Opt_i_version:
1835 sb->s_flags |= SB_I_VERSION;
1836 return 1;
1837 case Opt_lazytime:
1838 sb->s_flags |= SB_LAZYTIME;
1839 return 1;
1840 case Opt_nolazytime:
1841 sb->s_flags &= ~SB_LAZYTIME;
1842 return 1;
1843 }
1844
1845 for (m = ext4_mount_opts; m->token != Opt_err; m++)
1846 if (token == m->token)
1847 break;
1848
1849 if (m->token == Opt_err) {
1850 ext4_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" "
1851 "or missing value", opt);
1852 return -1;
1853 }
1854
1855 if ((m->flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) {
1856 ext4_msg(sb, KERN_ERR,
1857 "Mount option \"%s\" incompatible with ext2", opt);
1858 return -1;
1859 }
1860 if ((m->flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) {
1861 ext4_msg(sb, KERN_ERR,
1862 "Mount option \"%s\" incompatible with ext3", opt);
1863 return -1;
1864 }
1865
1866 if (args->from && !(m->flags & MOPT_STRING) && match_int(args, &arg))
1867 return -1;
1868 if (args->from && (m->flags & MOPT_GTE0) && (arg < 0))
1869 return -1;
1870 if (m->flags & MOPT_EXPLICIT) {
1871 if (m->mount_opt & EXT4_MOUNT_DELALLOC) {
1872 set_opt2(sb, EXPLICIT_DELALLOC);
1873 } else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) {
1874 set_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM);
1875 } else
1876 return -1;
1877 }
1878 if (m->flags & MOPT_CLEAR_ERR)
1879 clear_opt(sb, ERRORS_MASK);
1880 if (token == Opt_noquota && sb_any_quota_loaded(sb)) {
1881 ext4_msg(sb, KERN_ERR, "Cannot change quota "
1882 "options when quota turned on");
1883 return -1;
1884 }
1885
1886 if (m->flags & MOPT_NOSUPPORT) {
1887 ext4_msg(sb, KERN_ERR, "%s option not supported", opt);
1888 } else if (token == Opt_commit) {
1889 if (arg == 0)
1890 arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
1891		else if (arg > INT_MAX / HZ) {
1892 ext4_msg(sb, KERN_ERR,
1893 "Invalid commit interval %d, "
1894 "must be smaller than %d",
1895 arg, INT_MAX / HZ);
1896 return -1;
1897 }
1898		sbi->s_commit_interval = HZ * arg;
1899 } else if (token == Opt_debug_want_extra_isize) {
1900		if ((arg & 1) ||
1901 (arg < 4) ||
1902 (arg > (sbi->s_inode_size - EXT4_GOOD_OLD_INODE_SIZE))) {
1903 ext4_msg(sb, KERN_ERR,
1904 "Invalid want_extra_isize %d", arg);
1905 return -1;
1906 }
1907		sbi->s_want_extra_isize = arg;
1908 } else if (token == Opt_max_batch_time) {
1909 sbi->s_max_batch_time = arg;
1910 } else if (token == Opt_min_batch_time) {
1911 sbi->s_min_batch_time = arg;
1912 } else if (token == Opt_inode_readahead_blks) {
1913 if (arg && (arg > (1 << 30) || !is_power_of_2(arg))) {
1914 ext4_msg(sb, KERN_ERR,
1915 "EXT4-fs: inode_readahead_blks must be "
1916 "0 or a power of 2 smaller than 2^31");
1917 return -1;
1918 }
1919 sbi->s_inode_readahead_blks = arg;
1920 } else if (token == Opt_init_itable) {
1921 set_opt(sb, INIT_INODE_TABLE);
1922 if (!args->from)
1923 arg = EXT4_DEF_LI_WAIT_MULT;
1924 sbi->s_li_wait_mult = arg;
1925 } else if (token == Opt_max_dir_size_kb) {
1926 sbi->s_max_dir_size_kb = arg;
1927 } else if (token == Opt_stripe) {
1928 sbi->s_stripe = arg;
1929 } else if (token == Opt_resuid) {
1930 uid = make_kuid(current_user_ns(), arg);
1931 if (!uid_valid(uid)) {
1932 ext4_msg(sb, KERN_ERR, "Invalid uid value %d", arg);
1933 return -1;
1934 }
1935 sbi->s_resuid = uid;
1936 } else if (token == Opt_resgid) {
1937 gid = make_kgid(current_user_ns(), arg);
1938 if (!gid_valid(gid)) {
1939 ext4_msg(sb, KERN_ERR, "Invalid gid value %d", arg);
1940 return -1;
1941 }
1942 sbi->s_resgid = gid;
1943 } else if (token == Opt_journal_dev) {
1944 if (is_remount) {
1945 ext4_msg(sb, KERN_ERR,
1946 "Cannot specify journal on remount");
1947 return -1;
1948 }
1949 *journal_devnum = arg;
1950 } else if (token == Opt_journal_path) {
1951 char *journal_path;
1952 struct inode *journal_inode;
1953 struct path path;
1954 int error;
1955
1956 if (is_remount) {
1957 ext4_msg(sb, KERN_ERR,
1958 "Cannot specify journal on remount");
1959 return -1;
1960 }
1961 journal_path = match_strdup(&args[0]);
1962 if (!journal_path) {
1963 ext4_msg(sb, KERN_ERR, "error: could not dup "
1964 "journal device string");
1965 return -1;
1966 }
1967
1968 error = kern_path(journal_path, LOOKUP_FOLLOW, &path);
1969 if (error) {
1970 ext4_msg(sb, KERN_ERR, "error: could not find "
1971 "journal device path: error %d", error);
1972 kfree(journal_path);
1973 return -1;
1974 }
1975
1976 journal_inode = d_inode(path.dentry);
1977 if (!S_ISBLK(journal_inode->i_mode)) {
1978 ext4_msg(sb, KERN_ERR, "error: journal path %s "
1979 "is not a block device", journal_path);
1980 path_put(&path);
1981 kfree(journal_path);
1982 return -1;
1983 }
1984
1985 *journal_devnum = new_encode_dev(journal_inode->i_rdev);
1986 path_put(&path);
1987 kfree(journal_path);
1988 } else if (token == Opt_journal_ioprio) {
1989 if (arg > 7) {
1990 ext4_msg(sb, KERN_ERR, "Invalid journal IO priority"
1991 " (must be 0-7)");
1992 return -1;
1993 }
1994 *journal_ioprio =
1995 IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
1996 } else if (token == Opt_test_dummy_encryption) {
1997#ifdef CONFIG_FS_ENCRYPTION
1998		sbi->s_mount_flags |= EXT4_MF_TEST_DUMMY_ENCRYPTION;
1999 ext4_msg(sb, KERN_WARNING,
2000 "Test dummy encryption mode enabled");
2001#else
2002 ext4_msg(sb, KERN_WARNING,
2003 "Test dummy encryption mount option ignored");
2004#endif
2005 } else if (m->flags & MOPT_DATAJ) {
2006 if (is_remount) {
2007 if (!sbi->s_journal)
2008 ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option");
2009 else if (test_opt(sb, DATA_FLAGS) != m->mount_opt) {
2010 ext4_msg(sb, KERN_ERR,
2011 "Cannot change data mode on remount");
2012 return -1;
2013 }
2014 } else {
2015 clear_opt(sb, DATA_FLAGS);
2016 sbi->s_mount_opt |= m->mount_opt;
2017 }
2018#ifdef CONFIG_QUOTA
2019 } else if (m->flags & MOPT_QFMT) {
2020 if (sb_any_quota_loaded(sb) &&
2021 sbi->s_jquota_fmt != m->mount_opt) {
2022 ext4_msg(sb, KERN_ERR, "Cannot change journaled "
2023 "quota options when quota turned on");
2024 return -1;
2025 }
2026 if (ext4_has_feature_quota(sb)) {
2027 ext4_msg(sb, KERN_INFO,
2028 "Quota format mount options ignored "
2029 "when QUOTA feature is enabled");
2030 return 1;
2031 }
2032 sbi->s_jquota_fmt = m->mount_opt;
2033#endif
2034 } else if (token == Opt_dax) {
2035#ifdef CONFIG_FS_DAX
2036		if (is_remount && test_opt(sb, DAX)) {
2037 ext4_msg(sb, KERN_ERR, "can't mount with "
2038 "both data=journal and dax");
2039 return -1;
2040 }
2041 if (is_remount && !(sbi->s_mount_opt & EXT4_MOUNT_DAX)) {
2042 ext4_msg(sb, KERN_ERR, "can't change "
2043 "dax mount option while remounting");
2044 return -1;
2045 }
2046		ext4_msg(sb, KERN_WARNING,
2047 "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
2048		sbi->s_mount_opt |= m->mount_opt;
2049#else
2050 ext4_msg(sb, KERN_INFO, "dax option not supported");
2051 return -1;
2052#endif
2053 } else if (token == Opt_data_err_abort) {
2054 sbi->s_mount_opt |= m->mount_opt;
2055 } else if (token == Opt_data_err_ignore) {
2056 sbi->s_mount_opt &= ~m->mount_opt;
2057 } else {
2058 if (!args->from)
2059 arg = 1;
2060 if (m->flags & MOPT_CLEAR)
2061 arg = !arg;
2062 else if (unlikely(!(m->flags & MOPT_SET))) {
2063 ext4_msg(sb, KERN_WARNING,
2064 "buggy handling of option %s", opt);
2065 WARN_ON(1);
2066 return -1;
2067 }
2068 if (arg != 0)
2069 sbi->s_mount_opt |= m->mount_opt;
2070 else
2071 sbi->s_mount_opt &= ~m->mount_opt;
2072 }
2073 return 1;
2074}
2075
2076static int parse_options(char *options, struct super_block *sb,
2077 unsigned long *journal_devnum,
2078 unsigned int *journal_ioprio,
2079 int is_remount)
2080{
2081 struct ext4_sb_info *sbi = EXT4_SB(sb);
2082 char *p, __maybe_unused *usr_qf_name, __maybe_unused *grp_qf_name;
2083 substring_t args[MAX_OPT_ARGS];
2084 int token;
2085
2086 if (!options)
2087 return 1;
2088
2089 while ((p = strsep(&options, ",")) != NULL) {
2090 if (!*p)
2091 continue;
2092 /*
2093 * Initialize args struct so we know whether arg was
2094 * found; some options take optional arguments.
2095 */
2096 args[0].to = args[0].from = NULL;
2097 token = match_token(p, tokens, args);
2098 if (handle_mount_opt(sb, p, token, args, journal_devnum,
2099 journal_ioprio, is_remount) < 0)
2100 return 0;
2101 }
2102#ifdef CONFIG_QUOTA
2103 /*
2104 * We do the test below only for project quotas. 'usrquota' and
2105 * 'grpquota' mount options are allowed even without quota feature
2106 * to support legacy quotas in quota files.
2107 */
2108 if (test_opt(sb, PRJQUOTA) && !ext4_has_feature_project(sb)) {
2109 ext4_msg(sb, KERN_ERR, "Project quota feature not enabled. "
2110 "Cannot enable project quota enforcement.");
2111 return 0;
2112 }
2113 usr_qf_name = get_qf_name(sb, sbi, USRQUOTA);
2114 grp_qf_name = get_qf_name(sb, sbi, GRPQUOTA);
2115 if (usr_qf_name || grp_qf_name) {
2116 if (test_opt(sb, USRQUOTA) && usr_qf_name)
2117 clear_opt(sb, USRQUOTA);
2118
2119 if (test_opt(sb, GRPQUOTA) && grp_qf_name)
2120 clear_opt(sb, GRPQUOTA);
2121
2122 if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
2123 ext4_msg(sb, KERN_ERR, "old and new quota "
2124 "format mixing");
2125 return 0;
2126 }
2127
2128 if (!sbi->s_jquota_fmt) {
2129 ext4_msg(sb, KERN_ERR, "journaled quota format "
2130 "not specified");
2131 return 0;
2132 }
2133 }
2134#endif
2135 if (test_opt(sb, DIOREAD_NOLOCK)) {
2136 int blocksize =
2137 BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
2138
2139 if (blocksize < PAGE_SIZE) {
2140 ext4_msg(sb, KERN_ERR, "can't mount with "
2141 "dioread_nolock if block size != PAGE_SIZE");
2142 return 0;
2143 }
2144 }
2145 return 1;
2146}
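/*
 * Illustrative example (made-up option string): parsing
 * "data=ordered,commit=15,usrjquota=aquota.user,jqfmt=vfsv1" calls
 * handle_mount_opt() once per comma-separated token; the quota checks above
 * then require a jqfmt= format for journaled quota names and reject mixing
 * them with the plain usrquota/grpquota flags ("old and new quota format
 * mixing").
 */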
2147
2148static inline void ext4_show_quota_options(struct seq_file *seq,
2149 struct super_block *sb)
2150{
2151#if defined(CONFIG_QUOTA)
2152 struct ext4_sb_info *sbi = EXT4_SB(sb);
2153 char *usr_qf_name, *grp_qf_name;
2154
2155 if (sbi->s_jquota_fmt) {
2156 char *fmtname = "";
2157
2158 switch (sbi->s_jquota_fmt) {
2159 case QFMT_VFS_OLD:
2160 fmtname = "vfsold";
2161 break;
2162 case QFMT_VFS_V0:
2163 fmtname = "vfsv0";
2164 break;
2165 case QFMT_VFS_V1:
2166 fmtname = "vfsv1";
2167 break;
2168 }
2169 seq_printf(seq, ",jqfmt=%s", fmtname);
2170 }
2171
2172 rcu_read_lock();
2173 usr_qf_name = rcu_dereference(sbi->s_qf_names[USRQUOTA]);
2174 grp_qf_name = rcu_dereference(sbi->s_qf_names[GRPQUOTA]);
2175 if (usr_qf_name)
2176 seq_show_option(seq, "usrjquota", usr_qf_name);
2177 if (grp_qf_name)
2178 seq_show_option(seq, "grpjquota", grp_qf_name);
2179 rcu_read_unlock();
2180#endif
2181}
2182
2183static const char *token2str(int token)
2184{
2185 const struct match_token *t;
2186
2187 for (t = tokens; t->token != Opt_err; t++)
2188 if (t->token == token && !strchr(t->pattern, '='))
2189 break;
2190 return t->pattern;
2191}
2192
2193/*
2194 * Show an option if
2195 * - it's set to a non-default value OR
2196 * - if the per-sb default is different from the global default
2197 */
2198static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
2199 int nodefs)
2200{
2201 struct ext4_sb_info *sbi = EXT4_SB(sb);
2202 struct ext4_super_block *es = sbi->s_es;
2203 int def_errors, def_mount_opt = sbi->s_def_mount_opt;
2204 const struct mount_opts *m;
2205 char sep = nodefs ? '\n' : ',';
2206
2207#define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep)
2208#define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg)
2209
2210 if (sbi->s_sb_block != 1)
2211 SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block);
2212
2213 for (m = ext4_mount_opts; m->token != Opt_err; m++) {
2214 int want_set = m->flags & MOPT_SET;
2215 if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) ||
2216 (m->flags & MOPT_CLEAR_ERR))
2217 continue;
2218 if (!nodefs && !(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt)))
2219 continue; /* skip if same as the default */
2220 if ((want_set &&
2221 (sbi->s_mount_opt & m->mount_opt) != m->mount_opt) ||
2222 (!want_set && (sbi->s_mount_opt & m->mount_opt)))
2223 continue; /* select Opt_noFoo vs Opt_Foo */
2224 SEQ_OPTS_PRINT("%s", token2str(m->token));
2225 }
2226
2227 if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) ||
2228 le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID)
2229 SEQ_OPTS_PRINT("resuid=%u",
2230 from_kuid_munged(&init_user_ns, sbi->s_resuid));
2231 if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) ||
2232 le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID)
2233 SEQ_OPTS_PRINT("resgid=%u",
2234 from_kgid_munged(&init_user_ns, sbi->s_resgid));
2235 def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors);
2236 if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO)
2237 SEQ_OPTS_PUTS("errors=remount-ro");
2238 if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
2239 SEQ_OPTS_PUTS("errors=continue");
2240 if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
2241 SEQ_OPTS_PUTS("errors=panic");
2242 if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ)
2243 SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ);
2244 if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME)
2245 SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
2246 if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
2247 SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
2248 if (sb->s_flags & SB_I_VERSION)
2249 SEQ_OPTS_PUTS("i_version");
2250 if (nodefs || sbi->s_stripe)
2251 SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
2252 if (nodefs || EXT4_MOUNT_DATA_FLAGS &
2253 (sbi->s_mount_opt ^ def_mount_opt)) {
2254 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
2255 SEQ_OPTS_PUTS("data=journal");
2256 else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
2257 SEQ_OPTS_PUTS("data=ordered");
2258 else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
2259 SEQ_OPTS_PUTS("data=writeback");
2260 }
2261 if (nodefs ||
2262 sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
2263 SEQ_OPTS_PRINT("inode_readahead_blks=%u",
2264 sbi->s_inode_readahead_blks);
2265
2266 if (test_opt(sb, INIT_INODE_TABLE) && (nodefs ||
2267 (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)))
2268 SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult);
2269 if (nodefs || sbi->s_max_dir_size_kb)
2270 SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
2271 if (test_opt(sb, DATA_ERR_ABORT))
2272 SEQ_OPTS_PUTS("data_err=abort");
2273 if (DUMMY_ENCRYPTION_ENABLED(sbi))
2274 SEQ_OPTS_PUTS("test_dummy_encryption");
2275
2276 ext4_show_quota_options(seq, sb);
2277 return 0;
2278}
2279
2280static int ext4_show_options(struct seq_file *seq, struct dentry *root)
2281{
2282 return _ext4_show_options(seq, root->d_sb, 0);
2283}
2284
2285int ext4_seq_options_show(struct seq_file *seq, void *offset)
2286{
2287 struct super_block *sb = seq->private;
2288 int rc;
2289
2290 seq_puts(seq, sb_rdonly(sb) ? "ro" : "rw");
2291 rc = _ext4_show_options(seq, sb, 1);
2292 seq_puts(seq, "\n");
2293 return rc;
2294}
2295
2296static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
2297 int read_only)
2298{
2299 struct ext4_sb_info *sbi = EXT4_SB(sb);
2300 int err = 0;
2301
2302 if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
2303 ext4_msg(sb, KERN_ERR, "revision level too high, "
2304 "forcing read-only mode");
2305 err = -EROFS;
2306		goto done;
2307	}
2308 if (read_only)
2309 goto done;
2310 if (!(sbi->s_mount_state & EXT4_VALID_FS))
2311 ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, "
2312 "running e2fsck is recommended");
2313 else if (sbi->s_mount_state & EXT4_ERROR_FS)
2314 ext4_msg(sb, KERN_WARNING,
2315 "warning: mounting fs with errors, "
2316 "running e2fsck is recommended");
2317 else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
2318 le16_to_cpu(es->s_mnt_count) >=
2319 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
2320 ext4_msg(sb, KERN_WARNING,
2321 "warning: maximal mount count reached, "
2322 "running e2fsck is recommended");
2323 else if (le32_to_cpu(es->s_checkinterval) &&
2324 (ext4_get_tstamp(es, s_lastcheck) +
2325 le32_to_cpu(es->s_checkinterval) <= ktime_get_real_seconds()))
2326 ext4_msg(sb, KERN_WARNING,
2327 "warning: checktime reached, "
2328 "running e2fsck is recommended");
2329 if (!sbi->s_journal)
2330 es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
2331 if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
2332 es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
2333 le16_add_cpu(&es->s_mnt_count, 1);
2334 ext4_update_tstamp(es, s_mtime);
2335	if (sbi->s_journal)
2336 ext4_set_feature_journal_needs_recovery(sb);
2337
2338 err = ext4_commit_super(sb, 1);
2339done:
2340 if (test_opt(sb, DEBUG))
2341 printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
2342 "bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
2343 sb->s_blocksize,
2344 sbi->s_groups_count,
2345 EXT4_BLOCKS_PER_GROUP(sb),
2346 EXT4_INODES_PER_GROUP(sb),
2347 sbi->s_mount_opt, sbi->s_mount_opt2);
2348
2349 cleancache_init_fs(sb);
2350 return err;
2351}
2352
2353int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
2354{
2355 struct ext4_sb_info *sbi = EXT4_SB(sb);
2356	struct flex_groups **old_groups, **new_groups;
2357 int size, i, j;
2358
2359 if (!sbi->s_log_groups_per_flex)
2360 return 0;
2361
2362 size = ext4_flex_group(sbi, ngroup - 1) + 1;
2363 if (size <= sbi->s_flex_groups_allocated)
2364 return 0;
2365
2366	new_groups = kvzalloc(roundup_pow_of_two(size *
2367 sizeof(*sbi->s_flex_groups)), GFP_KERNEL);
2368	if (!new_groups) {
2369		ext4_msg(sb, KERN_ERR,
2370 "not enough memory for %d flex group pointers", size);
2371		return -ENOMEM;
2372 }
2373	for (i = sbi->s_flex_groups_allocated; i < size; i++) {
2374 new_groups[i] = kvzalloc(roundup_pow_of_two(
2375 sizeof(struct flex_groups)),
2376 GFP_KERNEL);
2377 if (!new_groups[i]) {
2378 for (j = sbi->s_flex_groups_allocated; j < i; j++)
2379 kvfree(new_groups[j]);
2380 kvfree(new_groups);
2381 ext4_msg(sb, KERN_ERR,
2382 "not enough memory for %d flex groups", size);
2383 return -ENOMEM;
2384 }
2385	}
2386	rcu_read_lock();
2387 old_groups = rcu_dereference(sbi->s_flex_groups);
2388 if (old_groups)
2389 memcpy(new_groups, old_groups,
2390 (sbi->s_flex_groups_allocated *
2391 sizeof(struct flex_groups *)));
2392 rcu_read_unlock();
2393 rcu_assign_pointer(sbi->s_flex_groups, new_groups);
2394 sbi->s_flex_groups_allocated = size;
2395 if (old_groups)
2396 ext4_kvfree_array_rcu(old_groups);
2397	return 0;
2398}
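/*
 * Readers access the flex group array via sbi_array_rcu_deref(); the
 * function above only ever grows it: per-group structs are allocated once,
 * the pointer table is copied into a larger allocation, published with
 * rcu_assign_pointer(), and the old table is freed after a grace period by
 * ext4_kvfree_array_rcu().
 */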
2399
2400static int ext4_fill_flex_info(struct super_block *sb)
2401{
2402 struct ext4_sb_info *sbi = EXT4_SB(sb);
2403 struct ext4_group_desc *gdp = NULL;
2404	struct flex_groups *fg;
2405	ext4_group_t flex_group;
2406 int i, err;
2407
2408 sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
2409 if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
2410 sbi->s_log_groups_per_flex = 0;
2411 return 1;
2412 }
2413
2414 err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
2415 if (err)
2416 goto failed;
2417
2418 for (i = 0; i < sbi->s_groups_count; i++) {
2419 gdp = ext4_get_group_desc(sb, i, NULL);
2420
2421 flex_group = ext4_flex_group(sbi, i);
2422		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
2423 atomic_add(ext4_free_inodes_count(sb, gdp), &fg->free_inodes);
2424		atomic64_add(ext4_free_group_clusters(sb, gdp),
2425			     &fg->free_clusters);
2426 atomic_add(ext4_used_dirs_count(sb, gdp), &fg->used_dirs);
2427	}
2428
2429 return 1;
2430failed:
2431 return 0;
2432}
2433
2434static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
2435 struct ext4_group_desc *gdp)
2436{
2437 int offset = offsetof(struct ext4_group_desc, bg_checksum);
2438 __u16 crc = 0;
2439 __le32 le_group = cpu_to_le32(block_group);
2440 struct ext4_sb_info *sbi = EXT4_SB(sb);
2441
2442 if (ext4_has_metadata_csum(sbi->s_sb)) {
2443 /* Use new metadata_csum algorithm */
2444 __u32 csum32;
2445 __u16 dummy_csum = 0;
2446
2447 csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
2448 sizeof(le_group));
2449 csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset);
2450 csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum,
2451 sizeof(dummy_csum));
2452 offset += sizeof(dummy_csum);
2453 if (offset < sbi->s_desc_size)
2454 csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset,
2455 sbi->s_desc_size - offset);
2456
2457 crc = csum32 & 0xFFFF;
2458 goto out;
2459 }
2460
2461 /* old crc16 code */
2462 if (!ext4_has_feature_gdt_csum(sb))
2463 return 0;
2464
2465 crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
2466 crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
2467 crc = crc16(crc, (__u8 *)gdp, offset);
2468 offset += sizeof(gdp->bg_checksum); /* skip checksum */
2469 /* for checksum of struct ext4_group_desc do the rest...*/
2470 if (ext4_has_feature_64bit(sb) &&
2471 offset < le16_to_cpu(sbi->s_es->s_desc_size))
2472 crc = crc16(crc, (__u8 *)gdp + offset,
2473 le16_to_cpu(sbi->s_es->s_desc_size) -
2474 offset);
2475
2476out:
2477 return cpu_to_le16(crc);
2478}
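/*
 * Summary of the two checksum flavours above: with metadata_csum the
 * descriptor is covered by crc32c seeded from s_csum_seed, computed with
 * the bg_checksum field treated as zero and truncated to 16 bits; otherwise
 * the legacy gdt_csum scheme uses crc16 over the fs UUID, the group number
 * and the descriptor (skipping the checksum field).
 */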
2479
2480int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group,
2481 struct ext4_group_desc *gdp)
2482{
2483 if (ext4_has_group_desc_csum(sb) &&
2484 (gdp->bg_checksum != ext4_group_desc_csum(sb, block_group, gdp)))
2485 return 0;
2486
2487 return 1;
2488}
2489
2490void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
2491 struct ext4_group_desc *gdp)
2492{
2493 if (!ext4_has_group_desc_csum(sb))
2494 return;
2495 gdp->bg_checksum = ext4_group_desc_csum(sb, block_group, gdp);
2496}
2497
2498/* Called at mount-time, super-block is locked */
2499static int ext4_check_descriptors(struct super_block *sb,
2500 ext4_fsblk_t sb_block,
2501 ext4_group_t *first_not_zeroed)
2502{
2503 struct ext4_sb_info *sbi = EXT4_SB(sb);
2504 ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
2505 ext4_fsblk_t last_block;
2506 ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
2507 ext4_fsblk_t block_bitmap;
2508 ext4_fsblk_t inode_bitmap;
2509 ext4_fsblk_t inode_table;
2510 int flexbg_flag = 0;
2511 ext4_group_t i, grp = sbi->s_groups_count;
2512
2513 if (ext4_has_feature_flex_bg(sb))
2514 flexbg_flag = 1;
2515
2516 ext4_debug("Checking group descriptors");
2517
2518 for (i = 0; i < sbi->s_groups_count; i++) {
2519 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
2520
2521 if (i == sbi->s_groups_count - 1 || flexbg_flag)
2522 last_block = ext4_blocks_count(sbi->s_es) - 1;
2523 else
2524 last_block = first_block +
2525 (EXT4_BLOCKS_PER_GROUP(sb) - 1);
2526
2527 if ((grp == sbi->s_groups_count) &&
2528 !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
2529 grp = i;
2530
2531 block_bitmap = ext4_block_bitmap(sb, gdp);
2532 if (block_bitmap == sb_block) {
2533 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
2534 "Block bitmap for group %u overlaps "
2535 "superblock", i);
2536 if (!sb_rdonly(sb))
2537 return 0;
2538 }
2539 if (block_bitmap >= sb_block + 1 &&
2540 block_bitmap <= last_bg_block) {
2541 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
2542 "Block bitmap for group %u overlaps "
2543 "block group descriptors", i);
2544 if (!sb_rdonly(sb))
2545 return 0;
2546 }
2547 if (block_bitmap < first_block || block_bitmap > last_block) {
2548 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
2549 "Block bitmap for group %u not in group "
2550 "(block %llu)!", i, block_bitmap);
2551 return 0;
2552 }
2553 inode_bitmap = ext4_inode_bitmap(sb, gdp);
2554 if (inode_bitmap == sb_block) {
2555 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
2556 "Inode bitmap for group %u overlaps "
2557 "superblock", i);
2558 if (!sb_rdonly(sb))
2559 return 0;
2560 }
2561 if (inode_bitmap >= sb_block + 1 &&
2562 inode_bitmap <= last_bg_block) {
2563 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
2564 "Inode bitmap for group %u overlaps "
2565 "block group descriptors", i);
2566 if (!sb_rdonly(sb))
2567 return 0;
2568 }
2569 if (inode_bitmap < first_block || inode_bitmap > last_block) {
2570 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
2571 "Inode bitmap for group %u not in group "
2572 "(block %llu)!", i, inode_bitmap);
2573 return 0;
2574 }
2575 inode_table = ext4_inode_table(sb, gdp);
2576 if (inode_table == sb_block) {
2577 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
2578 "Inode table for group %u overlaps "
2579 "superblock", i);
2580 if (!sb_rdonly(sb))
2581 return 0;
2582 }
2583 if (inode_table >= sb_block + 1 &&
2584 inode_table <= last_bg_block) {
2585 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
2586 "Inode table for group %u overlaps "
2587 "block group descriptors", i);
2588 if (!sb_rdonly(sb))
2589 return 0;
2590 }
2591 if (inode_table < first_block ||
2592 inode_table + sbi->s_itb_per_group - 1 > last_block) {
2593 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
2594 "Inode table for group %u not in group "
2595 "(block %llu)!", i, inode_table);
2596 return 0;
2597 }
2598 ext4_lock_group(sb, i);
2599 if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
2600 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
2601 "Checksum for group %u failed (%u!=%u)",
2602 i, le16_to_cpu(ext4_group_desc_csum(sb, i,
2603 gdp)), le16_to_cpu(gdp->bg_checksum));
2604 if (!sb_rdonly(sb)) {
2605 ext4_unlock_group(sb, i);
2606 return 0;
2607 }
2608 }
2609 ext4_unlock_group(sb, i);
2610 if (!flexbg_flag)
2611 first_block += EXT4_BLOCKS_PER_GROUP(sb);
2612 }
2613 if (NULL != first_not_zeroed)
2614 *first_not_zeroed = grp;
2615 return 1;
2616}
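/*
 * In short: every group's block bitmap, inode bitmap and inode table must
 * lie inside the group (unless flex_bg spreads them) and must not collide
 * with the superblock or group descriptor blocks.  Overlaps and bad
 * descriptor checksums are tolerated on read-only mounts, but an
 * out-of-group location always fails the mount.
 */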
2617
2618/* ext4_orphan_cleanup() walks a singly-linked list of inodes (starting at
2619 * the superblock) which were deleted from all directories, but held open by
2620 * a process at the time of a crash. We walk the list and try to delete these
2621 * inodes at recovery time (only with a read-write filesystem).
2622 *
2623 * In order to keep the orphan inode chain consistent during traversal (in
2624 * case of crash during recovery), we link each inode into the superblock
2625 * orphan list_head and handle it the same way as an inode deletion during
2626 * normal operation (which journals the operations for us).
2627 *
2628 * We only do an iget() and an iput() on each inode, which is very safe if we
2629 * accidentally point at an in-use or already deleted inode. The worst that
2630 * can happen in this case is that we get a "bit already cleared" message from
2631 * ext4_free_inode(). The only reason we would point at a wrong inode is if
2632 * e2fsck was run on this filesystem, and it must have already done the orphan
2633 * inode cleanup for us, so we can safely abort without any further action.
2634 */
2635static void ext4_orphan_cleanup(struct super_block *sb,
2636 struct ext4_super_block *es)
2637{
2638 unsigned int s_flags = sb->s_flags;
2639 int ret, nr_orphans = 0, nr_truncates = 0;
2640#ifdef CONFIG_QUOTA
2641 int quota_update = 0;
2642 int i;
2643#endif
2644 if (!es->s_last_orphan) {
2645 jbd_debug(4, "no orphan inodes to clean up\n");
2646 return;
2647 }
2648
2649 if (bdev_read_only(sb->s_bdev)) {
2650 ext4_msg(sb, KERN_ERR, "write access "
2651 "unavailable, skipping orphan cleanup");
2652 return;
2653 }
2654
2655 /* Check if feature set would not allow a r/w mount */
2656 if (!ext4_feature_set_ok(sb, 0)) {
2657 ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
2658 "unknown ROCOMPAT features");
2659 return;
2660 }
2661
2662 if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
2663 /* don't clear list on RO mount w/ errors */
2664 if (es->s_last_orphan && !(s_flags & SB_RDONLY)) {
2665 ext4_msg(sb, KERN_INFO, "Errors on filesystem, "
2666 "clearing orphan list.\n");
2667 es->s_last_orphan = 0;
2668 }
2669 jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
2670 return;
2671 }
2672
2673 if (s_flags & SB_RDONLY) {
2674 ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
2675 sb->s_flags &= ~SB_RDONLY;
2676 }
2677#ifdef CONFIG_QUOTA
2678	/*
2679 * Turn on quotas which were not enabled for read-only mounts if
2680 * filesystem has quota feature, so that they are updated correctly.
2681 */
2682 if (ext4_has_feature_quota(sb) && (s_flags & SB_RDONLY)) {
2683 int ret = ext4_enable_quotas(sb);
2684
2685 if (!ret)
2686 quota_update = 1;
2687 else
2688 ext4_msg(sb, KERN_ERR,
2689 "Cannot turn on quotas: error %d", ret);
2690 }
2691
2692	/* Turn on journaled quotas used for old-style quota files */
2693 for (i = 0; i < EXT4_MAXQUOTAS; i++) {
2694 if (EXT4_SB(sb)->s_qf_names[i]) {
2695 int ret = ext4_quota_on_mount(sb, i);
2696
2697 if (!ret)
2698 quota_update = 1;
2699 else
2700 ext4_msg(sb, KERN_ERR,
2701 "Cannot turn on journaled "
2702 "quota: type %d: error %d", i, ret);
2703 }
2704 }
2705#endif
2706
2707 while (es->s_last_orphan) {
2708 struct inode *inode;
2709
2710 /*
2711 * We may have encountered an error during cleanup; if
2712 * so, skip the rest.
2713 */
2714 if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
2715 jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
2716 es->s_last_orphan = 0;
2717 break;
2718 }
2719
2720 inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
2721 if (IS_ERR(inode)) {
2722 es->s_last_orphan = 0;
2723 break;
2724 }
2725
2726 list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
2727 dquot_initialize(inode);
2728 if (inode->i_nlink) {
2729 if (test_opt(sb, DEBUG))
2730 ext4_msg(sb, KERN_DEBUG,
2731 "%s: truncating inode %lu to %lld bytes",
2732 __func__, inode->i_ino, inode->i_size);
2733 jbd_debug(2, "truncating inode %lu to %lld bytes\n",
2734 inode->i_ino, inode->i_size);
2735 inode_lock(inode);
2736 truncate_inode_pages(inode->i_mapping, inode->i_size);
2737 ret = ext4_truncate(inode);
2738			if (ret) {
2739 /*
2740 * We need to clean up the in-core orphan list
2741 * manually if ext4_truncate() failed to get a
2742 * transaction handle.
2743 */
2744 ext4_orphan_del(NULL, inode);
2745				ext4_std_error(inode->i_sb, ret);
2746			}
2747			inode_unlock(inode);
2748 nr_truncates++;
2749 } else {
2750 if (test_opt(sb, DEBUG))
2751 ext4_msg(sb, KERN_DEBUG,
2752 "%s: deleting unreferenced inode %lu",
2753 __func__, inode->i_ino);
2754 jbd_debug(2, "deleting unreferenced inode %lu\n",
2755 inode->i_ino);
2756 nr_orphans++;
2757 }
2758 iput(inode); /* The delete magic happens here! */
2759 }
2760
2761#define PLURAL(x) (x), ((x) == 1) ? "" : "s"
2762
2763 if (nr_orphans)
2764 ext4_msg(sb, KERN_INFO, "%d orphan inode%s deleted",
2765 PLURAL(nr_orphans));
2766 if (nr_truncates)
2767 ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
2768 PLURAL(nr_truncates));
2769#ifdef CONFIG_QUOTA
2770 /* Turn off quotas if they were enabled for orphan cleanup */
2771 if (quota_update) {
2772 for (i = 0; i < EXT4_MAXQUOTAS; i++) {
2773 if (sb_dqopt(sb)->files[i])
2774 dquot_quota_off(sb, i);
2775 }
2776 }
2777#endif
2778 sb->s_flags = s_flags; /* Restore SB_RDONLY status */
2779}
2780
2781/*
2782 * Maximal extent format file size.
2783 * Resulting logical blkno at s_maxbytes must fit in our on-disk
2784 * extent format containers, within a sector_t, and within i_blocks
2785 * in the vfs. ext4 inode has 48 bits of i_block in fsblock units,
2786 * so that won't be a limiting factor.
2787 *
2788 * However, there is another limiting factor. We do store extents in the form
2789 * of starting block and length, hence the resulting length of the extent
2790 * covering maximum file size must fit into on-disk format containers as
2791 * well. Given that the length is always one unit bigger than the maximum
2792 * block number (because we count block 0 as well), we have to lower
2793 * s_maxbytes by one fs block.
2793 *
2794 * Note, this does *not* consider any metadata overhead for vfs i_blocks.
2795 */
2796static loff_t ext4_max_size(int blkbits, int has_huge_files)
2797{
2798 loff_t res;
2799 loff_t upper_limit = MAX_LFS_FILESIZE;
2800
2801	BUILD_BUG_ON(sizeof(blkcnt_t) < sizeof(u64));
2802
2803 if (!has_huge_files) {
2804		upper_limit = (1LL << 32) - 1;
2805
2806 /* total blocks in file system block size */
2807 upper_limit >>= (blkbits - 9);
2808 upper_limit <<= blkbits;
2809 }
2810
2811 /*
2812 * 32-bit extent-start container, ee_block. We lower the maxbytes
2813 * by one fs block, so ee_len can cover the extent of maximum file
2814 * size
2815 */
2816 res = (1LL << 32) - 1;
2817 res <<= blkbits;
2818
2819 /* Sanity check against vm- & vfs- imposed limits */
2820 if (res > upper_limit)
2821 res = upper_limit;
2822
2823 return res;
2824}
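/*
 * Worked example (4 KiB blocks, blkbits == 12): with huge_file the limit is
 * (2^32 - 1) blocks << 12, roughly 16 TiB (further capped by
 * MAX_LFS_FILESIZE); without huge_file i_blocks counts 512-byte sectors, so
 * the cap drops to roughly 2 TiB.
 */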
2825
2826/*
2827 * Maximal bitmap file size. There is a direct, and {,double-,triple-}indirect
2828 * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks.
2829 * We need to be 1 filesystem block less than the 2^48 sector limit.
2830 */
2831static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
2832{
2833 loff_t res = EXT4_NDIR_BLOCKS;
2834 int meta_blocks;
2835 loff_t upper_limit;
2836 /* This is calculated to be the largest file size for a dense, block
2837 * mapped file such that the file's total number of 512-byte sectors,
2838 * including data and all indirect blocks, does not exceed (2^48 - 1).
2839 *
2840 * __u32 i_blocks_lo and _u16 i_blocks_high represent the total
2841 * number of 512-byte sectors of the file.
2842 */
2843
2844	if (!has_huge_files) {
2845		/*
2846		 * !has_huge_files implies that the inode i_block field
2847 * represents total file blocks in 2^32 512-byte sectors ==
2848 * size of vfs inode i_blocks * 8
2849		 */
2850 upper_limit = (1LL << 32) - 1;
2851
2852 /* total blocks in file system block size */
2853 upper_limit >>= (bits - 9);
2854
2855 } else {
2856 /*
2857 * We use 48 bit ext4_inode i_blocks
2858 * With EXT4_HUGE_FILE_FL set the i_blocks
2859 * represent total number of blocks in
2860 * file system block size
2861 */
2862 upper_limit = (1LL << 48) - 1;
2863
2864 }
2865
2866 /* indirect blocks */
2867 meta_blocks = 1;
2868 /* double indirect blocks */
2869 meta_blocks += 1 + (1LL << (bits-2));
2870	/* triple indirect blocks */
2871 meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));
2872
2873 upper_limit -= meta_blocks;
2874 upper_limit <<= bits;
2875
2876 res += 1LL << (bits-2);
2877 res += 1LL << (2*(bits-2));
2878 res += 1LL << (3*(bits-2));
2879 res <<= bits;
2880 if (res > upper_limit)
2881 res = upper_limit;
2882
2883 if (res > MAX_LFS_FILESIZE)
2884 res = MAX_LFS_FILESIZE;
2885
2886 return res;
2887}
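/*
 * Worked example (4 KiB blocks, bits == 12): each indirect block holds 2^10
 * pointers, so res comes to about 12 + 2^10 + 2^20 + 2^30 blocks, roughly
 * 4 TiB; without huge_file the 2^32 512-byte sector cap brings this down to
 * about 2 TiB.
 */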
2888
2889static ext4_fsblk_t descriptor_loc(struct super_block *sb,
2890 ext4_fsblk_t logical_sb_block, int nr)
2891{
2892 struct ext4_sb_info *sbi = EXT4_SB(sb);
2893 ext4_group_t bg, first_meta_bg;
2894 int has_super = 0;
2895
2896 first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
2897
2898 if (!ext4_has_feature_meta_bg(sb) || nr < first_meta_bg)
2899 return logical_sb_block + nr + 1;
2900 bg = sbi->s_desc_per_block * nr;
2901 if (ext4_bg_has_super(sb, bg))
2902 has_super = 1;
2903
2904 /*
2905 * If we have a meta_bg fs with 1k blocks, group 0's GDT is at
2906 * block 2, not 1. If s_first_data_block == 0 (bigalloc is enabled
2907 * on modern mke2fs or blksize > 1k on older mke2fs) then we must
2908 * compensate.
2909 */
2910 if (sb->s_blocksize == 1024 && nr == 0 &&
2911 le32_to_cpu(sbi->s_es->s_first_data_block) == 0)
2912 has_super++;
2913
2914 return (has_super + ext4_group_first_block_no(sb, bg));
2915}
2916
2917/**
2918 * ext4_get_stripe_size: Get the stripe size.
2919 * @sbi: In memory super block info
2920 *
2921 * If we have specified it via mount option, then
2922 * use the mount option value. If the value specified at mount time is
2923 * greater than the blocks per group use the super block value.
2924 * If the super block value is greater than blocks per group return 0.
2925 * The allocator needs it to be less than blocks per group.
2926 *
2927 */
2928static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
2929{
2930 unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
2931 unsigned long stripe_width =
2932 le32_to_cpu(sbi->s_es->s_raid_stripe_width);
2933 int ret;
2934
2935 if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
2936 ret = sbi->s_stripe;
2937 else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
2938 ret = stripe_width;
2939 else if (stride && stride <= sbi->s_blocks_per_group)
2940 ret = stride;
2941 else
2942 ret = 0;
2943
2944 /*
2945 * If the stripe width is 1, this makes no sense and
2946 * we set it to 0 to turn off stripe handling code.
2947 */
2948 if (ret <= 1)
2949 ret = 0;
2950
2951 return ret;
2952}
2953
2954/*
2955 * Check whether this filesystem can be mounted based on
2956 * the features present and the RDONLY/RDWR mount requested.
2957 * Returns 1 if this filesystem can be mounted as requested,
2958 * 0 if it cannot be.
2959 */
2960static int ext4_feature_set_ok(struct super_block *sb, int readonly)
2961{
2962 if (ext4_has_unknown_ext4_incompat_features(sb)) {
2963 ext4_msg(sb, KERN_ERR,
2964 "Couldn't mount because of "
2965 "unsupported optional features (%x)",
2966 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
2967 ~EXT4_FEATURE_INCOMPAT_SUPP));
2968 return 0;
2969 }
2970
2971#ifndef CONFIG_UNICODE
2972 if (ext4_has_feature_casefold(sb)) {
2973 ext4_msg(sb, KERN_ERR,
2974 "Filesystem with casefold feature cannot be "
2975 "mounted without CONFIG_UNICODE");
2976 return 0;
2977 }
2978#endif
2979
2980	if (readonly)
2981 return 1;
2982
2983 if (ext4_has_feature_readonly(sb)) {
2984 ext4_msg(sb, KERN_INFO, "filesystem is read-only");
2985 sb->s_flags |= SB_RDONLY;
2986 return 1;
2987 }
2988
2989 /* Check that feature set is OK for a read-write mount */
2990 if (ext4_has_unknown_ext4_ro_compat_features(sb)) {
2991 ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
2992 "unsupported optional features (%x)",
2993 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
2994 ~EXT4_FEATURE_RO_COMPAT_SUPP));
2995 return 0;
2996 }
2997	if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) {
2998 ext4_msg(sb, KERN_ERR,
2999 "Can't support bigalloc feature without "
3000 "extents feature\n");
3001 return 0;
3002 }
3003
3004#if !IS_ENABLED(CONFIG_QUOTA) || !IS_ENABLED(CONFIG_QFMT_V2)
3005 if (!readonly && (ext4_has_feature_quota(sb) ||
3006 ext4_has_feature_project(sb))) {
3007		ext4_msg(sb, KERN_ERR,
3008			 "The kernel was not built with CONFIG_QUOTA and CONFIG_QFMT_V2");
3009		return 0;
3010 }
3011#endif /* CONFIG_QUOTA */
3012 return 1;
3013}
3014
3015/*
3016 * This function is called once a day if we have errors logged
3017 * on the file system
3018 */
3019static void print_daily_error_info(struct timer_list *t)
3020{
3021 struct ext4_sb_info *sbi = from_timer(sbi, t, s_err_report);
3022 struct super_block *sb = sbi->s_sb;
3023 struct ext4_super_block *es = sbi->s_es;
3024
3025 if (es->s_error_count)
3026 /* fsck newer than v1.41.13 is needed to clean this condition. */
3027 ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
3028 le32_to_cpu(es->s_error_count));
3029 if (es->s_first_error_time) {
3030 printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %llu: %.*s:%d",
3031 sb->s_id,
3032 ext4_get_tstamp(es, s_first_error_time),
3033 (int) sizeof(es->s_first_error_func),
3034 es->s_first_error_func,
3035 le32_to_cpu(es->s_first_error_line));
3036 if (es->s_first_error_ino)
3037 printk(KERN_CONT ": inode %u",
3038 le32_to_cpu(es->s_first_error_ino));
3039 if (es->s_first_error_block)
3040 printk(KERN_CONT ": block %llu", (unsigned long long)
3041 le64_to_cpu(es->s_first_error_block));
3042 printk(KERN_CONT "\n");
3043 }
3044 if (es->s_last_error_time) {
3045 printk(KERN_NOTICE "EXT4-fs (%s): last error at time %llu: %.*s:%d",
3046 sb->s_id,
3047 ext4_get_tstamp(es, s_last_error_time),
3048 (int) sizeof(es->s_last_error_func),
3049 es->s_last_error_func,
3050 le32_to_cpu(es->s_last_error_line));
3051 if (es->s_last_error_ino)
3052 printk(KERN_CONT ": inode %u",
3053 le32_to_cpu(es->s_last_error_ino));
3054 if (es->s_last_error_block)
3055 printk(KERN_CONT ": block %llu", (unsigned long long)
3056 le64_to_cpu(es->s_last_error_block));
3057 printk(KERN_CONT "\n");
3058 }
3059 mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); /* Once a day */
3060}
3061
3062/* Find next suitable group and run ext4_init_inode_table */
3063static int ext4_run_li_request(struct ext4_li_request *elr)
3064{
3065 struct ext4_group_desc *gdp = NULL;
3066 ext4_group_t group, ngroups;
3067 struct super_block *sb;
3068 unsigned long timeout = 0;
3069 int ret = 0;
3070
3071 sb = elr->lr_super;
3072 ngroups = EXT4_SB(sb)->s_groups_count;
3073
3074 for (group = elr->lr_next_group; group < ngroups; group++) {
3075 gdp = ext4_get_group_desc(sb, group, NULL);
3076 if (!gdp) {
3077 ret = 1;
3078 break;
3079 }
3080
3081 if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
3082 break;
3083 }
3084
3085 if (group >= ngroups)
3086 ret = 1;
3087
3088 if (!ret) {
3089 timeout = jiffies;
3090 ret = ext4_init_inode_table(sb, group,
3091 elr->lr_timeout ? 0 : 1);
3092 if (elr->lr_timeout == 0) {
3093 timeout = (jiffies - timeout) *
3094 elr->lr_sbi->s_li_wait_mult;
3095 elr->lr_timeout = timeout;
3096 }
3097 elr->lr_next_sched = jiffies + elr->lr_timeout;
3098 elr->lr_next_group = group + 1;
3099 }
3100 return ret;
3101}
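/*
 * Note the adaptive pacing above: the first pass through a group times
 * ext4_init_inode_table() and multiplies the elapsed jiffies by
 * s_li_wait_mult to derive lr_timeout, which then spaces every subsequent
 * pass for this request.
 */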
3102
3103/*
3104 * Remove lr_request from the list_request and free the
3105 * request structure. Should be called with li_list_mtx held
3106 */
3107static void ext4_remove_li_request(struct ext4_li_request *elr)
3108{
3109 struct ext4_sb_info *sbi;
3110
3111 if (!elr)
3112 return;
3113
3114 sbi = elr->lr_sbi;
3115
3116 list_del(&elr->lr_request);
3117 sbi->s_li_request = NULL;
3118 kfree(elr);
3119}
3120
3121static void ext4_unregister_li_request(struct super_block *sb)
3122{
3123 mutex_lock(&ext4_li_mtx);
3124 if (!ext4_li_info) {
3125 mutex_unlock(&ext4_li_mtx);
3126 return;
3127 }
3128
3129 mutex_lock(&ext4_li_info->li_list_mtx);
3130 ext4_remove_li_request(EXT4_SB(sb)->s_li_request);
3131 mutex_unlock(&ext4_li_info->li_list_mtx);
3132 mutex_unlock(&ext4_li_mtx);
3133}
3134
3135static struct task_struct *ext4_lazyinit_task;
3136
3137/*
3138 * This is the function where ext4lazyinit thread lives. It walks
3139 * through the request list searching for next scheduled filesystem.
3140 * When such a fs is found, run the lazy initialization request
3141 * (ext4_run_li_request) and keep track of the time spent in this
3142 * function. Based on that time we compute the next schedule time of
3143 * the request. When the walk through the list is complete, compute the
3144 * next wakeup time and go to sleep.
3145 */
3146static int ext4_lazyinit_thread(void *arg)
3147{
3148 struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg;
3149 struct list_head *pos, *n;
3150 struct ext4_li_request *elr;
3151 unsigned long next_wakeup, cur;
3152
3153 BUG_ON(NULL == eli);
3154
3155cont_thread:
3156 while (true) {
3157 next_wakeup = MAX_JIFFY_OFFSET;
3158
3159 mutex_lock(&eli->li_list_mtx);
3160 if (list_empty(&eli->li_request_list)) {
3161 mutex_unlock(&eli->li_list_mtx);
3162 goto exit_thread;
3163 }
3164 list_for_each_safe(pos, n, &eli->li_request_list) {
3165 int err = 0;
3166 int progress = 0;
3167 elr = list_entry(pos, struct ext4_li_request,
3168 lr_request);
3169
3170 if (time_before(jiffies, elr->lr_next_sched)) {
3171 if (time_before(elr->lr_next_sched, next_wakeup))
3172 next_wakeup = elr->lr_next_sched;
3173 continue;
3174 }
3175 if (down_read_trylock(&elr->lr_super->s_umount)) {
3176 if (sb_start_write_trylock(elr->lr_super)) {
3177 progress = 1;
3178 /*
3179 * We hold sb->s_umount, sb can not
3180 * be removed from the list, it is
3181 * now safe to drop li_list_mtx
3182 */
3183 mutex_unlock(&eli->li_list_mtx);
3184 err = ext4_run_li_request(elr);
3185 sb_end_write(elr->lr_super);
3186 mutex_lock(&eli->li_list_mtx);
3187 n = pos->next;
3188 }
3189 up_read((&elr->lr_super->s_umount));
3190 }
3191 /* error, remove the lazy_init job */
3192 if (err) {
3193 ext4_remove_li_request(elr);
3194 continue;
3195 }
3196 if (!progress) {
3197 elr->lr_next_sched = jiffies +
3198 (prandom_u32()
3199 % (EXT4_DEF_LI_MAX_START_DELAY * HZ));
3200 }
3201 if (time_before(elr->lr_next_sched, next_wakeup))
3202 next_wakeup = elr->lr_next_sched;
3203 }
3204 mutex_unlock(&eli->li_list_mtx);
3205
3206 try_to_freeze();
3207
3208 cur = jiffies;
3209 if ((time_after_eq(cur, next_wakeup)) ||
3210 (MAX_JIFFY_OFFSET == next_wakeup)) {
3211 cond_resched();
3212 continue;
3213 }
3214
3215 schedule_timeout_interruptible(next_wakeup - cur);
3216
3217 if (kthread_should_stop()) {
3218 ext4_clear_request_list();
3219 goto exit_thread;
3220 }
3221 }
3222
3223exit_thread:
3224 /*
3225 * It looks like the request list is empty, but we need
3226 * to check it under the li_list_mtx lock, to prevent any
3227 * additions into it, and of course we should lock ext4_li_mtx
3228 * to atomically free the list and ext4_li_info, because at
3229 * this point another ext4 filesystem could be registering
3230 * new one.
3231 */
3232 mutex_lock(&ext4_li_mtx);
3233 mutex_lock(&eli->li_list_mtx);
3234 if (!list_empty(&eli->li_request_list)) {
3235 mutex_unlock(&eli->li_list_mtx);
3236 mutex_unlock(&ext4_li_mtx);
3237 goto cont_thread;
3238 }
3239 mutex_unlock(&eli->li_list_mtx);
3240 kfree(ext4_li_info);
3241 ext4_li_info = NULL;
3242 mutex_unlock(&ext4_li_mtx);
3243
3244 return 0;
3245}
3246
3247static void ext4_clear_request_list(void)
3248{
3249 struct list_head *pos, *n;
3250 struct ext4_li_request *elr;
3251
3252 mutex_lock(&ext4_li_info->li_list_mtx);
3253 list_for_each_safe(pos, n, &ext4_li_info->li_request_list) {
3254 elr = list_entry(pos, struct ext4_li_request,
3255 lr_request);
3256 ext4_remove_li_request(elr);
3257 }
3258 mutex_unlock(&ext4_li_info->li_list_mtx);
3259}
3260
3261static int ext4_run_lazyinit_thread(void)
3262{
3263 ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
3264 ext4_li_info, "ext4lazyinit");
3265 if (IS_ERR(ext4_lazyinit_task)) {
3266 int err = PTR_ERR(ext4_lazyinit_task);
3267 ext4_clear_request_list();
3268 kfree(ext4_li_info);
3269 ext4_li_info = NULL;
3270 printk(KERN_CRIT "EXT4-fs: error %d creating inode table "
3271 "initialization thread\n",
3272 err);
3273 return err;
3274 }
3275 ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
3276 return 0;
3277}
3278
3279/*
3280 * Check whether it makes sense to run the itable init. thread or not.
3281 * If there is at least one uninitialized inode table, return the
3282 * corresponding group number, else the loop goes through all
3283 * groups and returns the total number of groups.
3284 */
3285static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
3286{
3287 ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
3288 struct ext4_group_desc *gdp = NULL;
3289
3290 if (!ext4_has_group_desc_csum(sb))
3291 return ngroups;
3292
3293 for (group = 0; group < ngroups; group++) {
3294 gdp = ext4_get_group_desc(sb, group, NULL);
3295 if (!gdp)
3296 continue;
3297
3298 if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
3299 break;
3300 }
3301
3302 return group;
3303}
3304
3305static int ext4_li_info_new(void)
3306{
3307 struct ext4_lazy_init *eli = NULL;
3308
3309 eli = kzalloc(sizeof(*eli), GFP_KERNEL);
3310 if (!eli)
3311 return -ENOMEM;
3312
3313 INIT_LIST_HEAD(&eli->li_request_list);
3314 mutex_init(&eli->li_list_mtx);
3315
3316 eli->li_state |= EXT4_LAZYINIT_QUIT;
3317
3318 ext4_li_info = eli;
3319
3320 return 0;
3321}
3322
3323static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
3324 ext4_group_t start)
3325{
3326 struct ext4_sb_info *sbi = EXT4_SB(sb);
3327 struct ext4_li_request *elr;
3328
3329 elr = kzalloc(sizeof(*elr), GFP_KERNEL);
3330 if (!elr)
3331 return NULL;
3332
3333 elr->lr_super = sb;
3334 elr->lr_sbi = sbi;
3335 elr->lr_next_group = start;
3336
3337 /*
3338 * Randomize first schedule time of the request to
3339 * spread the inode table initialization requests
3340 * better.
3341 */
3342 elr->lr_next_sched = jiffies + (prandom_u32() %
3343 (EXT4_DEF_LI_MAX_START_DELAY * HZ));
3344 return elr;
3345}
3346
3347int ext4_register_li_request(struct super_block *sb,
3348 ext4_group_t first_not_zeroed)
3349{
3350 struct ext4_sb_info *sbi = EXT4_SB(sb);
3351 struct ext4_li_request *elr = NULL;
3352 ext4_group_t ngroups = sbi->s_groups_count;
3353 int ret = 0;
3354
3355 mutex_lock(&ext4_li_mtx);
3356 if (sbi->s_li_request != NULL) {
3357 /*
3358 * Reset timeout so it can be computed again, because
3359 * s_li_wait_mult might have changed.
3360 */
3361 sbi->s_li_request->lr_timeout = 0;
3362 goto out;
3363 }
3364
3365 if (first_not_zeroed == ngroups || sb_rdonly(sb) ||
3366 !test_opt(sb, INIT_INODE_TABLE))
3367 goto out;
3368
3369 elr = ext4_li_request_new(sb, first_not_zeroed);
3370 if (!elr) {
3371 ret = -ENOMEM;
3372 goto out;
3373 }
3374
3375 if (NULL == ext4_li_info) {
3376 ret = ext4_li_info_new();
3377 if (ret)
3378 goto out;
3379 }
3380
3381 mutex_lock(&ext4_li_info->li_list_mtx);
3382 list_add(&elr->lr_request, &ext4_li_info->li_request_list);
3383 mutex_unlock(&ext4_li_info->li_list_mtx);
3384
3385 sbi->s_li_request = elr;
3386 /*
3387 * set elr to NULL here since it has been inserted to
3388 * the request_list and the removal and free of it is
3389 * handled by ext4_clear_request_list from now on.
3390 */
3391 elr = NULL;
3392
3393 if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
3394 ret = ext4_run_lazyinit_thread();
3395 if (ret)
3396 goto out;
3397 }
3398out:
3399 mutex_unlock(&ext4_li_mtx);
3400 if (ret)
3401 kfree(elr);
3402 return ret;
3403}
3404
3405/*
3406 * We do not need to lock anything since this is called on
3407 * module unload.
3408 */
3409static void ext4_destroy_lazyinit_thread(void)
3410{
3411 /*
 3412 * If the thread exited earlier,
 3413 * there's nothing to be done.
3414 */
3415 if (!ext4_li_info || !ext4_lazyinit_task)
3416 return;
3417
3418 kthread_stop(ext4_lazyinit_task);
3419}
3420
3421static int set_journal_csum_feature_set(struct super_block *sb)
3422{
3423 int ret = 1;
3424 int compat, incompat;
3425 struct ext4_sb_info *sbi = EXT4_SB(sb);
3426
3427 if (ext4_has_metadata_csum(sb)) {
3428 /* journal checksum v3 */
3429 compat = 0;
3430 incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
3431 } else {
3432 /* journal checksum v1 */
3433 compat = JBD2_FEATURE_COMPAT_CHECKSUM;
3434 incompat = 0;
3435 }
3436
3437 jbd2_journal_clear_features(sbi->s_journal,
3438 JBD2_FEATURE_COMPAT_CHECKSUM, 0,
3439 JBD2_FEATURE_INCOMPAT_CSUM_V3 |
3440 JBD2_FEATURE_INCOMPAT_CSUM_V2);
3441 if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
3442 ret = jbd2_journal_set_features(sbi->s_journal,
3443 compat, 0,
3444 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
3445 incompat);
3446 } else if (test_opt(sb, JOURNAL_CHECKSUM)) {
3447 ret = jbd2_journal_set_features(sbi->s_journal,
3448 compat, 0,
3449 incompat);
3450 jbd2_journal_clear_features(sbi->s_journal, 0, 0,
3451 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
3452 } else {
3453 jbd2_journal_clear_features(sbi->s_journal, 0, 0,
3454 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
3455 }
3456
3457 return ret;
3458}
3459
3460/*
3461 * Note: calculating the overhead so we can be compatible with
3462 * historical BSD practice is quite difficult in the face of
3463 * clusters/bigalloc. This is because multiple metadata blocks from
 3464 * different block groups can end up in the same allocation cluster.
 3465 * Calculating the exact overhead in the face of clustered allocation
 3466 * requires either O(all block bitmaps) in memory or O(number of block
 3467 * groups**2) in time. We will still calculate the overhead for
 3468 * older file systems --- and if we come across a bigalloc file
 3469 * system with zero in s_overhead_clusters the estimate will be close to
3470 * correct especially for very large cluster sizes --- but for newer
3471 * file systems, it's better to calculate this figure once at mkfs
3472 * time, and store it in the superblock. If the superblock value is
3473 * present (even for non-bigalloc file systems), we will use it.
3474 */
3475static int count_overhead(struct super_block *sb, ext4_group_t grp,
3476 char *buf)
3477{
3478 struct ext4_sb_info *sbi = EXT4_SB(sb);
3479 struct ext4_group_desc *gdp;
3480 ext4_fsblk_t first_block, last_block, b;
3481 ext4_group_t i, ngroups = ext4_get_groups_count(sb);
3482 int s, j, count = 0;
3483
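/*
 * Without bigalloc, the overhead of this group is simply: a backup
 * superblock (if the group has one), its group descriptor blocks,
 * the inode table, plus 2 blocks for the block and inode bitmaps.
 */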
3484 if (!ext4_has_feature_bigalloc(sb))
3485 return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
3486 sbi->s_itb_per_group + 2);
3487
3488 first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
3489 (grp * EXT4_BLOCKS_PER_GROUP(sb));
3490 last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
3491 for (i = 0; i < ngroups; i++) {
3492 gdp = ext4_get_group_desc(sb, i, NULL);
3493 b = ext4_block_bitmap(sb, gdp);
3494 if (b >= first_block && b <= last_block) {
3495 ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
3496 count++;
3497 }
3498 b = ext4_inode_bitmap(sb, gdp);
3499 if (b >= first_block && b <= last_block) {
3500 ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
3501 count++;
3502 }
3503 b = ext4_inode_table(sb, gdp);
3504 if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
3505 for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
3506 int c = EXT4_B2C(sbi, b - first_block);
3507 ext4_set_bit(c, buf);
3508 count++;
3509 }
3510 if (i != grp)
3511 continue;
3512 s = 0;
3513 if (ext4_bg_has_super(sb, grp)) {
3514 ext4_set_bit(s++, buf);
3515 count++;
3516 }
3517 j = ext4_bg_num_gdb(sb, grp);
3518 if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
3519 ext4_error(sb, "Invalid number of block group "
3520 "descriptor blocks: %d", j);
3521 j = EXT4_BLOCKS_PER_GROUP(sb) - s;
3522 }
3523 count += j;
3524 for (; j > 0; j--)
3525 ext4_set_bit(EXT4_B2C(sbi, s++), buf);
3526 }
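/*
 * buf now has one bit set for every cluster in this group's block range
 * that holds filesystem metadata; the group's overhead is the number of
 * set bits, i.e. the clusters per group minus the bits still clear.
 */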
3527 if (!count)
3528 return 0;
3529 return EXT4_CLUSTERS_PER_GROUP(sb) -
3530 ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
3531}
3532
3533/*
3534 * Compute the overhead and stash it in sbi->s_overhead
3535 */
3536int ext4_calculate_overhead(struct super_block *sb)
3537{
3538 struct ext4_sb_info *sbi = EXT4_SB(sb);
3539 struct ext4_super_block *es = sbi->s_es;
3540 struct inode *j_inode;
3541 unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum);
3542 ext4_group_t i, ngroups = ext4_get_groups_count(sb);
3543 ext4_fsblk_t overhead = 0;
3544 char *buf = (char *) get_zeroed_page(GFP_NOFS);
3545
3546 if (!buf)
3547 return -ENOMEM;
3548
3549 /*
3550 * Compute the overhead (FS structures). This is constant
3551 * for a given filesystem unless the number of block groups
 3552 * changes, so we cache the previous value until it does.
3553 */
3554
3555 /*
3556 * All of the blocks before first_data_block are overhead
3557 */
3558 overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));
3559
3560 /*
3561 * Add the overhead found in each block group
3562 */
3563 for (i = 0; i < ngroups; i++) {
3564 int blks;
3565
3566 blks = count_overhead(sb, i, buf);
3567 overhead += blks;
3568 if (blks)
3569 memset(buf, 0, PAGE_SIZE);
3570 cond_resched();
3571 }
3572
3573 /*
3574 * Add the internal journal blocks whether the journal has been
3575 * loaded or not
3576 */
3577 if (sbi->s_journal && !sbi->journal_bdev)
3578 overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
Olivier Deprez0e641232021-09-23 10:07:05 +02003579 else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) {
3580 /* j_inum for internal journal is non-zero */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003581 j_inode = ext4_get_journal_inode(sb, j_inum);
3582 if (j_inode) {
3583 j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
3584 overhead += EXT4_NUM_B2C(sbi, j_blocks);
3585 iput(j_inode);
3586 } else {
3587 ext4_msg(sb, KERN_ERR, "can't get journal size");
3588 }
3589 }
3590 sbi->s_overhead = overhead;
3591 smp_wmb();
3592 free_page((unsigned long) buf);
3593 return 0;
3594}
3595
3596static void ext4_set_resv_clusters(struct super_block *sb)
3597{
3598 ext4_fsblk_t resv_clusters;
3599 struct ext4_sb_info *sbi = EXT4_SB(sb);
3600
3601 /*
3602 * There's no need to reserve anything when we aren't using extents.
3603 * The space estimates are exact, there are no unwritten extents,
3604 * hole punching doesn't need new metadata... This is needed especially
3605 * to keep ext2/3 backward compatibility.
3606 */
3607 if (!ext4_has_feature_extents(sb))
3608 return;
3609 /*
3610 * By default we reserve 2% or 4096 clusters, whichever is smaller.
 3611 * This should cover the situations where we cannot afford to run
 3612 * out of space, such as punching holes or converting
 3613 * unwritten extents in the delalloc path. In most cases such an
 3614 * allocation would require 1 or 2 blocks; higher numbers are
 3615 * very rare.
3616 */
3617 resv_clusters = (ext4_blocks_count(sbi->s_es) >>
3618 sbi->s_cluster_bits);
3619
3620 do_div(resv_clusters, 50);
3621 resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);
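/*
 * Illustrative arithmetic (hypothetical filesystem, not taken from this
 * code path): a 1 TiB filesystem with 4 KiB clusters has ~268 million
 * clusters; dividing by 50 gives ~5.4 million, so the 4096-cluster cap
 * (16 MiB) is what actually gets reserved. Only small filesystems end up
 * reserving the full 2%.
 */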
3622
3623 atomic64_set(&sbi->s_resv_clusters, resv_clusters);
3624}
3625
3626static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3627{
3628 struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
3629 char *orig_data = kstrdup(data, GFP_KERNEL);
Olivier Deprez0e641232021-09-23 10:07:05 +02003630 struct buffer_head *bh, **group_desc;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003631 struct ext4_super_block *es = NULL;
3632 struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
Olivier Deprez0e641232021-09-23 10:07:05 +02003633 struct flex_groups **flex_groups;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003634 ext4_fsblk_t block;
3635 ext4_fsblk_t sb_block = get_sb_block(&data);
3636 ext4_fsblk_t logical_sb_block;
3637 unsigned long offset = 0;
3638 unsigned long journal_devnum = 0;
3639 unsigned long def_mount_opts;
3640 struct inode *root;
3641 const char *descr;
3642 int ret = -ENOMEM;
3643 int blocksize, clustersize;
3644 unsigned int db_count;
3645 unsigned int i;
3646 int needs_recovery, has_huge_files, has_bigalloc;
3647 __u64 blocks_count;
3648 int err = 0;
3649 unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
3650 ext4_group_t first_not_zeroed;
3651
3652 if ((data && !orig_data) || !sbi)
3653 goto out_free_base;
3654
3655 sbi->s_daxdev = dax_dev;
3656 sbi->s_blockgroup_lock =
3657 kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
3658 if (!sbi->s_blockgroup_lock)
3659 goto out_free_base;
3660
3661 sb->s_fs_info = sbi;
3662 sbi->s_sb = sb;
3663 sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
3664 sbi->s_sb_block = sb_block;
3665 if (sb->s_bdev->bd_part)
3666 sbi->s_sectors_written_start =
3667 part_stat_read(sb->s_bdev->bd_part, sectors[STAT_WRITE]);
3668
3669 /* Cleanup superblock name */
3670 strreplace(sb->s_id, '/', '!');
3671
3672 /* -EINVAL is default */
3673 ret = -EINVAL;
3674 blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
3675 if (!blocksize) {
3676 ext4_msg(sb, KERN_ERR, "unable to set blocksize");
3677 goto out_fail;
3678 }
3679
3680 /*
 3681 * The ext4 superblock will not be aligned to the start of the buffer
 3682 * for block sizes other than 1kB, so calculate its offset within the buffer.
3683 */
3684 if (blocksize != EXT4_MIN_BLOCK_SIZE) {
3685 logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
3686 offset = do_div(logical_sb_block, blocksize);
3687 } else {
3688 logical_sb_block = sb_block;
3689 }
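/*
 * Worked example (illustrative): with the default sb_block of 1 and a
 * 4096-byte blocksize, logical_sb_block becomes 1 * 1024 / 4096 = 0 with
 * offset 1024, i.e. the superblock is read from byte 1024 of the first
 * 4 KiB block rather than from a 1 KiB block of its own.
 */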
3690
3691 if (!(bh = sb_bread_unmovable(sb, logical_sb_block))) {
3692 ext4_msg(sb, KERN_ERR, "unable to read superblock");
3693 goto out_fail;
3694 }
3695 /*
3696 * Note: s_es must be initialized as soon as possible because
 3697 * some ext4 macros depend on its value
3698 */
3699 es = (struct ext4_super_block *) (bh->b_data + offset);
3700 sbi->s_es = es;
3701 sb->s_magic = le16_to_cpu(es->s_magic);
3702 if (sb->s_magic != EXT4_SUPER_MAGIC)
3703 goto cantfind_ext4;
3704 sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);
3705
3706 /* Warn if metadata_csum and gdt_csum are both set. */
3707 if (ext4_has_feature_metadata_csum(sb) &&
3708 ext4_has_feature_gdt_csum(sb))
3709 ext4_warning(sb, "metadata_csum and uninit_bg are "
3710 "redundant flags; please run fsck.");
3711
3712 /* Check for a known checksum algorithm */
3713 if (!ext4_verify_csum_type(sb, es)) {
3714 ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
3715 "unknown checksum algorithm.");
3716 silent = 1;
3717 goto cantfind_ext4;
3718 }
3719
3720 /* Load the checksum driver */
3721 sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
3722 if (IS_ERR(sbi->s_chksum_driver)) {
3723 ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
3724 ret = PTR_ERR(sbi->s_chksum_driver);
3725 sbi->s_chksum_driver = NULL;
3726 goto failed_mount;
3727 }
3728
3729 /* Check superblock checksum */
3730 if (!ext4_superblock_csum_verify(sb, es)) {
3731 ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
3732 "invalid superblock checksum. Run e2fsck?");
3733 silent = 1;
3734 ret = -EFSBADCRC;
3735 goto cantfind_ext4;
3736 }
3737
3738 /* Precompute checksum seed for all metadata */
3739 if (ext4_has_feature_csum_seed(sb))
3740 sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed);
3741 else if (ext4_has_metadata_csum(sb) || ext4_has_feature_ea_inode(sb))
3742 sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
3743 sizeof(es->s_uuid));
3744
3745 /* Set defaults before we parse the mount options */
3746 def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
3747 set_opt(sb, INIT_INODE_TABLE);
3748 if (def_mount_opts & EXT4_DEFM_DEBUG)
3749 set_opt(sb, DEBUG);
3750 if (def_mount_opts & EXT4_DEFM_BSDGROUPS)
3751 set_opt(sb, GRPID);
3752 if (def_mount_opts & EXT4_DEFM_UID16)
3753 set_opt(sb, NO_UID32);
3754 /* xattr user namespace & acls are now defaulted on */
3755 set_opt(sb, XATTR_USER);
3756#ifdef CONFIG_EXT4_FS_POSIX_ACL
3757 set_opt(sb, POSIX_ACL);
3758#endif
3759 /* don't forget to enable journal_csum when metadata_csum is enabled. */
3760 if (ext4_has_metadata_csum(sb))
3761 set_opt(sb, JOURNAL_CHECKSUM);
3762
3763 if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
3764 set_opt(sb, JOURNAL_DATA);
3765 else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
3766 set_opt(sb, ORDERED_DATA);
3767 else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
3768 set_opt(sb, WRITEBACK_DATA);
3769
3770 if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
3771 set_opt(sb, ERRORS_PANIC);
3772 else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
3773 set_opt(sb, ERRORS_CONT);
3774 else
3775 set_opt(sb, ERRORS_RO);
3776 /* block_validity enabled by default; disable with noblock_validity */
3777 set_opt(sb, BLOCK_VALIDITY);
3778 if (def_mount_opts & EXT4_DEFM_DISCARD)
3779 set_opt(sb, DISCARD);
3780
3781 sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
3782 sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
3783 sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
3784 sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
3785 sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
3786
3787 if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
3788 set_opt(sb, BARRIER);
3789
3790 /*
 3791 * Enable delayed allocation by default.
 3792 * Use -o nodelalloc to turn it off.
3793 */
3794 if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) &&
3795 ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
3796 set_opt(sb, DELALLOC);
3797
3798 /*
 3799 * Set the default s_li_wait_mult for lazyinit, in case no mount
 3800 * option is specified.
3801 */
3802 sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
3803
Olivier Deprez0e641232021-09-23 10:07:05 +02003804 blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
3805 if (blocksize < EXT4_MIN_BLOCK_SIZE ||
3806 blocksize > EXT4_MAX_BLOCK_SIZE) {
3807 ext4_msg(sb, KERN_ERR,
3808 "Unsupported filesystem blocksize %d (%d log_block_size)",
3809 blocksize, le32_to_cpu(es->s_log_block_size));
3810 goto failed_mount;
3811 }
3812
3813 if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
3814 sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
3815 sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
3816 } else {
3817 sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
3818 sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
3819 if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) {
3820 ext4_msg(sb, KERN_ERR, "invalid first ino: %u",
3821 sbi->s_first_ino);
3822 goto failed_mount;
3823 }
3824 if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
3825 (!is_power_of_2(sbi->s_inode_size)) ||
3826 (sbi->s_inode_size > blocksize)) {
3827 ext4_msg(sb, KERN_ERR,
3828 "unsupported inode size: %d",
3829 sbi->s_inode_size);
3830 ext4_msg(sb, KERN_ERR, "blocksize: %d", blocksize);
3831 goto failed_mount;
3832 }
3833 /*
3834 * i_atime_extra is the last extra field available for
3835 * [acm]times in struct ext4_inode. Checking for that
3836 * field should suffice to ensure we have extra space
3837 * for all three.
3838 */
3839 if (sbi->s_inode_size >= offsetof(struct ext4_inode, i_atime_extra) +
3840 sizeof(((struct ext4_inode *)0)->i_atime_extra)) {
3841 sb->s_time_gran = 1;
3842 sb->s_time_max = EXT4_EXTRA_TIMESTAMP_MAX;
3843 } else {
3844 sb->s_time_gran = NSEC_PER_SEC;
3845 sb->s_time_max = EXT4_NON_EXTRA_TIMESTAMP_MAX;
3846 }
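/*
 * With the extra [acm]time fields ext4 gets nanosecond timestamp
 * granularity and an extended timestamp range; without them it falls
 * back to one-second granularity and the classic signed 32-bit range.
 */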
3847 sb->s_time_min = EXT4_TIMESTAMP_MIN;
3848 }
3849 if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
3850 sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
3851 EXT4_GOOD_OLD_INODE_SIZE;
3852 if (ext4_has_feature_extra_isize(sb)) {
3853 unsigned v, max = (sbi->s_inode_size -
3854 EXT4_GOOD_OLD_INODE_SIZE);
3855
3856 v = le16_to_cpu(es->s_want_extra_isize);
3857 if (v > max) {
3858 ext4_msg(sb, KERN_ERR,
3859 "bad s_want_extra_isize: %d", v);
3860 goto failed_mount;
3861 }
3862 if (sbi->s_want_extra_isize < v)
3863 sbi->s_want_extra_isize = v;
3864
3865 v = le16_to_cpu(es->s_min_extra_isize);
3866 if (v > max) {
3867 ext4_msg(sb, KERN_ERR,
3868 "bad s_min_extra_isize: %d", v);
3869 goto failed_mount;
3870 }
3871 if (sbi->s_want_extra_isize < v)
3872 sbi->s_want_extra_isize = v;
3873 }
3874 }
3875
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003876 if (sbi->s_es->s_mount_opts[0]) {
3877 char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
3878 sizeof(sbi->s_es->s_mount_opts),
3879 GFP_KERNEL);
3880 if (!s_mount_opts)
3881 goto failed_mount;
3882 if (!parse_options(s_mount_opts, sb, &journal_devnum,
3883 &journal_ioprio, 0)) {
3884 ext4_msg(sb, KERN_WARNING,
3885 "failed to parse options in superblock: %s",
3886 s_mount_opts);
3887 }
3888 kfree(s_mount_opts);
3889 }
3890 sbi->s_def_mount_opt = sbi->s_mount_opt;
3891 if (!parse_options((char *) data, sb, &journal_devnum,
3892 &journal_ioprio, 0))
3893 goto failed_mount;
3894
David Brazdil0f672f62019-12-10 10:32:29 +00003895#ifdef CONFIG_UNICODE
3896 if (ext4_has_feature_casefold(sb) && !sbi->s_encoding) {
3897 const struct ext4_sb_encodings *encoding_info;
3898 struct unicode_map *encoding;
3899 __u16 encoding_flags;
3900
3901 if (ext4_has_feature_encrypt(sb)) {
3902 ext4_msg(sb, KERN_ERR,
3903 "Can't mount with encoding and encryption");
3904 goto failed_mount;
3905 }
3906
3907 if (ext4_sb_read_encoding(es, &encoding_info,
3908 &encoding_flags)) {
3909 ext4_msg(sb, KERN_ERR,
3910 "Encoding requested by superblock is unknown");
3911 goto failed_mount;
3912 }
3913
3914 encoding = utf8_load(encoding_info->version);
3915 if (IS_ERR(encoding)) {
3916 ext4_msg(sb, KERN_ERR,
3917 "can't mount with superblock charset: %s-%s "
3918 "not supported by the kernel. flags: 0x%x.",
3919 encoding_info->name, encoding_info->version,
3920 encoding_flags);
3921 goto failed_mount;
3922 }
3923 ext4_msg(sb, KERN_INFO,"Using encoding defined by superblock: "
3924 "%s-%s with flags 0x%hx", encoding_info->name,
3925 encoding_info->version?:"\b", encoding_flags);
3926
3927 sbi->s_encoding = encoding;
3928 sbi->s_encoding_flags = encoding_flags;
3929 }
3930#endif
3931
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003932 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
3933 printk_once(KERN_WARNING "EXT4-fs: Warning: mounting "
3934 "with data=journal disables delayed "
3935 "allocation and O_DIRECT support!\n");
3936 if (test_opt2(sb, EXPLICIT_DELALLOC)) {
3937 ext4_msg(sb, KERN_ERR, "can't mount with "
3938 "both data=journal and delalloc");
3939 goto failed_mount;
3940 }
3941 if (test_opt(sb, DIOREAD_NOLOCK)) {
3942 ext4_msg(sb, KERN_ERR, "can't mount with "
3943 "both data=journal and dioread_nolock");
3944 goto failed_mount;
3945 }
3946 if (test_opt(sb, DAX)) {
3947 ext4_msg(sb, KERN_ERR, "can't mount with "
3948 "both data=journal and dax");
3949 goto failed_mount;
3950 }
3951 if (ext4_has_feature_encrypt(sb)) {
3952 ext4_msg(sb, KERN_WARNING,
3953 "encrypted files will use data=ordered "
3954 "instead of data journaling mode");
3955 }
3956 if (test_opt(sb, DELALLOC))
3957 clear_opt(sb, DELALLOC);
3958 } else {
3959 sb->s_iflags |= SB_I_CGROUPWB;
3960 }
3961
3962 sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
3963 (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
3964
3965 if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
3966 (ext4_has_compat_features(sb) ||
3967 ext4_has_ro_compat_features(sb) ||
3968 ext4_has_incompat_features(sb)))
3969 ext4_msg(sb, KERN_WARNING,
3970 "feature flags set on rev 0 fs, "
3971 "running e2fsck is recommended");
3972
3973 if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) {
3974 set_opt2(sb, HURD_COMPAT);
3975 if (ext4_has_feature_64bit(sb)) {
3976 ext4_msg(sb, KERN_ERR,
3977 "The Hurd can't support 64-bit file systems");
3978 goto failed_mount;
3979 }
3980
3981 /*
 3982 * The ea_inode feature uses the l_i_version field, which is not
 3983 * available in HURD_COMPAT mode.
3984 */
3985 if (ext4_has_feature_ea_inode(sb)) {
3986 ext4_msg(sb, KERN_ERR,
3987 "ea_inode feature is not supported for Hurd");
3988 goto failed_mount;
3989 }
3990 }
3991
3992 if (IS_EXT2_SB(sb)) {
3993 if (ext2_feature_set_ok(sb))
3994 ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
3995 "using the ext4 subsystem");
3996 else {
3997 /*
 3998 * If we're probing, be silent if this looks like
 3999 * it's actually an ext[34] filesystem.
4000 */
4001 if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
4002 goto failed_mount;
4003 ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
4004 "to feature incompatibilities");
4005 goto failed_mount;
4006 }
4007 }
4008
4009 if (IS_EXT3_SB(sb)) {
4010 if (ext3_feature_set_ok(sb))
4011 ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
4012 "using the ext4 subsystem");
4013 else {
4014 /*
 4015 * If we're probing, be silent if this looks like
 4016 * it's actually an ext4 filesystem.
4017 */
4018 if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
4019 goto failed_mount;
4020 ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
4021 "to feature incompatibilities");
4022 goto failed_mount;
4023 }
4024 }
4025
4026 /*
4027 * Check feature flags regardless of the revision level, since we
4028 * previously didn't change the revision level when setting the flags,
4029 * so there is a chance incompat flags are set on a rev 0 filesystem.
4030 */
4031 if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
4032 goto failed_mount;
4033
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004034 if (le32_to_cpu(es->s_log_block_size) >
4035 (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
4036 ext4_msg(sb, KERN_ERR,
4037 "Invalid log block size: %u",
4038 le32_to_cpu(es->s_log_block_size));
4039 goto failed_mount;
4040 }
4041 if (le32_to_cpu(es->s_log_cluster_size) >
4042 (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
4043 ext4_msg(sb, KERN_ERR,
4044 "Invalid log cluster size: %u",
4045 le32_to_cpu(es->s_log_cluster_size));
4046 goto failed_mount;
4047 }
4048
4049 if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
4050 ext4_msg(sb, KERN_ERR,
4051 "Number of reserved GDT blocks insanely large: %d",
4052 le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
4053 goto failed_mount;
4054 }
4055
4056 if (sbi->s_mount_opt & EXT4_MOUNT_DAX) {
4057 if (ext4_has_feature_inline_data(sb)) {
4058 ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem"
4059 " that may contain inline data");
David Brazdil0f672f62019-12-10 10:32:29 +00004060 goto failed_mount;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004061 }
4062 if (!bdev_dax_supported(sb->s_bdev, blocksize)) {
4063 ext4_msg(sb, KERN_ERR,
David Brazdil0f672f62019-12-10 10:32:29 +00004064 "DAX unsupported by block device.");
4065 goto failed_mount;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004066 }
4067 }
4068
4069 if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) {
4070 ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
4071 es->s_encryption_level);
4072 goto failed_mount;
4073 }
4074
4075 if (sb->s_blocksize != blocksize) {
4076 /* Validate the filesystem blocksize */
4077 if (!sb_set_blocksize(sb, blocksize)) {
4078 ext4_msg(sb, KERN_ERR, "bad block size %d",
4079 blocksize);
4080 goto failed_mount;
4081 }
4082
4083 brelse(bh);
4084 logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
4085 offset = do_div(logical_sb_block, blocksize);
4086 bh = sb_bread_unmovable(sb, logical_sb_block);
4087 if (!bh) {
4088 ext4_msg(sb, KERN_ERR,
4089 "Can't read superblock on 2nd try");
4090 goto failed_mount;
4091 }
4092 es = (struct ext4_super_block *)(bh->b_data + offset);
4093 sbi->s_es = es;
4094 if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
4095 ext4_msg(sb, KERN_ERR,
4096 "Magic mismatch, very weird!");
4097 goto failed_mount;
4098 }
4099 }
4100
4101 has_huge_files = ext4_has_feature_huge_file(sb);
4102 sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
4103 has_huge_files);
4104 sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);
4105
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004106 sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
4107 if (ext4_has_feature_64bit(sb)) {
4108 if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
4109 sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
4110 !is_power_of_2(sbi->s_desc_size)) {
4111 ext4_msg(sb, KERN_ERR,
4112 "unsupported descriptor size %lu",
4113 sbi->s_desc_size);
4114 goto failed_mount;
4115 }
4116 } else
4117 sbi->s_desc_size = EXT4_MIN_DESC_SIZE;
4118
4119 sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
4120 sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
4121
4122 sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
4123 if (sbi->s_inodes_per_block == 0)
4124 goto cantfind_ext4;
4125 if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
4126 sbi->s_inodes_per_group > blocksize * 8) {
4127 ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
Olivier Deprez0e641232021-09-23 10:07:05 +02004128 sbi->s_inodes_per_group);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004129 goto failed_mount;
4130 }
4131 sbi->s_itb_per_group = sbi->s_inodes_per_group /
4132 sbi->s_inodes_per_block;
4133 sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
4134 sbi->s_sbh = bh;
4135 sbi->s_mount_state = le16_to_cpu(es->s_state);
4136 sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
4137 sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));
4138
4139 for (i = 0; i < 4; i++)
4140 sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
4141 sbi->s_def_hash_version = es->s_def_hash_version;
4142 if (ext4_has_feature_dir_index(sb)) {
4143 i = le32_to_cpu(es->s_flags);
4144 if (i & EXT2_FLAGS_UNSIGNED_HASH)
4145 sbi->s_hash_unsigned = 3;
4146 else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
4147#ifdef __CHAR_UNSIGNED__
4148 if (!sb_rdonly(sb))
4149 es->s_flags |=
4150 cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
4151 sbi->s_hash_unsigned = 3;
4152#else
4153 if (!sb_rdonly(sb))
4154 es->s_flags |=
4155 cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
4156#endif
4157 }
4158 }
4159
4160 /* Handle clustersize */
4161 clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size);
4162 has_bigalloc = ext4_has_feature_bigalloc(sb);
4163 if (has_bigalloc) {
4164 if (clustersize < blocksize) {
4165 ext4_msg(sb, KERN_ERR,
4166 "cluster size (%d) smaller than "
4167 "block size (%d)", clustersize, blocksize);
4168 goto failed_mount;
4169 }
4170 sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
4171 le32_to_cpu(es->s_log_block_size);
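/*
 * s_cluster_bits is log2(blocks per cluster); the cluster ratio set a
 * little further down is therefore 1 << s_cluster_bits.
 */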
4172 sbi->s_clusters_per_group =
4173 le32_to_cpu(es->s_clusters_per_group);
4174 if (sbi->s_clusters_per_group > blocksize * 8) {
4175 ext4_msg(sb, KERN_ERR,
4176 "#clusters per group too big: %lu",
4177 sbi->s_clusters_per_group);
4178 goto failed_mount;
4179 }
4180 if (sbi->s_blocks_per_group !=
4181 (sbi->s_clusters_per_group * (clustersize / blocksize))) {
4182 ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and "
4183 "clusters per group (%lu) inconsistent",
4184 sbi->s_blocks_per_group,
4185 sbi->s_clusters_per_group);
4186 goto failed_mount;
4187 }
4188 } else {
4189 if (clustersize != blocksize) {
4190 ext4_msg(sb, KERN_ERR,
4191 "fragment/cluster size (%d) != "
4192 "block size (%d)", clustersize, blocksize);
4193 goto failed_mount;
4194 }
4195 if (sbi->s_blocks_per_group > blocksize * 8) {
4196 ext4_msg(sb, KERN_ERR,
4197 "#blocks per group too big: %lu",
4198 sbi->s_blocks_per_group);
4199 goto failed_mount;
4200 }
4201 sbi->s_clusters_per_group = sbi->s_blocks_per_group;
4202 sbi->s_cluster_bits = 0;
4203 }
4204 sbi->s_cluster_ratio = clustersize / blocksize;
4205
4206 /* Do we have standard group size of clustersize * 8 blocks ? */
4207 if (sbi->s_blocks_per_group == clustersize << 3)
4208 set_opt2(sb, STD_GROUP_SIZE);
4209
4210 /*
4211 * Test whether we have more sectors than will fit in sector_t,
4212 * and whether the max offset is addressable by the page cache.
4213 */
4214 err = generic_check_addressable(sb->s_blocksize_bits,
4215 ext4_blocks_count(es));
4216 if (err) {
4217 ext4_msg(sb, KERN_ERR, "filesystem"
4218 " too large to mount safely on this system");
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004219 goto failed_mount;
4220 }
4221
4222 if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
4223 goto cantfind_ext4;
4224
4225 /* check blocks count against device size */
4226 blocks_count = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
4227 if (blocks_count && ext4_blocks_count(es) > blocks_count) {
4228 ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu "
4229 "exceeds size of device (%llu blocks)",
4230 ext4_blocks_count(es), blocks_count);
4231 goto failed_mount;
4232 }
4233
4234 /*
4235 * It makes no sense for the first data block to be beyond the end
4236 * of the filesystem.
4237 */
4238 if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
4239 ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
4240 "block %u is beyond end of filesystem (%llu)",
4241 le32_to_cpu(es->s_first_data_block),
4242 ext4_blocks_count(es));
4243 goto failed_mount;
4244 }
4245 if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) &&
4246 (sbi->s_cluster_ratio == 1)) {
4247 ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
4248 "block is 0 with a 1k block and cluster size");
4249 goto failed_mount;
4250 }
4251
4252 blocks_count = (ext4_blocks_count(es) -
4253 le32_to_cpu(es->s_first_data_block) +
4254 EXT4_BLOCKS_PER_GROUP(sb) - 1);
4255 do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
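/*
 * blocks_count now holds the number of block groups: the usable block
 * count is divided by the blocks per group, rounded up by the
 * "+ EXT4_BLOCKS_PER_GROUP(sb) - 1" above (a ceiling division).
 */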
4256 if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
Olivier Deprez0e641232021-09-23 10:07:05 +02004257 ext4_msg(sb, KERN_WARNING, "groups count too large: %llu "
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004258 "(block count %llu, first data block %u, "
Olivier Deprez0e641232021-09-23 10:07:05 +02004259 "blocks per group %lu)", blocks_count,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004260 ext4_blocks_count(es),
4261 le32_to_cpu(es->s_first_data_block),
4262 EXT4_BLOCKS_PER_GROUP(sb));
4263 goto failed_mount;
4264 }
4265 sbi->s_groups_count = blocks_count;
4266 sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
4267 (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
4268 if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
4269 le32_to_cpu(es->s_inodes_count)) {
4270 ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
4271 le32_to_cpu(es->s_inodes_count),
4272 ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
4273 ret = -EINVAL;
4274 goto failed_mount;
4275 }
4276 db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
4277 EXT4_DESC_PER_BLOCK(sb);
4278 if (ext4_has_feature_meta_bg(sb)) {
4279 if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
4280 ext4_msg(sb, KERN_WARNING,
4281 "first meta block group too large: %u "
4282 "(group descriptor block count %u)",
4283 le32_to_cpu(es->s_first_meta_bg), db_count);
4284 goto failed_mount;
4285 }
4286 }
Olivier Deprez0e641232021-09-23 10:07:05 +02004287 rcu_assign_pointer(sbi->s_group_desc,
4288 kvmalloc_array(db_count,
4289 sizeof(struct buffer_head *),
4290 GFP_KERNEL));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004291 if (sbi->s_group_desc == NULL) {
4292 ext4_msg(sb, KERN_ERR, "not enough memory");
4293 ret = -ENOMEM;
4294 goto failed_mount;
4295 }
4296
4297 bgl_lock_init(sbi->s_blockgroup_lock);
4298
4299 /* Pre-read the descriptors into the buffer cache */
4300 for (i = 0; i < db_count; i++) {
4301 block = descriptor_loc(sb, logical_sb_block, i);
Olivier Deprez0e641232021-09-23 10:07:05 +02004302 sb_breadahead_unmovable(sb, block);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004303 }
4304
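/*
 * The loop above only kicks off readahead; the loop below does the
 * synchronous reads, which should now mostly be satisfied from the
 * buffer cache.
 */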
4305 for (i = 0; i < db_count; i++) {
Olivier Deprez0e641232021-09-23 10:07:05 +02004306 struct buffer_head *bh;
4307
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004308 block = descriptor_loc(sb, logical_sb_block, i);
Olivier Deprez0e641232021-09-23 10:07:05 +02004309 bh = sb_bread_unmovable(sb, block);
4310 if (!bh) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004311 ext4_msg(sb, KERN_ERR,
4312 "can't read group descriptor %d", i);
4313 db_count = i;
4314 goto failed_mount2;
4315 }
Olivier Deprez0e641232021-09-23 10:07:05 +02004316 rcu_read_lock();
4317 rcu_dereference(sbi->s_group_desc)[i] = bh;
4318 rcu_read_unlock();
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004319 }
4320 sbi->s_gdb_count = db_count;
4321 if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
4322 ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
4323 ret = -EFSCORRUPTED;
4324 goto failed_mount2;
4325 }
4326
4327 timer_setup(&sbi->s_err_report, print_daily_error_info, 0);
4328
4329 /* Register extent status tree shrinker */
4330 if (ext4_es_register_shrinker(sbi))
4331 goto failed_mount3;
4332
4333 sbi->s_stripe = ext4_get_stripe_size(sbi);
4334 sbi->s_extent_max_zeroout_kb = 32;
4335
4336 /*
4337 * set up enough so that it can read an inode
4338 */
4339 sb->s_op = &ext4_sops;
4340 sb->s_export_op = &ext4_export_ops;
4341 sb->s_xattr = ext4_xattr_handlers;
David Brazdil0f672f62019-12-10 10:32:29 +00004342#ifdef CONFIG_FS_ENCRYPTION
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004343 sb->s_cop = &ext4_cryptops;
4344#endif
David Brazdil0f672f62019-12-10 10:32:29 +00004345#ifdef CONFIG_FS_VERITY
4346 sb->s_vop = &ext4_verityops;
4347#endif
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004348#ifdef CONFIG_QUOTA
4349 sb->dq_op = &ext4_quota_operations;
4350 if (ext4_has_feature_quota(sb))
4351 sb->s_qcop = &dquot_quotactl_sysfile_ops;
4352 else
4353 sb->s_qcop = &ext4_qctl_operations;
4354 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
4355#endif
4356 memcpy(&sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
4357
4358 INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
4359 mutex_init(&sbi->s_orphan_lock);
4360
4361 sb->s_root = NULL;
4362
4363 needs_recovery = (es->s_last_orphan != 0 ||
4364 ext4_has_feature_journal_needs_recovery(sb));
4365
4366 if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb))
4367 if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
4368 goto failed_mount3a;
4369
4370 /*
4371 * The first inode we look at is the journal inode. Don't try
4372 * root first: it may be modified in the journal!
4373 */
4374 if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) {
4375 err = ext4_load_journal(sb, es, journal_devnum);
4376 if (err)
4377 goto failed_mount3a;
4378 } else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) &&
4379 ext4_has_feature_journal_needs_recovery(sb)) {
4380 ext4_msg(sb, KERN_ERR, "required journal recovery "
4381 "suppressed and not mounted read-only");
4382 goto failed_mount_wq;
4383 } else {
4384 /* Nojournal mode, all journal mount options are illegal */
4385 if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) {
4386 ext4_msg(sb, KERN_ERR, "can't mount with "
4387 "journal_checksum, fs mounted w/o journal");
4388 goto failed_mount_wq;
4389 }
4390 if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
4391 ext4_msg(sb, KERN_ERR, "can't mount with "
4392 "journal_async_commit, fs mounted w/o journal");
4393 goto failed_mount_wq;
4394 }
4395 if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
4396 ext4_msg(sb, KERN_ERR, "can't mount with "
4397 "commit=%lu, fs mounted w/o journal",
4398 sbi->s_commit_interval / HZ);
4399 goto failed_mount_wq;
4400 }
4401 if (EXT4_MOUNT_DATA_FLAGS &
4402 (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
4403 ext4_msg(sb, KERN_ERR, "can't mount with "
4404 "data=, fs mounted w/o journal");
4405 goto failed_mount_wq;
4406 }
David Brazdil0f672f62019-12-10 10:32:29 +00004407 sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004408 clear_opt(sb, JOURNAL_CHECKSUM);
4409 clear_opt(sb, DATA_FLAGS);
4410 sbi->s_journal = NULL;
4411 needs_recovery = 0;
4412 goto no_journal;
4413 }
4414
4415 if (ext4_has_feature_64bit(sb) &&
4416 !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
4417 JBD2_FEATURE_INCOMPAT_64BIT)) {
4418 ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
4419 goto failed_mount_wq;
4420 }
4421
4422 if (!set_journal_csum_feature_set(sb)) {
4423 ext4_msg(sb, KERN_ERR, "Failed to set journal checksum "
4424 "feature set");
4425 goto failed_mount_wq;
4426 }
4427
4428 /* We have now updated the journal if required, so we can
4429 * validate the data journaling mode. */
4430 switch (test_opt(sb, DATA_FLAGS)) {
4431 case 0:
4432 /* No mode set, assume a default based on the journal
4433 * capabilities: ORDERED_DATA if the journal can
4434 * cope, else JOURNAL_DATA
4435 */
4436 if (jbd2_journal_check_available_features
4437 (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
4438 set_opt(sb, ORDERED_DATA);
4439 sbi->s_def_mount_opt |= EXT4_MOUNT_ORDERED_DATA;
4440 } else {
4441 set_opt(sb, JOURNAL_DATA);
4442 sbi->s_def_mount_opt |= EXT4_MOUNT_JOURNAL_DATA;
4443 }
4444 break;
4445
4446 case EXT4_MOUNT_ORDERED_DATA:
4447 case EXT4_MOUNT_WRITEBACK_DATA:
4448 if (!jbd2_journal_check_available_features
4449 (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
4450 ext4_msg(sb, KERN_ERR, "Journal does not support "
4451 "requested data journaling mode");
4452 goto failed_mount_wq;
4453 }
4454 default:
4455 break;
4456 }
4457
4458 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
4459 test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
4460 ext4_msg(sb, KERN_ERR, "can't mount with "
4461 "journal_async_commit in data=ordered mode");
4462 goto failed_mount_wq;
4463 }
4464
4465 set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
4466
4467 sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;
4468
4469no_journal:
4470 if (!test_opt(sb, NO_MBCACHE)) {
4471 sbi->s_ea_block_cache = ext4_xattr_create_cache();
4472 if (!sbi->s_ea_block_cache) {
4473 ext4_msg(sb, KERN_ERR,
4474 "Failed to create ea_block_cache");
4475 goto failed_mount_wq;
4476 }
4477
4478 if (ext4_has_feature_ea_inode(sb)) {
4479 sbi->s_ea_inode_cache = ext4_xattr_create_cache();
4480 if (!sbi->s_ea_inode_cache) {
4481 ext4_msg(sb, KERN_ERR,
4482 "Failed to create ea_inode_cache");
4483 goto failed_mount_wq;
4484 }
4485 }
4486 }
4487
4488 if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) &&
4489 (blocksize != PAGE_SIZE)) {
4490 ext4_msg(sb, KERN_ERR,
4491 "Unsupported blocksize for fs encryption");
4492 goto failed_mount_wq;
4493 }
4494
David Brazdil0f672f62019-12-10 10:32:29 +00004495 if (ext4_has_feature_verity(sb) && blocksize != PAGE_SIZE) {
4496 ext4_msg(sb, KERN_ERR, "Unsupported blocksize for fs-verity");
4497 goto failed_mount_wq;
4498 }
4499
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004500 if (DUMMY_ENCRYPTION_ENABLED(sbi) && !sb_rdonly(sb) &&
4501 !ext4_has_feature_encrypt(sb)) {
4502 ext4_set_feature_encrypt(sb);
4503 ext4_commit_super(sb, 1);
4504 }
4505
4506 /*
4507 * Get the # of file system overhead blocks from the
4508 * superblock if present.
4509 */
4510 if (es->s_overhead_clusters)
4511 sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
4512 else {
4513 err = ext4_calculate_overhead(sb);
4514 if (err)
4515 goto failed_mount_wq;
4516 }
4517
4518 /*
 4519 * The maximum number of concurrent work items can be high and
 4520 * concurrency isn't really necessary. Limit it to 1.
4521 */
4522 EXT4_SB(sb)->rsv_conversion_wq =
4523 alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
4524 if (!EXT4_SB(sb)->rsv_conversion_wq) {
4525 printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
4526 ret = -ENOMEM;
4527 goto failed_mount4;
4528 }
4529
4530 /*
4531 * The jbd2_journal_load will have done any necessary log recovery,
4532 * so we can safely mount the rest of the filesystem now.
4533 */
4534
David Brazdil0f672f62019-12-10 10:32:29 +00004535 root = ext4_iget(sb, EXT4_ROOT_INO, EXT4_IGET_SPECIAL);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004536 if (IS_ERR(root)) {
4537 ext4_msg(sb, KERN_ERR, "get root inode failed");
4538 ret = PTR_ERR(root);
4539 root = NULL;
4540 goto failed_mount4;
4541 }
4542 if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
4543 ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
4544 iput(root);
4545 goto failed_mount4;
4546 }
David Brazdil0f672f62019-12-10 10:32:29 +00004547
4548#ifdef CONFIG_UNICODE
4549 if (sbi->s_encoding)
4550 sb->s_d_op = &ext4_dentry_ops;
4551#endif
4552
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004553 sb->s_root = d_make_root(root);
4554 if (!sb->s_root) {
4555 ext4_msg(sb, KERN_ERR, "get root dentry failed");
4556 ret = -ENOMEM;
4557 goto failed_mount4;
4558 }
4559
4560 ret = ext4_setup_super(sb, es, sb_rdonly(sb));
4561 if (ret == -EROFS) {
4562 sb->s_flags |= SB_RDONLY;
4563 ret = 0;
4564 } else if (ret)
4565 goto failed_mount4a;
4566
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004567 ext4_set_resv_clusters(sb);
4568
Olivier Deprez0e641232021-09-23 10:07:05 +02004569 if (test_opt(sb, BLOCK_VALIDITY)) {
4570 err = ext4_setup_system_zone(sb);
4571 if (err) {
4572 ext4_msg(sb, KERN_ERR, "failed to initialize system "
4573 "zone (%d)", err);
4574 goto failed_mount4a;
4575 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004576 }
4577
4578 ext4_ext_init(sb);
4579 err = ext4_mb_init(sb);
4580 if (err) {
4581 ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
4582 err);
4583 goto failed_mount5;
4584 }
4585
4586 block = ext4_count_free_clusters(sb);
4587 ext4_free_blocks_count_set(sbi->s_es,
4588 EXT4_C2B(sbi, block));
4589 ext4_superblock_csum_set(sb);
4590 err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
4591 GFP_KERNEL);
4592 if (!err) {
4593 unsigned long freei = ext4_count_free_inodes(sb);
4594 sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
4595 ext4_superblock_csum_set(sb);
4596 err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
4597 GFP_KERNEL);
4598 }
4599 if (!err)
4600 err = percpu_counter_init(&sbi->s_dirs_counter,
4601 ext4_count_dirs(sb), GFP_KERNEL);
4602 if (!err)
4603 err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
4604 GFP_KERNEL);
4605 if (!err)
Olivier Deprez0e641232021-09-23 10:07:05 +02004606 err = percpu_counter_init(&sbi->s_sra_exceeded_retry_limit, 0,
4607 GFP_KERNEL);
4608 if (!err)
4609 err = percpu_init_rwsem(&sbi->s_writepages_rwsem);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004610
4611 if (err) {
4612 ext4_msg(sb, KERN_ERR, "insufficient memory");
4613 goto failed_mount6;
4614 }
4615
4616 if (ext4_has_feature_flex_bg(sb))
4617 if (!ext4_fill_flex_info(sb)) {
4618 ext4_msg(sb, KERN_ERR,
4619 "unable to initialize "
4620 "flex_bg meta info!");
Olivier Deprez0e641232021-09-23 10:07:05 +02004621 ret = -ENOMEM;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004622 goto failed_mount6;
4623 }
4624
4625 err = ext4_register_li_request(sb, first_not_zeroed);
4626 if (err)
4627 goto failed_mount6;
4628
4629 err = ext4_register_sysfs(sb);
4630 if (err)
4631 goto failed_mount7;
4632
4633#ifdef CONFIG_QUOTA
4634 /* Enable quota usage during mount. */
4635 if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) {
4636 err = ext4_enable_quotas(sb);
4637 if (err)
4638 goto failed_mount8;
4639 }
4640#endif /* CONFIG_QUOTA */
4641
4642 EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
4643 ext4_orphan_cleanup(sb, es);
4644 EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
4645 if (needs_recovery) {
4646 ext4_msg(sb, KERN_INFO, "recovery complete");
Olivier Deprez0e641232021-09-23 10:07:05 +02004647 err = ext4_mark_recovery_complete(sb, es);
4648 if (err)
4649 goto failed_mount8;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004650 }
4651 if (EXT4_SB(sb)->s_journal) {
4652 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
4653 descr = " journalled data mode";
4654 else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
4655 descr = " ordered data mode";
4656 else
4657 descr = " writeback data mode";
4658 } else
4659 descr = "out journal";
4660
4661 if (test_opt(sb, DISCARD)) {
4662 struct request_queue *q = bdev_get_queue(sb->s_bdev);
4663 if (!blk_queue_discard(q))
4664 ext4_msg(sb, KERN_WARNING,
4665 "mounting with \"discard\" option, but "
4666 "the device does not support discard");
4667 }
4668
4669 if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
4670 ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
4671 "Opts: %.*s%s%s", descr,
4672 (int) sizeof(sbi->s_es->s_mount_opts),
4673 sbi->s_es->s_mount_opts,
4674 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);
4675
4676 if (es->s_error_count)
4677 mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
4678
4679 /* Enable message ratelimiting. Default is 10 messages per 5 secs. */
4680 ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
4681 ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
4682 ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);
4683
4684 kfree(orig_data);
4685 return 0;
4686
4687cantfind_ext4:
4688 if (!silent)
4689 ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
4690 goto failed_mount;
4691
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004692failed_mount8:
4693 ext4_unregister_sysfs(sb);
Olivier Deprez0e641232021-09-23 10:07:05 +02004694 kobject_put(&sbi->s_kobj);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004695failed_mount7:
4696 ext4_unregister_li_request(sb);
4697failed_mount6:
4698 ext4_mb_release(sb);
Olivier Deprez0e641232021-09-23 10:07:05 +02004699 rcu_read_lock();
4700 flex_groups = rcu_dereference(sbi->s_flex_groups);
4701 if (flex_groups) {
4702 for (i = 0; i < sbi->s_flex_groups_allocated; i++)
4703 kvfree(flex_groups[i]);
4704 kvfree(flex_groups);
4705 }
4706 rcu_read_unlock();
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004707 percpu_counter_destroy(&sbi->s_freeclusters_counter);
4708 percpu_counter_destroy(&sbi->s_freeinodes_counter);
4709 percpu_counter_destroy(&sbi->s_dirs_counter);
4710 percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
Olivier Deprez0e641232021-09-23 10:07:05 +02004711 percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);
4712 percpu_free_rwsem(&sbi->s_writepages_rwsem);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004713failed_mount5:
4714 ext4_ext_release(sb);
4715 ext4_release_system_zone(sb);
4716failed_mount4a:
4717 dput(sb->s_root);
4718 sb->s_root = NULL;
4719failed_mount4:
4720 ext4_msg(sb, KERN_ERR, "mount failed");
4721 if (EXT4_SB(sb)->rsv_conversion_wq)
4722 destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
4723failed_mount_wq:
David Brazdil0f672f62019-12-10 10:32:29 +00004724 ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
4725 sbi->s_ea_inode_cache = NULL;
4726
4727 ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
4728 sbi->s_ea_block_cache = NULL;
4729
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004730 if (sbi->s_journal) {
4731 jbd2_journal_destroy(sbi->s_journal);
4732 sbi->s_journal = NULL;
4733 }
4734failed_mount3a:
4735 ext4_es_unregister_shrinker(sbi);
4736failed_mount3:
4737 del_timer_sync(&sbi->s_err_report);
4738 if (sbi->s_mmp_tsk)
4739 kthread_stop(sbi->s_mmp_tsk);
4740failed_mount2:
Olivier Deprez0e641232021-09-23 10:07:05 +02004741 rcu_read_lock();
4742 group_desc = rcu_dereference(sbi->s_group_desc);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004743 for (i = 0; i < db_count; i++)
Olivier Deprez0e641232021-09-23 10:07:05 +02004744 brelse(group_desc[i]);
4745 kvfree(group_desc);
4746 rcu_read_unlock();
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004747failed_mount:
4748 if (sbi->s_chksum_driver)
4749 crypto_free_shash(sbi->s_chksum_driver);
David Brazdil0f672f62019-12-10 10:32:29 +00004750
4751#ifdef CONFIG_UNICODE
4752 utf8_unload(sbi->s_encoding);
4753#endif
4754
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004755#ifdef CONFIG_QUOTA
4756 for (i = 0; i < EXT4_MAXQUOTAS; i++)
David Brazdil0f672f62019-12-10 10:32:29 +00004757 kfree(get_qf_name(sb, sbi, i));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004758#endif
4759 ext4_blkdev_remove(sbi);
4760 brelse(bh);
4761out_fail:
4762 sb->s_fs_info = NULL;
4763 kfree(sbi->s_blockgroup_lock);
4764out_free_base:
4765 kfree(sbi);
4766 kfree(orig_data);
4767 fs_put_dax(dax_dev);
4768 return err ? err : ret;
4769}
4770
4771/*
 4772 * Set up any per-fs journal parameters now. We'll do this both on
4773 * initial mount, once the journal has been initialised but before we've
4774 * done any recovery; and again on any subsequent remount.
4775 */
4776static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
4777{
4778 struct ext4_sb_info *sbi = EXT4_SB(sb);
4779
4780 journal->j_commit_interval = sbi->s_commit_interval;
4781 journal->j_min_batch_time = sbi->s_min_batch_time;
4782 journal->j_max_batch_time = sbi->s_max_batch_time;
4783
4784 write_lock(&journal->j_state_lock);
4785 if (test_opt(sb, BARRIER))
4786 journal->j_flags |= JBD2_BARRIER;
4787 else
4788 journal->j_flags &= ~JBD2_BARRIER;
4789 if (test_opt(sb, DATA_ERR_ABORT))
4790 journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR;
4791 else
4792 journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR;
4793 write_unlock(&journal->j_state_lock);
4794}
4795
4796static struct inode *ext4_get_journal_inode(struct super_block *sb,
4797 unsigned int journal_inum)
4798{
4799 struct inode *journal_inode;
4800
4801 /*
4802 * Test for the existence of a valid inode on disk. Bad things
4803 * happen if we iget() an unused inode, as the subsequent iput()
4804 * will try to delete it.
4805 */
David Brazdil0f672f62019-12-10 10:32:29 +00004806 journal_inode = ext4_iget(sb, journal_inum, EXT4_IGET_SPECIAL);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004807 if (IS_ERR(journal_inode)) {
4808 ext4_msg(sb, KERN_ERR, "no journal found");
4809 return NULL;
4810 }
4811 if (!journal_inode->i_nlink) {
4812 make_bad_inode(journal_inode);
4813 iput(journal_inode);
4814 ext4_msg(sb, KERN_ERR, "journal inode is deleted");
4815 return NULL;
4816 }
4817
4818 jbd_debug(2, "Journal inode found at %p: %lld bytes\n",
4819 journal_inode, journal_inode->i_size);
4820 if (!S_ISREG(journal_inode->i_mode)) {
4821 ext4_msg(sb, KERN_ERR, "invalid journal inode");
4822 iput(journal_inode);
4823 return NULL;
4824 }
4825 return journal_inode;
4826}
4827
4828static journal_t *ext4_get_journal(struct super_block *sb,
4829 unsigned int journal_inum)
4830{
4831 struct inode *journal_inode;
4832 journal_t *journal;
4833
Olivier Deprez0e641232021-09-23 10:07:05 +02004834 if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
4835 return NULL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004836
4837 journal_inode = ext4_get_journal_inode(sb, journal_inum);
4838 if (!journal_inode)
4839 return NULL;
4840
4841 journal = jbd2_journal_init_inode(journal_inode);
4842 if (!journal) {
4843 ext4_msg(sb, KERN_ERR, "Could not load journal inode");
4844 iput(journal_inode);
4845 return NULL;
4846 }
4847 journal->j_private = sb;
4848 ext4_init_journal_params(sb, journal);
4849 return journal;
4850}
4851
4852static journal_t *ext4_get_dev_journal(struct super_block *sb,
4853 dev_t j_dev)
4854{
4855 struct buffer_head *bh;
4856 journal_t *journal;
4857 ext4_fsblk_t start;
4858 ext4_fsblk_t len;
4859 int hblock, blocksize;
4860 ext4_fsblk_t sb_block;
4861 unsigned long offset;
4862 struct ext4_super_block *es;
4863 struct block_device *bdev;
4864
Olivier Deprez0e641232021-09-23 10:07:05 +02004865 if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
4866 return NULL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004867
4868 bdev = ext4_blkdev_get(j_dev, sb);
4869 if (bdev == NULL)
4870 return NULL;
4871
4872 blocksize = sb->s_blocksize;
4873 hblock = bdev_logical_block_size(bdev);
4874 if (blocksize < hblock) {
4875 ext4_msg(sb, KERN_ERR,
4876 "blocksize too small for journal device");
4877 goto out_bdev;
4878 }
4879
4880 sb_block = EXT4_MIN_BLOCK_SIZE / blocksize;
4881 offset = EXT4_MIN_BLOCK_SIZE % blocksize;
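/*
 * The external journal's ext4 superblock also lives at byte offset 1024
 * on its device; sb_block/offset express that position in terms of the
 * filesystem blocksize being used to read it.
 */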
4882 set_blocksize(bdev, blocksize);
4883 if (!(bh = __bread(bdev, sb_block, blocksize))) {
4884 ext4_msg(sb, KERN_ERR, "couldn't read superblock of "
4885 "external journal");
4886 goto out_bdev;
4887 }
4888
4889 es = (struct ext4_super_block *) (bh->b_data + offset);
4890 if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
4891 !(le32_to_cpu(es->s_feature_incompat) &
4892 EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
4893 ext4_msg(sb, KERN_ERR, "external journal has "
4894 "bad superblock");
4895 brelse(bh);
4896 goto out_bdev;
4897 }
4898
4899 if ((le32_to_cpu(es->s_feature_ro_compat) &
4900 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
4901 es->s_checksum != ext4_superblock_csum(sb, es)) {
4902 ext4_msg(sb, KERN_ERR, "external journal has "
4903 "corrupt superblock");
4904 brelse(bh);
4905 goto out_bdev;
4906 }
4907
4908 if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
4909 ext4_msg(sb, KERN_ERR, "journal UUID does not match");
4910 brelse(bh);
4911 goto out_bdev;
4912 }
4913
4914 len = ext4_blocks_count(es);
4915 start = sb_block + 1;
4916 brelse(bh); /* we're done with the superblock */
4917
4918 journal = jbd2_journal_init_dev(bdev, sb->s_bdev,
4919 start, len, blocksize);
4920 if (!journal) {
4921 ext4_msg(sb, KERN_ERR, "failed to create device journal");
4922 goto out_bdev;
4923 }
4924 journal->j_private = sb;
4925 ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer);
4926 wait_on_buffer(journal->j_sb_buffer);
4927 if (!buffer_uptodate(journal->j_sb_buffer)) {
4928 ext4_msg(sb, KERN_ERR, "I/O error on journal device");
4929 goto out_journal;
4930 }
4931 if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
4932 ext4_msg(sb, KERN_ERR, "External journal has more than one "
4933 "user (unsupported) - %d",
4934 be32_to_cpu(journal->j_superblock->s_nr_users));
4935 goto out_journal;
4936 }
4937 EXT4_SB(sb)->journal_bdev = bdev;
4938 ext4_init_journal_params(sb, journal);
4939 return journal;
4940
4941out_journal:
4942 jbd2_journal_destroy(journal);
4943out_bdev:
4944 ext4_blkdev_put(bdev);
4945 return NULL;
4946}
4947
4948static int ext4_load_journal(struct super_block *sb,
4949 struct ext4_super_block *es,
4950 unsigned long journal_devnum)
4951{
4952 journal_t *journal;
4953 unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
4954 dev_t journal_dev;
4955 int err = 0;
4956 int really_read_only;
Olivier Deprez0e641232021-09-23 10:07:05 +02004957 int journal_dev_ro;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004958
Olivier Deprez0e641232021-09-23 10:07:05 +02004959 if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
4960 return -EFSCORRUPTED;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004961
4962 if (journal_devnum &&
4963 journal_devnum != le32_to_cpu(es->s_journal_dev)) {
4964 ext4_msg(sb, KERN_INFO, "external journal device major/minor "
4965 "numbers have changed");
4966 journal_dev = new_decode_dev(journal_devnum);
4967 } else
4968 journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));
4969
Olivier Deprez0e641232021-09-23 10:07:05 +02004970 if (journal_inum && journal_dev) {
4971 ext4_msg(sb, KERN_ERR,
4972 "filesystem has both journal inode and journal device!");
4973 return -EINVAL;
4974 }
4975
4976 if (journal_inum) {
4977 journal = ext4_get_journal(sb, journal_inum);
4978 if (!journal)
4979 return -EINVAL;
4980 } else {
4981 journal = ext4_get_dev_journal(sb, journal_dev);
4982 if (!journal)
4983 return -EINVAL;
4984 }
4985
4986 journal_dev_ro = bdev_read_only(journal->j_dev);
4987 really_read_only = bdev_read_only(sb->s_bdev) | journal_dev_ro;
4988
4989 if (journal_dev_ro && !sb_rdonly(sb)) {
4990 ext4_msg(sb, KERN_ERR,
4991 "journal device read-only, try mounting with '-o ro'");
4992 err = -EROFS;
4993 goto err_out;
4994 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004995
4996 /*
4997 * Are we loading a blank journal or performing recovery after a
4998 * crash? For recovery, we need to check in advance whether we
4999 * can get read-write access to the device.
5000 */
5001 if (ext4_has_feature_journal_needs_recovery(sb)) {
5002 if (sb_rdonly(sb)) {
5003 ext4_msg(sb, KERN_INFO, "INFO: recovery "
5004 "required on readonly filesystem");
5005 if (really_read_only) {
5006 ext4_msg(sb, KERN_ERR, "write access "
5007 "unavailable, cannot proceed "
5008 "(try mounting with noload)");
Olivier Deprez0e641232021-09-23 10:07:05 +02005009 err = -EROFS;
5010 goto err_out;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005011 }
5012 ext4_msg(sb, KERN_INFO, "write access will "
5013 "be enabled during recovery");
5014 }
5015 }
5016
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005017 if (!(journal->j_flags & JBD2_BARRIER))
5018 ext4_msg(sb, KERN_INFO, "barriers disabled");
5019
5020 if (!ext4_has_feature_journal_needs_recovery(sb))
5021 err = jbd2_journal_wipe(journal, !really_read_only);
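/*
 * Journal replay may rewrite the on-disk superblock, so the saved error
 * information in the superblock's error area is preserved across
 * jbd2_journal_load() and copied back afterwards.
 */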
5022 if (!err) {
5023 char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
5024 if (save)
5025 memcpy(save, ((char *) es) +
5026 EXT4_S_ERR_START, EXT4_S_ERR_LEN);
5027 err = jbd2_journal_load(journal);
5028 if (save)
5029 memcpy(((char *) es) + EXT4_S_ERR_START,
5030 save, EXT4_S_ERR_LEN);
5031 kfree(save);
5032 }
5033
5034 if (err) {
5035 ext4_msg(sb, KERN_ERR, "error loading journal");
Olivier Deprez0e641232021-09-23 10:07:05 +02005036 goto err_out;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005037 }
5038
5039 EXT4_SB(sb)->s_journal = journal;
Olivier Deprez0e641232021-09-23 10:07:05 +02005040 err = ext4_clear_journal_err(sb, es);
5041 if (err) {
5042 EXT4_SB(sb)->s_journal = NULL;
5043 jbd2_journal_destroy(journal);
5044 return err;
5045 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005046
5047 if (!really_read_only && journal_devnum &&
5048 journal_devnum != le32_to_cpu(es->s_journal_dev)) {
5049 es->s_journal_dev = cpu_to_le32(journal_devnum);
5050
5051 /* Make sure we flush the recovery flag to disk. */
5052 ext4_commit_super(sb, 1);
5053 }
5054
5055 return 0;
Olivier Deprez0e641232021-09-23 10:07:05 +02005056
5057err_out:
5058 jbd2_journal_destroy(journal);
5059 return err;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005060}
5061
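/*
 * Write the primary superblock buffer out, after refreshing the write
 * time, the lifetime "kbytes written" statistic and the free block and
 * inode counters.  With @sync the write is issued synchronously (REQ_FUA
 * when barriers are enabled), and a previously recorded write I/O error
 * on the buffer is cleared so the write can be retried.
 */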
5062static int ext4_commit_super(struct super_block *sb, int sync)
5063{
5064 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
5065 struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
5066 int error = 0;
5067
Olivier Deprez0e641232021-09-23 10:07:05 +02005068 if (!sbh)
5069 return -EINVAL;
5070 if (block_device_ejected(sb))
5071 return -ENODEV;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005072
5073 /*
5074 * If the file system is mounted read-only, don't update the
5075 * superblock write time. This avoids updating the superblock
5076 * write time when we are mounting the root file system
5077 * read/only but we need to replay the journal; at that point,
5078 * for people who are east of GMT and who make their clock
5079 * tick in localtime for Windows bug-for-bug compatibility,
5080 * the clock is set in the future, and this will cause e2fsck
5081 * to complain and force a full file system check.
5082 */
5083 if (!(sb->s_flags & SB_RDONLY))
5084 ext4_update_tstamp(es, s_wtime);
5085 if (sb->s_bdev->bd_part)
5086 es->s_kbytes_written =
5087 cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
5088 ((part_stat_read(sb->s_bdev->bd_part,
5089 sectors[STAT_WRITE]) -
5090 EXT4_SB(sb)->s_sectors_written_start) >> 1));
5091 else
5092 es->s_kbytes_written =
5093 cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
5094 if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter))
5095 ext4_free_blocks_count_set(es,
5096 EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(
5097 &EXT4_SB(sb)->s_freeclusters_counter)));
5098 if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
5099 es->s_free_inodes_count =
5100 cpu_to_le32(percpu_counter_sum_positive(
5101 &EXT4_SB(sb)->s_freeinodes_counter));
5102 BUFFER_TRACE(sbh, "marking dirty");
5103 ext4_superblock_csum_set(sb);
5104 if (sync)
5105 lock_buffer(sbh);
David Brazdil0f672f62019-12-10 10:32:29 +00005106 if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005107 /*
5108 * Oh, dear. A previous attempt to write the
5109 * superblock failed. This could happen because the
5110 * USB device was yanked out. Or it could happen to
5111 * be a transient write error and maybe the block will
5112 * be remapped. Nothing we can do but to retry the
5113 * write and hope for the best.
5114 */
5115 ext4_msg(sb, KERN_ERR, "previous I/O error to "
5116 "superblock detected");
5117 clear_buffer_write_io_error(sbh);
5118 set_buffer_uptodate(sbh);
5119 }
5120 mark_buffer_dirty(sbh);
5121 if (sync) {
5122 unlock_buffer(sbh);
5123 error = __sync_dirty_buffer(sbh,
5124 REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0));
5125 if (buffer_write_io_error(sbh)) {
5126 ext4_msg(sb, KERN_ERR, "I/O error while writing "
5127 "superblock");
5128 clear_buffer_write_io_error(sbh);
5129 set_buffer_uptodate(sbh);
5130 }
5131 }
5132 return error;
5133}
5134
5135/*
5136 * Have we just finished recovery? If so, and if we are mounting (or
5137 * remounting) the filesystem readonly, then we will end up with a
5138 * consistent fs on disk. Record that fact.
5139 */
Olivier Deprez0e641232021-09-23 10:07:05 +02005140static int ext4_mark_recovery_complete(struct super_block *sb,
5141 struct ext4_super_block *es)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005142{
Olivier Deprez0e641232021-09-23 10:07:05 +02005143 int err;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005144 journal_t *journal = EXT4_SB(sb)->s_journal;
5145
5146 if (!ext4_has_feature_journal(sb)) {
Olivier Deprez0e641232021-09-23 10:07:05 +02005147 if (journal != NULL) {
5148 ext4_error(sb, "Journal got removed while the fs was "
5149 "mounted!");
5150 return -EFSCORRUPTED;
5151 }
5152 return 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005153 }
5154 jbd2_journal_lock_updates(journal);
Olivier Deprez0e641232021-09-23 10:07:05 +02005155 err = jbd2_journal_flush(journal);
5156 if (err < 0)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005157 goto out;
5158
5159 if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) {
5160 ext4_clear_feature_journal_needs_recovery(sb);
5161 ext4_commit_super(sb, 1);
5162 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005163out:
5164 jbd2_journal_unlock_updates(journal);
Olivier Deprez0e641232021-09-23 10:07:05 +02005165 return err;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005166}
5167
5168/*
5169 * If we are mounting (or read-write remounting) a filesystem whose journal
5170 * has recorded an error from a previous lifetime, move that error to the
5171 * main filesystem now.
5172 */
Olivier Deprez0e641232021-09-23 10:07:05 +02005173static int ext4_clear_journal_err(struct super_block *sb,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005174 struct ext4_super_block *es)
5175{
5176 journal_t *journal;
5177 int j_errno;
5178 const char *errstr;
5179
Olivier Deprez0e641232021-09-23 10:07:05 +02005180 if (!ext4_has_feature_journal(sb)) {
5181 ext4_error(sb, "Journal got removed while the fs was mounted!");
5182 return -EFSCORRUPTED;
5183 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005184
5185 journal = EXT4_SB(sb)->s_journal;
5186
5187 /*
5188 * Now check for any error status which may have been recorded in the
5189 * journal by a prior ext4_error() or ext4_abort()
5190 */
5191
5192 j_errno = jbd2_journal_errno(journal);
5193 if (j_errno) {
5194 char nbuf[16];
5195
5196 errstr = ext4_decode_error(sb, j_errno, nbuf);
5197 ext4_warning(sb, "Filesystem error recorded "
5198 "from previous mount: %s", errstr);
5199 ext4_warning(sb, "Marking fs in need of filesystem check.");
5200
5201 EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
5202 es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
5203 ext4_commit_super(sb, 1);
5204
5205 jbd2_journal_clear_err(journal);
5206 jbd2_journal_update_sb_errno(journal);
5207 }
Olivier Deprez0e641232021-09-23 10:07:05 +02005208 return 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005209}
5210
5211/*
5212 * Force the running and committing transactions to commit,
5213 * and wait on the commit.
5214 */
5215int ext4_force_commit(struct super_block *sb)
5216{
5217 journal_t *journal;
5218
5219 if (sb_rdonly(sb))
5220 return 0;
5221
5222 journal = EXT4_SB(sb)->s_journal;
5223 return ext4_journal_force_commit(journal);
5224}
5225
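/*
 * ->sync_fs: flush pending reserved-extent conversions and any dirty
 * (non-journalled) quota, then push everything into the journal.  A block
 * device cache flush is issued only when @wait is set and the committing
 * transaction will not already send one for us (or when there is no
 * journal at all).
 */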
5226static int ext4_sync_fs(struct super_block *sb, int wait)
5227{
5228 int ret = 0;
5229 tid_t target;
5230 bool needs_barrier = false;
5231 struct ext4_sb_info *sbi = EXT4_SB(sb);
5232
5233 if (unlikely(ext4_forced_shutdown(sbi)))
5234 return 0;
5235
5236 trace_ext4_sync_fs(sb, wait);
5237 flush_workqueue(sbi->rsv_conversion_wq);
5238 /*
5239 * Writeback quota in non-journalled quota case - journalled quota has
5240 * no dirty dquots
5241 */
5242 dquot_writeback_dquots(sb, -1);
5243 /*
5244	 * Data writeback is possible w/o a journal transaction, so a barrier
5245	 * must be sent at the end of the function. But we can skip it if
5246 * transaction_commit will do it for us.
5247 */
5248 if (sbi->s_journal) {
5249 target = jbd2_get_latest_transaction(sbi->s_journal);
5250 if (wait && sbi->s_journal->j_flags & JBD2_BARRIER &&
5251 !jbd2_trans_will_send_data_barrier(sbi->s_journal, target))
5252 needs_barrier = true;
5253
5254 if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
5255 if (wait)
5256 ret = jbd2_log_wait_commit(sbi->s_journal,
5257 target);
5258 }
5259 } else if (wait && test_opt(sb, BARRIER))
5260 needs_barrier = true;
5261 if (needs_barrier) {
5262 int err;
5263 err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
5264 if (!ret)
5265 ret = err;
5266 }
5267
5268 return ret;
5269}
5270
5271/*
5272 * LVM calls this function before a (read-only) snapshot is created. This
5273 * gives us a chance to flush the journal completely and mark the fs clean.
5274 *
5275 * Note that this function alone cannot bring the filesystem to a clean
5276 * state; it relies on the upper layer to stop all data & metadata
5277 * modifications.
5278 */
5279static int ext4_freeze(struct super_block *sb)
5280{
5281 int error = 0;
5282 journal_t *journal;
5283
5284 if (sb_rdonly(sb))
5285 return 0;
5286
5287 journal = EXT4_SB(sb)->s_journal;
5288
5289 if (journal) {
5290 /* Now we set up the journal barrier. */
5291 jbd2_journal_lock_updates(journal);
5292
5293 /*
5294 * Don't clear the needs_recovery flag if we failed to
5295 * flush the journal.
5296 */
5297 error = jbd2_journal_flush(journal);
5298 if (error < 0)
5299 goto out;
5300
5301 /* Journal blocked and flushed, clear needs_recovery flag. */
5302 ext4_clear_feature_journal_needs_recovery(sb);
5303 }
5304
5305 error = ext4_commit_super(sb, 1);
5306out:
5307 if (journal)
5308 /* we rely on upper layer to stop further updates */
5309 jbd2_journal_unlock_updates(journal);
5310 return error;
5311}
5312
5313/*
5314 * Called by LVM after the snapshot is done. We need to reset the RECOVER
5315 * flag here, even though the filesystem is not technically dirty yet.
5316 */
5317static int ext4_unfreeze(struct super_block *sb)
5318{
5319 if (sb_rdonly(sb) || ext4_forced_shutdown(EXT4_SB(sb)))
5320 return 0;
5321
5322 if (EXT4_SB(sb)->s_journal) {
5323 /* Reset the needs_recovery flag before the fs is unlocked. */
5324 ext4_set_feature_journal_needs_recovery(sb);
5325 }
5326
5327 ext4_commit_super(sb, 1);
5328 return 0;
5329}
5330
5331/*
5332 * Structure to save mount options for ext4_remount's benefit
5333 */
5334struct ext4_mount_options {
5335 unsigned long s_mount_opt;
5336 unsigned long s_mount_opt2;
5337 kuid_t s_resuid;
5338 kgid_t s_resgid;
5339 unsigned long s_commit_interval;
5340 u32 s_min_batch_time, s_max_batch_time;
5341#ifdef CONFIG_QUOTA
5342 int s_jquota_fmt;
5343 char *s_qf_names[EXT4_MAXQUOTAS];
5344#endif
5345};
5346
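/*
 * ->remount_fs: re-parse the mount options, reject combinations that
 * cannot be changed on a mounted filesystem, and handle the read-only <->
 * read-write transitions (journal recovery state, MMP protection, group
 * descriptor checksum verification, quota suspend/resume).  On failure
 * the previously active options are restored.
 */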
5347static int ext4_remount(struct super_block *sb, int *flags, char *data)
5348{
5349 struct ext4_super_block *es;
5350 struct ext4_sb_info *sbi = EXT4_SB(sb);
Olivier Deprez0e641232021-09-23 10:07:05 +02005351 unsigned long old_sb_flags, vfs_flags;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005352 struct ext4_mount_options old_opts;
5353 int enable_quota = 0;
5354 ext4_group_t g;
5355 unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
5356 int err = 0;
5357#ifdef CONFIG_QUOTA
5358 int i, j;
5359 char *to_free[EXT4_MAXQUOTAS];
5360#endif
5361 char *orig_data = kstrdup(data, GFP_KERNEL);
5362
5363 if (data && !orig_data)
5364 return -ENOMEM;
5365
5366 /* Store the original options */
5367 old_sb_flags = sb->s_flags;
5368 old_opts.s_mount_opt = sbi->s_mount_opt;
5369 old_opts.s_mount_opt2 = sbi->s_mount_opt2;
5370 old_opts.s_resuid = sbi->s_resuid;
5371 old_opts.s_resgid = sbi->s_resgid;
5372 old_opts.s_commit_interval = sbi->s_commit_interval;
5373 old_opts.s_min_batch_time = sbi->s_min_batch_time;
5374 old_opts.s_max_batch_time = sbi->s_max_batch_time;
5375#ifdef CONFIG_QUOTA
5376 old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
5377 for (i = 0; i < EXT4_MAXQUOTAS; i++)
5378 if (sbi->s_qf_names[i]) {
5379 char *qf_name = get_qf_name(sb, sbi, i);
5380
5381 old_opts.s_qf_names[i] = kstrdup(qf_name, GFP_KERNEL);
5382 if (!old_opts.s_qf_names[i]) {
5383 for (j = 0; j < i; j++)
5384 kfree(old_opts.s_qf_names[j]);
5385 kfree(orig_data);
5386 return -ENOMEM;
5387 }
5388 } else
5389 old_opts.s_qf_names[i] = NULL;
5390#endif
5391 if (sbi->s_journal && sbi->s_journal->j_task->io_context)
5392 journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;
5393
Olivier Deprez0e641232021-09-23 10:07:05 +02005394 /*
5395	 * Some options can be enabled either by ext4 or by the VFS mount flag;
5396	 * either way we need to make sure they match in both *flags and
5397	 * s_flags. Copy those selected flags from *flags to s_flags.
5398 */
5399 vfs_flags = SB_LAZYTIME | SB_I_VERSION;
5400 sb->s_flags = (sb->s_flags & ~vfs_flags) | (*flags & vfs_flags);
5401
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005402 if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) {
5403 err = -EINVAL;
5404 goto restore_opts;
5405 }
5406
5407 if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
5408 test_opt(sb, JOURNAL_CHECKSUM)) {
5409 ext4_msg(sb, KERN_ERR, "changing journal_checksum "
5410 "during remount not supported; ignoring");
5411 sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM;
5412 }
5413
5414 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
5415 if (test_opt2(sb, EXPLICIT_DELALLOC)) {
5416 ext4_msg(sb, KERN_ERR, "can't mount with "
5417 "both data=journal and delalloc");
5418 err = -EINVAL;
5419 goto restore_opts;
5420 }
5421 if (test_opt(sb, DIOREAD_NOLOCK)) {
5422 ext4_msg(sb, KERN_ERR, "can't mount with "
5423 "both data=journal and dioread_nolock");
5424 err = -EINVAL;
5425 goto restore_opts;
5426 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005427 } else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) {
5428 if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
5429 ext4_msg(sb, KERN_ERR, "can't mount with "
5430 "journal_async_commit in data=ordered mode");
5431 err = -EINVAL;
5432 goto restore_opts;
5433 }
5434 }
5435
5436 if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_NO_MBCACHE) {
5437 ext4_msg(sb, KERN_ERR, "can't enable nombcache during remount");
5438 err = -EINVAL;
5439 goto restore_opts;
5440 }
5441
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005442 if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
5443 ext4_abort(sb, "Abort forced by user");
5444
5445 sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
5446 (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
5447
5448 es = sbi->s_es;
5449
5450 if (sbi->s_journal) {
5451 ext4_init_journal_params(sb, sbi->s_journal);
5452 set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
5453 }
5454
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005455 if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
5456 if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
5457 err = -EROFS;
5458 goto restore_opts;
5459 }
5460
5461 if (*flags & SB_RDONLY) {
5462 err = sync_filesystem(sb);
5463 if (err < 0)
5464 goto restore_opts;
5465 err = dquot_suspend(sb, -1);
5466 if (err < 0)
5467 goto restore_opts;
5468
5469 /*
5470 * First of all, the unconditional stuff we have to do
5471 * to disable replay of the journal when we next remount
5472 */
5473 sb->s_flags |= SB_RDONLY;
5474
5475 /*
5476 * OK, test if we are remounting a valid rw partition
5477 * readonly, and if so set the rdonly flag and then
5478 * mark the partition as valid again.
5479 */
5480 if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) &&
5481 (sbi->s_mount_state & EXT4_VALID_FS))
5482 es->s_state = cpu_to_le16(sbi->s_mount_state);
5483
Olivier Deprez0e641232021-09-23 10:07:05 +02005484 if (sbi->s_journal) {
5485 /*
5486 * We let remount-ro finish even if marking fs
5487 * as clean failed...
5488 */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005489 ext4_mark_recovery_complete(sb, es);
Olivier Deprez0e641232021-09-23 10:07:05 +02005490 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005491 if (sbi->s_mmp_tsk)
5492 kthread_stop(sbi->s_mmp_tsk);
5493 } else {
5494 /* Make sure we can mount this feature set readwrite */
5495 if (ext4_has_feature_readonly(sb) ||
5496 !ext4_feature_set_ok(sb, 0)) {
5497 err = -EROFS;
5498 goto restore_opts;
5499 }
5500 /*
5501 * Make sure the group descriptor checksums
5502 * are sane. If they aren't, refuse to remount r/w.
5503 */
5504 for (g = 0; g < sbi->s_groups_count; g++) {
5505 struct ext4_group_desc *gdp =
5506 ext4_get_group_desc(sb, g, NULL);
5507
5508 if (!ext4_group_desc_csum_verify(sb, g, gdp)) {
5509 ext4_msg(sb, KERN_ERR,
5510 "ext4_remount: Checksum for group %u failed (%u!=%u)",
5511 g, le16_to_cpu(ext4_group_desc_csum(sb, g, gdp)),
5512 le16_to_cpu(gdp->bg_checksum));
5513 err = -EFSBADCRC;
5514 goto restore_opts;
5515 }
5516 }
5517
5518 /*
5519 * If we have an unprocessed orphan list hanging
5520 * around from a previously readonly bdev mount,
5521 * require a full umount/remount for now.
5522 */
5523 if (es->s_last_orphan) {
5524 ext4_msg(sb, KERN_WARNING, "Couldn't "
5525 "remount RDWR because of unprocessed "
5526 "orphan inode list. Please "
5527 "umount/remount instead");
5528 err = -EINVAL;
5529 goto restore_opts;
5530 }
5531
5532 /*
5533 * Mounting a RDONLY partition read-write, so reread
5534 * and store the current valid flag. (It may have
5535 * been changed by e2fsck since we originally mounted
5536 * the partition.)
5537 */
Olivier Deprez0e641232021-09-23 10:07:05 +02005538 if (sbi->s_journal) {
5539 err = ext4_clear_journal_err(sb, es);
5540 if (err)
5541 goto restore_opts;
5542 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005543 sbi->s_mount_state = le16_to_cpu(es->s_state);
5544
5545 err = ext4_setup_super(sb, es, 0);
5546 if (err)
5547 goto restore_opts;
5548
5549 sb->s_flags &= ~SB_RDONLY;
5550 if (ext4_has_feature_mmp(sb))
5551 if (ext4_multi_mount_protect(sb,
5552 le64_to_cpu(es->s_mmp_block))) {
5553 err = -EROFS;
5554 goto restore_opts;
5555 }
5556 enable_quota = 1;
5557 }
5558 }
5559
5560 /*
5561 * Reinitialize lazy itable initialization thread based on
5562 * current settings
5563 */
5564 if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE))
5565 ext4_unregister_li_request(sb);
5566 else {
5567 ext4_group_t first_not_zeroed;
5568 first_not_zeroed = ext4_has_uninit_itable(sb);
5569 ext4_register_li_request(sb, first_not_zeroed);
5570 }
5571
Olivier Deprez0e641232021-09-23 10:07:05 +02005572 /*
5573 * Handle creation of system zone data early because it can fail.
5574 * Releasing of existing data is done when we are sure remount will
5575 * succeed.
5576 */
5577 if (test_opt(sb, BLOCK_VALIDITY) && !sbi->system_blks) {
5578 err = ext4_setup_system_zone(sb);
5579 if (err)
5580 goto restore_opts;
5581 }
5582
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005583 if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) {
5584 err = ext4_commit_super(sb, 1);
5585 if (err)
5586 goto restore_opts;
5587 }
5588
5589#ifdef CONFIG_QUOTA
5590 /* Release old quota file names */
5591 for (i = 0; i < EXT4_MAXQUOTAS; i++)
5592 kfree(old_opts.s_qf_names[i]);
5593 if (enable_quota) {
5594 if (sb_any_quota_suspended(sb))
5595 dquot_resume(sb, -1);
5596 else if (ext4_has_feature_quota(sb)) {
5597 err = ext4_enable_quotas(sb);
5598 if (err)
5599 goto restore_opts;
5600 }
5601 }
5602#endif
Olivier Deprez0e641232021-09-23 10:07:05 +02005603 if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks)
5604 ext4_release_system_zone(sb);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005605
Olivier Deprez0e641232021-09-23 10:07:05 +02005606 /*
5607	 * Some options can be enabled either by ext4 or by the VFS mount flag;
5608	 * either way we need to make sure they match in both *flags and
5609	 * s_flags. Copy those selected flags from s_flags to *flags.
5610 */
5611 *flags = (*flags & ~vfs_flags) | (sb->s_flags & vfs_flags);
5612
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005613 ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
5614 kfree(orig_data);
5615 return 0;
5616
5617restore_opts:
5618 sb->s_flags = old_sb_flags;
5619 sbi->s_mount_opt = old_opts.s_mount_opt;
5620 sbi->s_mount_opt2 = old_opts.s_mount_opt2;
5621 sbi->s_resuid = old_opts.s_resuid;
5622 sbi->s_resgid = old_opts.s_resgid;
5623 sbi->s_commit_interval = old_opts.s_commit_interval;
5624 sbi->s_min_batch_time = old_opts.s_min_batch_time;
5625 sbi->s_max_batch_time = old_opts.s_max_batch_time;
Olivier Deprez0e641232021-09-23 10:07:05 +02005626 if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks)
5627 ext4_release_system_zone(sb);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005628#ifdef CONFIG_QUOTA
5629 sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
5630 for (i = 0; i < EXT4_MAXQUOTAS; i++) {
5631 to_free[i] = get_qf_name(sb, sbi, i);
5632 rcu_assign_pointer(sbi->s_qf_names[i], old_opts.s_qf_names[i]);
5633 }
5634 synchronize_rcu();
5635 for (i = 0; i < EXT4_MAXQUOTAS; i++)
5636 kfree(to_free[i]);
5637#endif
5638 kfree(orig_data);
5639 return err;
5640}
5641
5642#ifdef CONFIG_QUOTA
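/*
 * Clamp the statfs(2) figures to a project's quota limits: the smaller of
 * the configured soft and hard limits caps f_blocks/f_files, and the free
 * counts are recomputed from the project's current usage.  Called from
 * ext4_statfs() for inodes carrying the PROJINHERIT flag.
 */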
5643static int ext4_statfs_project(struct super_block *sb,
5644 kprojid_t projid, struct kstatfs *buf)
5645{
5646 struct kqid qid;
5647 struct dquot *dquot;
5648 u64 limit;
5649 u64 curblock;
5650
5651 qid = make_kqid_projid(projid);
5652 dquot = dqget(sb, qid);
5653 if (IS_ERR(dquot))
5654 return PTR_ERR(dquot);
5655 spin_lock(&dquot->dq_dqb_lock);
5656
Olivier Deprez0e641232021-09-23 10:07:05 +02005657 limit = 0;
5658 if (dquot->dq_dqb.dqb_bsoftlimit &&
5659 (!limit || dquot->dq_dqb.dqb_bsoftlimit < limit))
5660 limit = dquot->dq_dqb.dqb_bsoftlimit;
5661 if (dquot->dq_dqb.dqb_bhardlimit &&
5662 (!limit || dquot->dq_dqb.dqb_bhardlimit < limit))
5663 limit = dquot->dq_dqb.dqb_bhardlimit;
5664 limit >>= sb->s_blocksize_bits;
5665
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005666 if (limit && buf->f_blocks > limit) {
5667 curblock = (dquot->dq_dqb.dqb_curspace +
5668 dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
5669 buf->f_blocks = limit;
5670 buf->f_bfree = buf->f_bavail =
5671 (buf->f_blocks > curblock) ?
5672 (buf->f_blocks - curblock) : 0;
5673 }
5674
Olivier Deprez0e641232021-09-23 10:07:05 +02005675 limit = 0;
5676 if (dquot->dq_dqb.dqb_isoftlimit &&
5677 (!limit || dquot->dq_dqb.dqb_isoftlimit < limit))
5678 limit = dquot->dq_dqb.dqb_isoftlimit;
5679 if (dquot->dq_dqb.dqb_ihardlimit &&
5680 (!limit || dquot->dq_dqb.dqb_ihardlimit < limit))
5681 limit = dquot->dq_dqb.dqb_ihardlimit;
5682
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005683 if (limit && buf->f_files > limit) {
5684 buf->f_files = limit;
5685 buf->f_ffree =
5686 (buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
5687 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
5688 }
5689
5690 spin_unlock(&dquot->dq_dqb_lock);
5691 dqput(dquot);
5692 return 0;
5693}
5694#endif
5695
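/*
 * ->statfs: unless mounted with minixdf, the filesystem overhead is
 * subtracted from the reported block count; root-reserved and internally
 * reserved clusters are excluded from f_bavail, and f_fsid is derived by
 * xor-ing the two 64-bit halves of the superblock UUID.
 */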
5696static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
5697{
5698 struct super_block *sb = dentry->d_sb;
5699 struct ext4_sb_info *sbi = EXT4_SB(sb);
5700 struct ext4_super_block *es = sbi->s_es;
5701 ext4_fsblk_t overhead = 0, resv_blocks;
5702 u64 fsid;
5703 s64 bfree;
5704 resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters));
5705
5706 if (!test_opt(sb, MINIX_DF))
5707 overhead = sbi->s_overhead;
5708
5709 buf->f_type = EXT4_SUPER_MAGIC;
5710 buf->f_bsize = sb->s_blocksize;
5711 buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead);
5712 bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
5713 percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
5714	/* prevent underflow in case little free space is available */
5715 buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0));
5716 buf->f_bavail = buf->f_bfree -
5717 (ext4_r_blocks_count(es) + resv_blocks);
5718 if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks))
5719 buf->f_bavail = 0;
5720 buf->f_files = le32_to_cpu(es->s_inodes_count);
5721 buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
5722 buf->f_namelen = EXT4_NAME_LEN;
5723 fsid = le64_to_cpup((void *)es->s_uuid) ^
5724 le64_to_cpup((void *)es->s_uuid + sizeof(u64));
5725 buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
5726 buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
5727
5728#ifdef CONFIG_QUOTA
5729 if (ext4_test_inode_flag(dentry->d_inode, EXT4_INODE_PROJINHERIT) &&
5730 sb_has_quota_limits_enabled(sb, PRJQUOTA))
5731 ext4_statfs_project(sb, EXT4_I(dentry->d_inode)->i_projid, buf);
5732#endif
5733 return 0;
5734}
5735
5736
5737#ifdef CONFIG_QUOTA
5738
5739/*
5740 * Helper functions so that the transaction is started before we acquire
5741 * dqio_sem, to keep the correct lock ordering of transaction > dqio_sem
5742 */
5743static inline struct inode *dquot_to_inode(struct dquot *dquot)
5744{
5745 return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
5746}
5747
5748static int ext4_write_dquot(struct dquot *dquot)
5749{
5750 int ret, err;
5751 handle_t *handle;
5752 struct inode *inode;
5753
5754 inode = dquot_to_inode(dquot);
5755 handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
5756 EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
5757 if (IS_ERR(handle))
5758 return PTR_ERR(handle);
5759 ret = dquot_commit(dquot);
5760 err = ext4_journal_stop(handle);
5761 if (!ret)
5762 ret = err;
5763 return ret;
5764}
5765
5766static int ext4_acquire_dquot(struct dquot *dquot)
5767{
5768 int ret, err;
5769 handle_t *handle;
5770
5771 handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
5772 EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
5773 if (IS_ERR(handle))
5774 return PTR_ERR(handle);
5775 ret = dquot_acquire(dquot);
5776 err = ext4_journal_stop(handle);
5777 if (!ret)
5778 ret = err;
5779 return ret;
5780}
5781
5782static int ext4_release_dquot(struct dquot *dquot)
5783{
5784 int ret, err;
5785 handle_t *handle;
5786
5787 handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
5788 EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
5789 if (IS_ERR(handle)) {
5790 /* Release dquot anyway to avoid endless cycle in dqput() */
5791 dquot_release(dquot);
5792 return PTR_ERR(handle);
5793 }
5794 ret = dquot_release(dquot);
5795 err = ext4_journal_stop(handle);
5796 if (!ret)
5797 ret = err;
5798 return ret;
5799}
5800
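/*
 * With journalled quota (the quota feature or journalled quota files in
 * use), a dirtied dquot is written out immediately inside a transaction;
 * otherwise it is only marked dirty for later writeback.
 */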
5801static int ext4_mark_dquot_dirty(struct dquot *dquot)
5802{
5803 struct super_block *sb = dquot->dq_sb;
5804 struct ext4_sb_info *sbi = EXT4_SB(sb);
5805
5806 /* Are we journaling quotas? */
5807 if (ext4_has_feature_quota(sb) ||
5808 sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
5809 dquot_mark_dquot_dirty(dquot);
5810 return ext4_write_dquot(dquot);
5811 } else {
5812 return dquot_mark_dquot_dirty(dquot);
5813 }
5814}
5815
5816static int ext4_write_info(struct super_block *sb, int type)
5817{
5818 int ret, err;
5819 handle_t *handle;
5820
5821 /* Data block + inode block */
5822 handle = ext4_journal_start(d_inode(sb->s_root), EXT4_HT_QUOTA, 2);
5823 if (IS_ERR(handle))
5824 return PTR_ERR(handle);
5825 ret = dquot_commit_info(sb, type);
5826 err = ext4_journal_stop(handle);
5827 if (!ret)
5828 ret = err;
5829 return ret;
5830}
5831
5832/*
5833 * Turn on quotas during mount time - we need to find
5834 * the quota file and such...
5835 */
5836static int ext4_quota_on_mount(struct super_block *sb, int type)
5837{
5838 return dquot_quota_on_mount(sb, get_qf_name(sb, EXT4_SB(sb), type),
5839 EXT4_SB(sb)->s_jquota_fmt, type);
5840}
5841
5842static void lockdep_set_quota_inode(struct inode *inode, int subclass)
5843{
5844 struct ext4_inode_info *ei = EXT4_I(inode);
5845
5846 /* The first argument of lockdep_set_subclass has to be
5847 * *exactly* the same as the argument to init_rwsem() --- in
5848 * this case, in init_once() --- or lockdep gets unhappy
5849 * because the name of the lock is set using the
5850 * stringification of the argument to init_rwsem().
5851 */
5852 (void) ei; /* shut up clang warning if !CONFIG_LOCKDEP */
5853 lockdep_set_subclass(&ei->i_data_sem, subclass);
5854}
5855
5856/*
5857 * Standard function to be called on quota_on
5858 */
5859static int ext4_quota_on(struct super_block *sb, int type, int format_id,
5860 const struct path *path)
5861{
5862 int err;
5863
5864 if (!test_opt(sb, QUOTA))
5865 return -EINVAL;
5866
5867 /* Quotafile not on the same filesystem? */
5868 if (path->dentry->d_sb != sb)
5869 return -EXDEV;
Olivier Deprez0e641232021-09-23 10:07:05 +02005870
5871 /* Quota already enabled for this file? */
5872 if (IS_NOQUOTA(d_inode(path->dentry)))
5873 return -EBUSY;
5874
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005875 /* Journaling quota? */
5876 if (EXT4_SB(sb)->s_qf_names[type]) {
5877 /* Quotafile not in fs root? */
5878 if (path->dentry->d_parent != sb->s_root)
5879 ext4_msg(sb, KERN_WARNING,
5880 "Quota file not on filesystem root. "
5881 "Journaled quota will not work");
5882 sb_dqopt(sb)->flags |= DQUOT_NOLIST_DIRTY;
5883 } else {
5884 /*
5885 * Clear the flag just in case mount options changed since
5886 * last time.
5887 */
5888 sb_dqopt(sb)->flags &= ~DQUOT_NOLIST_DIRTY;
5889 }
5890
5891 /*
5892	 * When we journal data on the quota file, we have to flush the journal
5893	 * to see all updates to the file when we bypass the page cache...
5894 */
5895 if (EXT4_SB(sb)->s_journal &&
5896 ext4_should_journal_data(d_inode(path->dentry))) {
5897 /*
5898 * We don't need to lock updates but journal_flush() could
5899 * otherwise be livelocked...
5900 */
5901 jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
5902 err = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
5903 jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
5904 if (err)
5905 return err;
5906 }
5907
5908 lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
5909 err = dquot_quota_on(sb, type, format_id, path);
5910 if (err) {
5911 lockdep_set_quota_inode(path->dentry->d_inode,
5912 I_DATA_SEM_NORMAL);
5913 } else {
5914 struct inode *inode = d_inode(path->dentry);
5915 handle_t *handle;
5916
5917 /*
5918 * Set inode flags to prevent userspace from messing with quota
5919 * files. If this fails, we return success anyway since quotas
5920 * are already enabled and this is not a hard failure.
5921 */
5922 inode_lock(inode);
5923 handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
5924 if (IS_ERR(handle))
5925 goto unlock_inode;
5926 EXT4_I(inode)->i_flags |= EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL;
5927 inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
5928 S_NOATIME | S_IMMUTABLE);
5929 ext4_mark_inode_dirty(handle, inode);
5930 ext4_journal_stop(handle);
5931 unlock_inode:
5932 inode_unlock(inode);
5933 }
5934 return err;
5935}
5936
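/*
 * Enable quota accounting for one type backed by the hidden quota inode
 * recorded in the superblock (quota-feature filesystems only).  The inode
 * is flagged S_NOQUOTA so that updates to the quota file itself are not
 * quota-accounted recursively.
 */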
5937static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
5938 unsigned int flags)
5939{
5940 int err;
5941 struct inode *qf_inode;
5942 unsigned long qf_inums[EXT4_MAXQUOTAS] = {
5943 le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
5944 le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
5945 le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
5946 };
5947
5948 BUG_ON(!ext4_has_feature_quota(sb));
5949
5950 if (!qf_inums[type])
5951 return -EPERM;
5952
David Brazdil0f672f62019-12-10 10:32:29 +00005953 qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005954 if (IS_ERR(qf_inode)) {
5955 ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
5956 return PTR_ERR(qf_inode);
5957 }
5958
5959 /* Don't account quota for quota files to avoid recursion */
5960 qf_inode->i_flags |= S_NOQUOTA;
5961 lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
5962 err = dquot_enable(qf_inode, type, format_id, flags);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005963 if (err)
5964 lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
David Brazdil0f672f62019-12-10 10:32:29 +00005965 iput(qf_inode);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005966
5967 return err;
5968}
5969
5970/* Enable usage tracking for all quota types. */
5971static int ext4_enable_quotas(struct super_block *sb)
5972{
5973 int type, err = 0;
5974 unsigned long qf_inums[EXT4_MAXQUOTAS] = {
5975 le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
5976 le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
5977 le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
5978 };
5979 bool quota_mopt[EXT4_MAXQUOTAS] = {
5980 test_opt(sb, USRQUOTA),
5981 test_opt(sb, GRPQUOTA),
5982 test_opt(sb, PRJQUOTA),
5983 };
5984
5985 sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
5986 for (type = 0; type < EXT4_MAXQUOTAS; type++) {
5987 if (qf_inums[type]) {
5988 err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
5989 DQUOT_USAGE_ENABLED |
5990 (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
5991 if (err) {
5992 ext4_warning(sb,
5993 "Failed to enable quota tracking "
5994 "(type=%d, err=%d). Please run "
5995 "e2fsck to fix.", type, err);
5996 for (type--; type >= 0; type--)
5997 dquot_quota_off(sb, type);
5998
5999 return err;
6000 }
6001 }
6002 }
6003 return 0;
6004}
6005
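/*
 * Turn quotas off for one type.  For visible (non-feature) quota files,
 * the NOATIME/IMMUTABLE protection applied in ext4_quota_on() is removed
 * and the timestamps are refreshed so userspace can examine the file.
 */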
6006static int ext4_quota_off(struct super_block *sb, int type)
6007{
6008 struct inode *inode = sb_dqopt(sb)->files[type];
6009 handle_t *handle;
6010 int err;
6011
6012 /* Force all delayed allocation blocks to be allocated.
6013 * Caller already holds s_umount sem */
6014 if (test_opt(sb, DELALLOC))
6015 sync_filesystem(sb);
6016
6017 if (!inode || !igrab(inode))
6018 goto out;
6019
6020 err = dquot_quota_off(sb, type);
6021 if (err || ext4_has_feature_quota(sb))
6022 goto out_put;
6023
6024 inode_lock(inode);
6025 /*
6026 * Update modification times of quota files when userspace can
6027 * start looking at them. If we fail, we return success anyway since
6028 * this is not a hard failure and quotas are already disabled.
6029 */
6030 handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
6031 if (IS_ERR(handle))
6032 goto out_unlock;
6033 EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL);
6034 inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
6035 inode->i_mtime = inode->i_ctime = current_time(inode);
6036 ext4_mark_inode_dirty(handle, inode);
6037 ext4_journal_stop(handle);
6038out_unlock:
6039 inode_unlock(inode);
6040out_put:
6041 lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL);
6042 iput(inode);
6043 return err;
6044out:
6045 return dquot_quota_off(sb, type);
6046}
6047
6048/* Read data from the quota file - bypass the page cache because we cannot
6049 * afford to acquire the locks... As quota files are never truncated and the
6050 * quota code itself serializes the operations (and no one else should touch
6051 * the files), we don't have to be afraid of races */
6052static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
6053 size_t len, loff_t off)
6054{
6055 struct inode *inode = sb_dqopt(sb)->files[type];
6056 ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
6057 int offset = off & (sb->s_blocksize - 1);
6058 int tocopy;
6059 size_t toread;
6060 struct buffer_head *bh;
6061 loff_t i_size = i_size_read(inode);
6062
6063 if (off > i_size)
6064 return 0;
6065 if (off+len > i_size)
6066 len = i_size-off;
6067 toread = len;
6068 while (toread > 0) {
6069 tocopy = sb->s_blocksize - offset < toread ?
6070 sb->s_blocksize - offset : toread;
6071 bh = ext4_bread(NULL, inode, blk, 0);
6072 if (IS_ERR(bh))
6073 return PTR_ERR(bh);
6074 if (!bh) /* A hole? */
6075 memset(data, 0, tocopy);
6076 else
6077 memcpy(data, bh->b_data+offset, tocopy);
6078 brelse(bh);
6079 offset = 0;
6080 toread -= tocopy;
6081 data += tocopy;
6082 blk++;
6083 }
6084 return len;
6085}
6086
6087/* Write to quotafile (we know the transaction is already started and has
6088 * enough credits) */
6089static ssize_t ext4_quota_write(struct super_block *sb, int type,
6090 const char *data, size_t len, loff_t off)
6091{
6092 struct inode *inode = sb_dqopt(sb)->files[type];
6093 ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
6094 int err, offset = off & (sb->s_blocksize - 1);
6095 int retries = 0;
6096 struct buffer_head *bh;
6097 handle_t *handle = journal_current_handle();
6098
6099 if (EXT4_SB(sb)->s_journal && !handle) {
6100 ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
6101 " cancelled because transaction is not started",
6102 (unsigned long long)off, (unsigned long long)len);
6103 return -EIO;
6104 }
6105 /*
6106	 * Since we account for only one data block in the transaction credits,
6107	 * it is impossible to cross a block boundary.
6108 */
6109 if (sb->s_blocksize - offset < len) {
6110 ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
6111 " cancelled because not block aligned",
6112 (unsigned long long)off, (unsigned long long)len);
6113 return -EIO;
6114 }
6115
6116 do {
6117 bh = ext4_bread(handle, inode, blk,
6118 EXT4_GET_BLOCKS_CREATE |
6119 EXT4_GET_BLOCKS_METADATA_NOFAIL);
6120 } while (IS_ERR(bh) && (PTR_ERR(bh) == -ENOSPC) &&
6121 ext4_should_retry_alloc(inode->i_sb, &retries));
6122 if (IS_ERR(bh))
6123 return PTR_ERR(bh);
6124 if (!bh)
6125 goto out;
6126 BUFFER_TRACE(bh, "get write access");
6127 err = ext4_journal_get_write_access(handle, bh);
6128 if (err) {
6129 brelse(bh);
6130 return err;
6131 }
6132 lock_buffer(bh);
6133 memcpy(bh->b_data+offset, data, len);
6134 flush_dcache_page(bh->b_page);
6135 unlock_buffer(bh);
6136 err = ext4_handle_dirty_metadata(handle, NULL, bh);
6137 brelse(bh);
6138out:
6139 if (inode->i_size < off + len) {
6140 i_size_write(inode, off + len);
6141 EXT4_I(inode)->i_disksize = inode->i_size;
6142 ext4_mark_inode_dirty(handle, inode);
6143 }
6144 return len;
6145}
6146
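/*
 * Look up the next existing quota id (Q_GETNEXTQUOTA); only possible when
 * the loaded quota format implements get_next_id.
 */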
6147static int ext4_get_next_id(struct super_block *sb, struct kqid *qid)
6148{
6149 const struct quota_format_ops *ops;
6150
6151 if (!sb_has_quota_loaded(sb, qid->type))
6152 return -ESRCH;
6153 ops = sb_dqopt(sb)->ops[qid->type];
6154 if (!ops || !ops->get_next_id)
6155 return -ENOSYS;
6156 return dquot_get_next_id(sb, qid);
6157}
6158#endif
6159
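/* ->mount: a thin wrapper; ext4_fill_super() does the real work. */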
6160static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
6161 const char *dev_name, void *data)
6162{
6163 return mount_bdev(fs_type, flags, dev_name, data, ext4_fill_super);
6164}
6165
6166#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
6167static inline void register_as_ext2(void)
6168{
6169 int err = register_filesystem(&ext2_fs_type);
6170 if (err)
6171 printk(KERN_WARNING
6172 "EXT4-fs: Unable to register as ext2 (%d)\n", err);
6173}
6174
6175static inline void unregister_as_ext2(void)
6176{
6177 unregister_filesystem(&ext2_fs_type);
6178}
6179
6180static inline int ext2_feature_set_ok(struct super_block *sb)
6181{
6182 if (ext4_has_unknown_ext2_incompat_features(sb))
6183 return 0;
6184 if (sb_rdonly(sb))
6185 return 1;
6186 if (ext4_has_unknown_ext2_ro_compat_features(sb))
6187 return 0;
6188 return 1;
6189}
6190#else
6191static inline void register_as_ext2(void) { }
6192static inline void unregister_as_ext2(void) { }
6193static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; }
6194#endif
6195
6196static inline void register_as_ext3(void)
6197{
6198 int err = register_filesystem(&ext3_fs_type);
6199 if (err)
6200 printk(KERN_WARNING
6201 "EXT4-fs: Unable to register as ext3 (%d)\n", err);
6202}
6203
6204static inline void unregister_as_ext3(void)
6205{
6206 unregister_filesystem(&ext3_fs_type);
6207}
6208
6209static inline int ext3_feature_set_ok(struct super_block *sb)
6210{
6211 if (ext4_has_unknown_ext3_incompat_features(sb))
6212 return 0;
6213 if (!ext4_has_feature_journal(sb))
6214 return 0;
6215 if (sb_rdonly(sb))
6216 return 1;
6217 if (ext4_has_unknown_ext3_ro_compat_features(sb))
6218 return 0;
6219 return 1;
6220}
6221
6222static struct file_system_type ext4_fs_type = {
6223 .owner = THIS_MODULE,
6224 .name = "ext4",
6225 .mount = ext4_mount,
6226 .kill_sb = kill_block_super,
6227 .fs_flags = FS_REQUIRES_DEV,
6228};
6229MODULE_ALIAS_FS("ext4");
6230
6231/* Shared across all ext4 file systems */
6232wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
6233
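/*
 * Module init: set up the shared ioend wait queues and the ext4
 * subsystems (extent status, pending reservations, post-read processing,
 * page I/O, block validity, sysfs, mballoc, inode cache), then register
 * the ext2/ext3/ext4 filesystem types.  Each step is unwound in reverse
 * order if a later one fails.
 */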
6234static int __init ext4_init_fs(void)
6235{
6236 int i, err;
6237
6238 ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);
6239 ext4_li_info = NULL;
6240 mutex_init(&ext4_li_mtx);
6241
6242 /* Build-time check for flags consistency */
6243 ext4_check_flag_values();
6244
6245 for (i = 0; i < EXT4_WQ_HASH_SZ; i++)
6246 init_waitqueue_head(&ext4__ioend_wq[i]);
6247
6248 err = ext4_init_es();
6249 if (err)
6250 return err;
6251
David Brazdil0f672f62019-12-10 10:32:29 +00006252 err = ext4_init_pending();
6253 if (err)
6254 goto out7;
6255
6256 err = ext4_init_post_read_processing();
6257 if (err)
6258 goto out6;
6259
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006260 err = ext4_init_pageio();
6261 if (err)
6262 goto out5;
6263
6264 err = ext4_init_system_zone();
6265 if (err)
6266 goto out4;
6267
6268 err = ext4_init_sysfs();
6269 if (err)
6270 goto out3;
6271
6272 err = ext4_init_mballoc();
6273 if (err)
6274 goto out2;
6275 err = init_inodecache();
6276 if (err)
6277 goto out1;
6278 register_as_ext3();
6279 register_as_ext2();
6280 err = register_filesystem(&ext4_fs_type);
6281 if (err)
6282 goto out;
6283
6284 return 0;
6285out:
6286 unregister_as_ext2();
6287 unregister_as_ext3();
6288 destroy_inodecache();
6289out1:
6290 ext4_exit_mballoc();
6291out2:
6292 ext4_exit_sysfs();
6293out3:
6294 ext4_exit_system_zone();
6295out4:
6296 ext4_exit_pageio();
6297out5:
David Brazdil0f672f62019-12-10 10:32:29 +00006298 ext4_exit_post_read_processing();
6299out6:
6300 ext4_exit_pending();
6301out7:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006302 ext4_exit_es();
6303
6304 return err;
6305}
6306
6307static void __exit ext4_exit_fs(void)
6308{
6309 ext4_destroy_lazyinit_thread();
6310 unregister_as_ext2();
6311 unregister_as_ext3();
6312 unregister_filesystem(&ext4_fs_type);
6313 destroy_inodecache();
6314 ext4_exit_mballoc();
6315 ext4_exit_sysfs();
6316 ext4_exit_system_zone();
6317 ext4_exit_pageio();
David Brazdil0f672f62019-12-10 10:32:29 +00006318 ext4_exit_post_read_processing();
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006319 ext4_exit_es();
David Brazdil0f672f62019-12-10 10:32:29 +00006320 ext4_exit_pending();
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006321}
6322
6323MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
6324MODULE_DESCRIPTION("Fourth Extended Filesystem");
6325MODULE_LICENSE("GPL");
6326MODULE_SOFTDEP("pre: crc32c");
6327module_init(ext4_init_fs)
6328module_exit(ext4_exit_fs)