// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/super.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>
#include <linux/quota.h>
#include <linux/unicode.h>
#include <linux/part_stat.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>

static struct kmem_cache *f2fs_inode_cachep;

#ifdef CONFIG_F2FS_FAULT_INJECTION

const char *f2fs_fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]		= "kmalloc",
	[FAULT_KVMALLOC]	= "kvmalloc",
	[FAULT_PAGE_ALLOC]	= "page alloc",
	[FAULT_PAGE_GET]	= "page get",
	[FAULT_ALLOC_BIO]	= "alloc bio",
	[FAULT_ALLOC_NID]	= "alloc nid",
	[FAULT_ORPHAN]		= "orphan",
	[FAULT_BLOCK]		= "no more block",
	[FAULT_DIR_DEPTH]	= "too big dir depth",
	[FAULT_EVICT_INODE]	= "evict_inode fail",
	[FAULT_TRUNCATE]	= "truncate fail",
	[FAULT_READ_IO]		= "read IO error",
	[FAULT_CHECKPOINT]	= "checkpoint error",
	[FAULT_DISCARD]		= "discard error",
	[FAULT_WRITE_IO]	= "write IO error",
};

void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
							unsigned int type)
{
	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;

	if (rate) {
		atomic_set(&ffi->inject_ops, 0);
		ffi->inject_rate = rate;
	}

	if (type)
		ffi->inject_type = type;

	if (!rate && !type)
		memset(ffi, 0, sizeof(struct f2fs_fault_info));
}
#endif

/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};

enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	Opt_norecovery,
	Opt_discard,
	Opt_nodiscard,
	Opt_noheap,
	Opt_heap,
	Opt_user_xattr,
	Opt_nouser_xattr,
	Opt_acl,
	Opt_noacl,
	Opt_active_logs,
	Opt_disable_ext_identify,
	Opt_inline_xattr,
	Opt_noinline_xattr,
	Opt_inline_xattr_size,
	Opt_inline_data,
	Opt_inline_dentry,
	Opt_noinline_dentry,
	Opt_flush_merge,
	Opt_noflush_merge,
	Opt_nobarrier,
	Opt_fastboot,
	Opt_extent_cache,
	Opt_noextent_cache,
	Opt_noinline_data,
	Opt_data_flush,
	Opt_reserve_root,
	Opt_resgid,
	Opt_resuid,
	Opt_mode,
	Opt_io_size_bits,
	Opt_fault_injection,
	Opt_fault_type,
	Opt_lazytime,
	Opt_nolazytime,
	Opt_quota,
	Opt_noquota,
	Opt_usrquota,
	Opt_grpquota,
	Opt_prjquota,
	Opt_usrjquota,
	Opt_grpjquota,
	Opt_prjjquota,
	Opt_offusrjquota,
	Opt_offgrpjquota,
	Opt_offprjjquota,
	Opt_jqfmt_vfsold,
	Opt_jqfmt_vfsv0,
	Opt_jqfmt_vfsv1,
	Opt_whint,
	Opt_alloc,
	Opt_fsync,
	Opt_test_dummy_encryption,
	Opt_inlinecrypt,
	Opt_checkpoint_disable,
	Opt_checkpoint_disable_cap,
	Opt_checkpoint_disable_cap_perc,
	Opt_checkpoint_enable,
	Opt_compress_algorithm,
	Opt_compress_log_size,
	Opt_compress_extension,
	Opt_atgc,
	Opt_err,
};

static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_noheap, "no_heap"},
	{Opt_heap, "heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_noinline_xattr, "noinline_xattr"},
	{Opt_inline_xattr_size, "inline_xattr_size=%u"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_noinline_dentry, "noinline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_noflush_merge, "noflush_merge"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_reserve_root, "reserve_root=%u"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_mode, "mode=%s"},
	{Opt_io_size_bits, "io_bits=%u"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_fault_type, "fault_type=%u"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_quota, "quota"},
	{Opt_noquota, "noquota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_prjjquota, "prjjquota=%s"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_offprjjquota, "prjjquota="},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_whint, "whint_mode=%s"},
	{Opt_alloc, "alloc_mode=%s"},
	{Opt_fsync, "fsync_mode=%s"},
	{Opt_test_dummy_encryption, "test_dummy_encryption=%s"},
	{Opt_test_dummy_encryption, "test_dummy_encryption"},
	{Opt_inlinecrypt, "inlinecrypt"},
	{Opt_checkpoint_disable, "checkpoint=disable"},
	{Opt_checkpoint_disable_cap, "checkpoint=disable:%u"},
	{Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"},
	{Opt_checkpoint_enable, "checkpoint=enable"},
	{Opt_compress_algorithm, "compress_algorithm=%s"},
	{Opt_compress_log_size, "compress_log_size=%u"},
	{Opt_compress_extension, "compress_extension=%s"},
	{Opt_atgc, "atgc"},
	{Opt_err, NULL},
};

void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	int level;

	va_start(args, fmt);

	level = printk_get_level(fmt);
	vaf.fmt = printk_skip_level(fmt);
	vaf.va = &args;
	printk("%c%cF2FS-fs (%s): %pV\n",
	       KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);

	va_end(args);
}

#ifdef CONFIG_UNICODE
static const struct f2fs_sb_encodings {
	__u16 magic;
	char *name;
	char *version;
} f2fs_sb_encoding_map[] = {
	{F2FS_ENC_UTF8_12_1, "utf8", "12.1.0"},
};

static int f2fs_sb_read_encoding(const struct f2fs_super_block *sb,
				 const struct f2fs_sb_encodings **encoding,
				 __u16 *flags)
{
	__u16 magic = le16_to_cpu(sb->s_encoding);
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_sb_encoding_map); i++)
		if (magic == f2fs_sb_encoding_map[i].magic)
			break;

	if (i >= ARRAY_SIZE(f2fs_sb_encoding_map))
		return -EINVAL;

	*encoding = &f2fs_sb_encoding_map[i];
	*flags = le16_to_cpu(sb->s_encoding_flags);

	return 0;
}
#endif

static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
{
	block_t limit = min((sbi->user_block_count << 1) / 1000,
			sbi->user_block_count - sbi->reserved_blocks);

	/* limit is 0.2% */
	if (test_opt(sbi, RESERVE_ROOT) &&
			F2FS_OPTION(sbi).root_reserved_blocks > limit) {
		F2FS_OPTION(sbi).root_reserved_blocks = limit;
		f2fs_info(sbi, "Reduce reserved blocks for root = %u",
			  F2FS_OPTION(sbi).root_reserved_blocks);
	}
	if (!test_opt(sbi, RESERVE_ROOT) &&
		(!uid_eq(F2FS_OPTION(sbi).s_resuid,
				make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
		!gid_eq(F2FS_OPTION(sbi).s_resgid,
				make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
		f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
			  from_kuid_munged(&init_user_ns,
					   F2FS_OPTION(sbi).s_resuid),
			  from_kgid_munged(&init_user_ns,
					   F2FS_OPTION(sbi).s_resgid));
}

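/*
 * When IO alignment (io_bits) is enabled, reserve extra segments so that
 * migrating one section always has enough free space in the worst case.
 */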
static inline int adjust_reserved_segment(struct f2fs_sb_info *sbi)
{
	unsigned int sec_blks = sbi->blocks_per_seg * sbi->segs_per_sec;
	unsigned int avg_vblocks;
	unsigned int wanted_reserved_segments;
	block_t avail_user_block_count;

	if (!F2FS_IO_ALIGNED(sbi))
		return 0;

	/* average valid block count in section in worst case */
	avg_vblocks = sec_blks / F2FS_IO_SIZE(sbi);

	/*
	 * we need enough free space when migrating one section in worst case
	 */
	wanted_reserved_segments = (F2FS_IO_SIZE(sbi) / avg_vblocks) *
						reserved_segments(sbi);
	wanted_reserved_segments -= reserved_segments(sbi);

	avail_user_block_count = sbi->user_block_count -
				sbi->current_reserved_blocks -
				F2FS_OPTION(sbi).root_reserved_blocks;

	if (wanted_reserved_segments * sbi->blocks_per_seg >
					avail_user_block_count) {
		f2fs_err(sbi, "IO align feature can't grab additional reserved segment: %u, available segments: %u",
			wanted_reserved_segments,
			avail_user_block_count >> sbi->log_blocks_per_seg);
		return -ENOSPC;
	}

	SM_I(sbi)->additional_reserved_segments = wanted_reserved_segments;

	f2fs_info(sbi, "IO align feature needs additional reserved segment: %u",
			 wanted_reserved_segments);

	return 0;
}

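/*
 * Convert the checkpoint=disable:x%% percentage into an absolute block
 * count stored in unusable_cap.
 */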
static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi)
{
	if (!F2FS_OPTION(sbi).unusable_cap_perc)
		return;

	if (F2FS_OPTION(sbi).unusable_cap_perc == 100)
		F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count;
	else
		F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) *
					F2FS_OPTION(sbi).unusable_cap_perc;

	f2fs_info(sbi, "Adjust unusable cap for checkpoint=disable = %u / %u%%",
			F2FS_OPTION(sbi).unusable_cap,
			F2FS_OPTION(sbi).unusable_cap_perc);
}

static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}

#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
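/* Remember the quota file name passed via usrjquota=/grpjquota=/prjjquota=. */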
static int f2fs_set_qf_name(struct super_block *sb, int qtype,
							substring_t *args)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	char *qname;
	int ret = -EINVAL;

	if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
		return -EINVAL;
	}
	if (f2fs_sb_has_quota_ino(sbi)) {
		f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name");
		return 0;
	}

	qname = match_strdup(args);
	if (!qname) {
		f2fs_err(sbi, "Not enough memory for storing quotafile name");
		return -ENOMEM;
	}
	if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
		if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
			ret = 0;
		else
			f2fs_err(sbi, "%s quota file already specified",
				 QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		f2fs_err(sbi, "quotafile must be on filesystem root");
		goto errout;
	}
	F2FS_OPTION(sbi).s_qf_names[qtype] = qname;
	set_opt(sbi, QUOTA);
	return 0;
errout:
	kfree(qname);
	return ret;
}

static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
		return -EINVAL;
	}
	kfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
	F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
	return 0;
}

static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
{
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) {
		f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement.");
		return -1;
	}
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) {
		if (test_opt(sbi, USRQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
			clear_opt(sbi, USRQUOTA);

		if (test_opt(sbi, GRPQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
			clear_opt(sbi, GRPQUOTA);

		if (test_opt(sbi, PRJQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
			clear_opt(sbi, PRJQUOTA);

		if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
				test_opt(sbi, PRJQUOTA)) {
			f2fs_err(sbi, "old and new quota format mixing");
			return -1;
		}

		if (!F2FS_OPTION(sbi).s_jquota_fmt) {
			f2fs_err(sbi, "journaled quota format not specified");
			return -1;
		}
	}

	if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) {
		f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt");
		F2FS_OPTION(sbi).s_jquota_fmt = 0;
	}
	return 0;
}
#endif

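/*
 * Handle the test_dummy_encryption mount option; it is only honoured when
 * the encrypt feature is on and cannot be changed on remount.
 */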
static int f2fs_set_test_dummy_encryption(struct super_block *sb,
					  const char *opt,
					  const substring_t *arg,
					  bool is_remount)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
#ifdef CONFIG_FS_ENCRYPTION
	int err;

	if (!f2fs_sb_has_encrypt(sbi)) {
		f2fs_err(sbi, "Encrypt feature is off");
		return -EINVAL;
	}

	/*
	 * This mount option is just for testing, and it's not worthwhile to
	 * implement the extra complexity (e.g. RCU protection) that would be
	 * needed to allow it to be set or changed during remount. We do allow
	 * it to be specified during remount, but only if there is no change.
	 */
	if (is_remount && !F2FS_OPTION(sbi).dummy_enc_policy.policy) {
		f2fs_warn(sbi, "Can't set test_dummy_encryption on remount");
		return -EINVAL;
	}
	err = fscrypt_set_test_dummy_encryption(
		sb, arg->from, &F2FS_OPTION(sbi).dummy_enc_policy);
	if (err) {
		if (err == -EEXIST)
			f2fs_warn(sbi,
				  "Can't change test_dummy_encryption on remount");
		else if (err == -EINVAL)
			f2fs_warn(sbi, "Value of option \"%s\" is unrecognized",
				  opt);
		else
			f2fs_warn(sbi, "Error processing option \"%s\" [%d]",
				  opt, err);
		return -EINVAL;
	}
	f2fs_warn(sbi, "Test dummy encryption mode enabled");
#else
	f2fs_warn(sbi, "Test dummy encryption mount option ignored");
#endif
	return 0;
}

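/* Parse the mount option string and fill in F2FS_OPTION(sbi). */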
static int parse_options(struct super_block *sb, char *options, bool is_remount)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	substring_t args[MAX_OPT_ARGS];
#ifdef CONFIG_F2FS_FS_COMPRESSION
	unsigned char (*ext)[F2FS_EXTENSION_LEN];
	int ext_cnt;
#endif
	char *p, *name;
	int arg = 0;
	kuid_t uid;
	kgid_t gid;
	int ret;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "on")) {
				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
			} else if (!strcmp(name, "off")) {
				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF;
			} else if (!strcmp(name, "sync")) {
				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_norecovery:
			/* this option mounts f2fs with ro */
			set_opt(sbi, NORECOVERY);
			if (!f2fs_readonly(sb))
				return -EINVAL;
			break;
		case Opt_discard:
			set_opt(sbi, DISCARD);
			break;
		case Opt_nodiscard:
			if (f2fs_sb_has_blkzoned(sbi)) {
				f2fs_warn(sbi, "discard is required for zoned block devices");
				return -EINVAL;
			}
			clear_opt(sbi, DISCARD);
			break;
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
		case Opt_heap:
			clear_opt(sbi, NOHEAP);
			break;
#ifdef CONFIG_F2FS_FS_XATTR
		case Opt_user_xattr:
			set_opt(sbi, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
			break;
		case Opt_noinline_xattr:
			clear_opt(sbi, INLINE_XATTR);
			break;
		case Opt_inline_xattr_size:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			set_opt(sbi, INLINE_XATTR_SIZE);
			F2FS_OPTION(sbi).inline_xattr_size = arg;
			break;
#else
		case Opt_user_xattr:
			f2fs_info(sbi, "user_xattr options not supported");
			break;
		case Opt_nouser_xattr:
			f2fs_info(sbi, "nouser_xattr options not supported");
			break;
		case Opt_inline_xattr:
			f2fs_info(sbi, "inline_xattr options not supported");
			break;
		case Opt_noinline_xattr:
			f2fs_info(sbi, "noinline_xattr options not supported");
			break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
		case Opt_acl:
			f2fs_info(sbi, "acl options not supported");
			break;
		case Opt_noacl:
			f2fs_info(sbi, "noacl options not supported");
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg != 2 && arg != 4 &&
				arg != NR_CURSEG_PERSIST_TYPE)
				return -EINVAL;
			F2FS_OPTION(sbi).active_logs = arg;
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
			break;
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_noinline_dentry:
			clear_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_noflush_merge:
			clear_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_nobarrier:
			set_opt(sbi, NOBARRIER);
			break;
		case Opt_fastboot:
			set_opt(sbi, FASTBOOT);
			break;
		case Opt_extent_cache:
			set_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noextent_cache:
			clear_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			break;
		case Opt_data_flush:
			set_opt(sbi, DATA_FLUSH);
			break;
		case Opt_reserve_root:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (test_opt(sbi, RESERVE_ROOT)) {
				f2fs_info(sbi, "Preserve previous reserve_root=%u",
					F2FS_OPTION(sbi).root_reserved_blocks);
			} else {
				F2FS_OPTION(sbi).root_reserved_blocks = arg;
				set_opt(sbi, RESERVE_ROOT);
			}
			break;
		case Opt_resuid:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			uid = make_kuid(current_user_ns(), arg);
			if (!uid_valid(uid)) {
				f2fs_err(sbi, "Invalid uid value %d", arg);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).s_resuid = uid;
			break;
		case Opt_resgid:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			gid = make_kgid(current_user_ns(), arg);
			if (!gid_valid(gid)) {
				f2fs_err(sbi, "Invalid gid value %d", arg);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).s_resgid = gid;
			break;
		case Opt_mode:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "adaptive")) {
				if (f2fs_sb_has_blkzoned(sbi)) {
					f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature");
					kfree(name);
					return -EINVAL;
				}
				F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
			} else if (!strcmp(name, "lfs")) {
				F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_io_size_bits:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_PAGES)) {
				f2fs_warn(sbi, "Not support %d, larger than %d",
					  1 << arg, BIO_MAX_PAGES);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).write_io_size_bits = arg;
			break;
#ifdef CONFIG_F2FS_FAULT_INJECTION
		case Opt_fault_injection:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
			set_opt(sbi, FAULT_INJECTION);
			break;

		case Opt_fault_type:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			f2fs_build_fault_attr(sbi, 0, arg);
			set_opt(sbi, FAULT_INJECTION);
			break;
#else
		case Opt_fault_injection:
			f2fs_info(sbi, "fault_injection options not supported");
			break;

		case Opt_fault_type:
			f2fs_info(sbi, "fault_type options not supported");
			break;
#endif
		case Opt_lazytime:
			sb->s_flags |= SB_LAZYTIME;
			break;
		case Opt_nolazytime:
			sb->s_flags &= ~SB_LAZYTIME;
			break;
#ifdef CONFIG_QUOTA
		case Opt_quota:
		case Opt_usrquota:
			set_opt(sbi, USRQUOTA);
			break;
		case Opt_grpquota:
			set_opt(sbi, GRPQUOTA);
			break;
		case Opt_prjquota:
			set_opt(sbi, PRJQUOTA);
			break;
		case Opt_usrjquota:
			ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_grpjquota:
			ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_prjjquota:
			ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_offusrjquota:
			ret = f2fs_clear_qf_name(sb, USRQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offgrpjquota:
			ret = f2fs_clear_qf_name(sb, GRPQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offprjjquota:
			ret = f2fs_clear_qf_name(sb, PRJQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_jqfmt_vfsold:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD;
			break;
		case Opt_jqfmt_vfsv0:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0;
			break;
		case Opt_jqfmt_vfsv1:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1;
			break;
		case Opt_noquota:
			clear_opt(sbi, QUOTA);
			clear_opt(sbi, USRQUOTA);
			clear_opt(sbi, GRPQUOTA);
			clear_opt(sbi, PRJQUOTA);
			break;
#else
		case Opt_quota:
		case Opt_usrquota:
		case Opt_grpquota:
		case Opt_prjquota:
		case Opt_usrjquota:
		case Opt_grpjquota:
		case Opt_prjjquota:
		case Opt_offusrjquota:
		case Opt_offgrpjquota:
		case Opt_offprjjquota:
		case Opt_jqfmt_vfsold:
		case Opt_jqfmt_vfsv0:
		case Opt_jqfmt_vfsv1:
		case Opt_noquota:
			f2fs_info(sbi, "quota operations not supported");
			break;
#endif
		case Opt_whint:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "user-based")) {
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_USER;
			} else if (!strcmp(name, "off")) {
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
			} else if (!strcmp(name, "fs-based")) {
				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_alloc:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;

			if (!strcmp(name, "default")) {
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
			} else if (!strcmp(name, "reuse")) {
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_fsync:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "posix")) {
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
			} else if (!strcmp(name, "strict")) {
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
			} else if (!strcmp(name, "nobarrier")) {
				F2FS_OPTION(sbi).fsync_mode =
							FSYNC_MODE_NOBARRIER;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_test_dummy_encryption:
			ret = f2fs_set_test_dummy_encryption(sb, p, &args[0],
							     is_remount);
			if (ret)
				return ret;
			break;
		case Opt_inlinecrypt:
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
			sb->s_flags |= SB_INLINECRYPT;
#else
			f2fs_info(sbi, "inline encryption not supported");
#endif
			break;
		case Opt_checkpoint_disable_cap_perc:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg < 0 || arg > 100)
				return -EINVAL;
			F2FS_OPTION(sbi).unusable_cap_perc = arg;
			set_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_disable_cap:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			F2FS_OPTION(sbi).unusable_cap = arg;
			set_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_disable:
			set_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_enable:
			clear_opt(sbi, DISABLE_CHECKPOINT);
			break;
#ifdef CONFIG_F2FS_FS_COMPRESSION
		case Opt_compress_algorithm:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "lzo")) {
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_LZO;
			} else if (!strcmp(name, "lz4")) {
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_LZ4;
			} else if (!strcmp(name, "zstd")) {
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_ZSTD;
			} else if (!strcmp(name, "lzo-rle")) {
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_LZORLE;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_compress_log_size:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg < MIN_COMPRESS_LOG_SIZE ||
				arg > MAX_COMPRESS_LOG_SIZE) {
				f2fs_err(sbi,
					"Compress cluster log size is out of range");
				return -EINVAL;
			}
			F2FS_OPTION(sbi).compress_log_size = arg;
			break;
		case Opt_compress_extension:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;

			ext = F2FS_OPTION(sbi).extensions;
			ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;

			if (strlen(name) >= F2FS_EXTENSION_LEN ||
				ext_cnt >= COMPRESS_EXT_NUM) {
				f2fs_err(sbi,
					"invalid extension length/number");
				kfree(name);
				return -EINVAL;
			}

			strcpy(ext[ext_cnt], name);
			F2FS_OPTION(sbi).compress_ext_cnt++;
			kfree(name);
			break;
#else
		case Opt_compress_algorithm:
		case Opt_compress_log_size:
		case Opt_compress_extension:
			f2fs_info(sbi, "compression options not supported");
			break;
#endif
		case Opt_atgc:
			set_opt(sbi, ATGC);
			break;
		default:
			f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",
				 p);
			return -EINVAL;
		}
	}
#ifdef CONFIG_QUOTA
	if (f2fs_check_quota_options(sbi))
		return -EINVAL;
#else
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) {
		f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA");
		return -EINVAL;
	}
	if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) {
		f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA");
		return -EINVAL;
	}
#endif
#ifndef CONFIG_UNICODE
	if (f2fs_sb_has_casefold(sbi)) {
		f2fs_err(sbi,
			"Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
		return -EINVAL;
	}
#endif
	/*
	 * The BLKZONED feature indicates that the drive was formatted with
	 * zone alignment optimization. This is optional for host-aware
	 * devices, but mandatory for host-managed zoned block devices.
	 */
#ifndef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_has_blkzoned(sbi)) {
		f2fs_err(sbi, "Zoned block device support is not enabled");
		return -EINVAL;
	}
#endif

	if (F2FS_IO_SIZE_BITS(sbi) && !f2fs_lfs_mode(sbi)) {
		f2fs_err(sbi, "Should set mode=lfs with %uKB-sized IO",
			 F2FS_IO_SIZE_KB(sbi));
		return -EINVAL;
	}

	if (test_opt(sbi, INLINE_XATTR_SIZE)) {
		int min_size, max_size;

		if (!f2fs_sb_has_extra_attr(sbi) ||
			!f2fs_sb_has_flexible_inline_xattr(sbi)) {
			f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off");
			return -EINVAL;
		}
		if (!test_opt(sbi, INLINE_XATTR)) {
			f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option");
			return -EINVAL;
		}

		min_size = sizeof(struct f2fs_xattr_header) / sizeof(__le32);
		max_size = MAX_INLINE_XATTR_SIZE;

		if (F2FS_OPTION(sbi).inline_xattr_size < min_size ||
				F2FS_OPTION(sbi).inline_xattr_size > max_size) {
			f2fs_err(sbi, "inline xattr size is out of range: %d ~ %d",
				 min_size, max_size);
			return -EINVAL;
		}
	}

	if (test_opt(sbi, DISABLE_CHECKPOINT) && f2fs_lfs_mode(sbi)) {
		f2fs_err(sbi, "LFS not compatible with checkpoint=disable\n");
		return -EINVAL;
	}

	/* Not pass down write hints if the number of active logs is lesser
	 * than NR_CURSEG_PERSIST_TYPE.
	 */
	if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_PERSIST_TYPE)
		F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
	return 0;
}

static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	atomic_set(&fi->dirty_pages, 0);
	atomic_set(&fi->i_compr_blocks, 0);
	init_rwsem(&fi->i_sem);
	spin_lock_init(&fi->i_size_lock);
	INIT_LIST_HEAD(&fi->dirty_list);
	INIT_LIST_HEAD(&fi->gdirty_list);
	INIT_LIST_HEAD(&fi->inmem_ilist);
	INIT_LIST_HEAD(&fi->inmem_pages);
	mutex_init(&fi->inmem_lock);
	init_rwsem(&fi->i_gc_rwsem[READ]);
	init_rwsem(&fi->i_gc_rwsem[WRITE]);
	init_rwsem(&fi->i_mmap_sem);
	init_rwsem(&fi->i_xattr_sem);

	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;

	fi->ra_offset = -1;

	return &fi->vfs_inode;
}

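/*
 * ->drop_inode callback: decide whether the inode should be dropped once its
 * last reference is gone, taking in-flight writeback and disabled
 * checkpoints into account.
 */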
static int f2fs_drop_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	/*
	 * during filesystem shutdown, if checkpoint is disabled,
	 * drop useless meta/node dirty pages.
	 */
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi)) {
			trace_f2fs_drop_inode(inode, 1);
			return 1;
		}
	}

	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid evict_inode call simultaneously */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* some remained atomic pages should discarded */
			if (f2fs_is_atomic_file(inode))
				f2fs_drop_inmem_pages(inode);

			/* should remain fi->extent_tree for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			f2fs_i_size_write(inode, 0);

			f2fs_submit_merged_write_cond(F2FS_I_SB(inode),
					inode, NULL, 0, DATA);
			truncate_inode_pages_final(inode->i_mapping);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode);

			sb_end_intwrite(inode->i_sb);

			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		}
		trace_f2fs_drop_inode(inode, 0);
		return 0;
	}
	ret = generic_drop_inode(inode);
	if (!ret)
		ret = fscrypt_drop_inode(inode);
	trace_f2fs_drop_inode(inode, ret);
	return ret;
}

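/*
 * Mark an inode dirty for the next checkpoint; returns 1 if it was already
 * dirty.  With @sync, the inode is also linked on the DIRTY_META list.
 */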
int f2fs_inode_dirtied(struct inode *inode, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret = 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		ret = 1;
	} else {
		set_inode_flag(inode, FI_DIRTY_INODE);
		stat_inc_dirty_inode(sbi, DIRTY_META);
	}
	if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_add_tail(&F2FS_I(inode)->gdirty_list,
				&sbi->inode_list[DIRTY_META]);
		inc_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
	return ret;
}

void f2fs_inode_synced(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return;
	}
	if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_del_init(&F2FS_I(inode)->gdirty_list);
		dec_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	clear_inode_flag(inode, FI_DIRTY_INODE);
	clear_inode_flag(inode, FI_AUTO_RECOVER);
	stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
}

/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty()
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return;

	if (flags == I_DIRTY_TIME)
		return;

	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
		clear_inode_flag(inode, FI_AUTO_RECOVER);

	f2fs_inode_dirtied(inode, false);
}

static void f2fs_free_inode(struct inode *inode)
{
	fscrypt_free_inode(inode);
	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}

static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
	percpu_counter_destroy(&sbi->total_valid_inode_count);
}

static void destroy_device_list(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		blkdev_put(FDEV(i).bdev, FMODE_EXCL);
#ifdef CONFIG_BLK_DEV_ZONED
		kvfree(FDEV(i).blkz_seq);
		kfree(FDEV(i).zone_capacity_blocks);
#endif
	}
	kvfree(sbi->devs);
}

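/*
 * Tear down the filesystem instance at umount time: write a final checkpoint
 * when needed, then release every in-memory structure.
 */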
static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int i;
	bool dropped;

	/* unregister procfs/sysfs entries in advance to avoid race case */
	f2fs_unregister_sysfs(sbi);

	f2fs_quota_off_umount(sb);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * We don't need to do checkpoint when superblock is clean.
	 * But, the previous checkpoint was not done by umount, it needs to do
	 * clean checkpoint again.
	 */
	if ((is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
		f2fs_write_checkpoint(sbi, &cpc);
	}

	/* be sure to wait for any on-going discard commands */
	dropped = f2fs_issue_discard_timeout(sbi);

	if ((f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi)) &&
					!sbi->discard_blks && !dropped) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT | CP_TRIMMED,
		};
		f2fs_write_checkpoint(sbi, &cpc);
	}

	/*
	 * normally superblock is clean, so we need to release this.
	 * In addition, EIO will skip do checkpoint, we need this as well.
	 */
	f2fs_release_ino_entry(sbi, true);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* our cp_error case, we can wait for any writeback page */
	f2fs_flush_merged_writes(sbi);

	f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);

	f2fs_bug_on(sbi, sbi->fsync_node_num);

	iput(sbi->node_inode);
	sbi->node_inode = NULL;

	iput(sbi->meta_inode);
	sbi->meta_inode = NULL;

	/*
	 * iput() can update stat information, if f2fs_write_checkpoint()
	 * above failed with error.
	 */
	f2fs_destroy_stats(sbi);

	/* destroy f2fs internal modules */
	f2fs_destroy_node_manager(sbi);
	f2fs_destroy_segment_manager(sbi);

	f2fs_destroy_post_read_wq(sbi);

	kvfree(sbi->ckpt);

	sb->s_fs_info = NULL;
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->raw_super);

	destroy_device_list(sbi);
	f2fs_destroy_page_array_cache(sbi);
	f2fs_destroy_xattr_caches(sbi);
	mempool_destroy(sbi->write_io_dummy);
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
	fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
	destroy_percpu_info(sbi);
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kvfree(sbi->write_io[i]);
#ifdef CONFIG_UNICODE
	utf8_unload(sb->s_encoding);
#endif
	kfree(sbi);
}

int f2fs_sync_fs(struct super_block *sb, int sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err = 0;

	if (unlikely(f2fs_cp_error(sbi)))
		return 0;
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return 0;

	trace_f2fs_sync_fs(sb, sync);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return -EAGAIN;

	if (sync) {
		struct cp_control cpc;

		cpc.reason = __get_cp_reason(sbi);

		down_write(&sbi->gc_lock);
		err = f2fs_write_checkpoint(sbi, &cpc);
		up_write(&sbi->gc_lock);
	}
	f2fs_trace_ios(NULL, 1);

	return err;
}

static int f2fs_freeze(struct super_block *sb)
{
	if (f2fs_readonly(sb))
		return 0;

	/* IO error happened before */
	if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
		return -EIO;

	/* must be clean, since sync_filesystem() was already called */
	if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
		return -EINVAL;
	return 0;
}

static int f2fs_unfreeze(struct super_block *sb)
{
	return 0;
}

#ifdef CONFIG_QUOTA
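/* Clamp the statfs numbers to the project quota limits of @projid. */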
static int f2fs_statfs_project(struct super_block *sb,
				kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;
	u64 curblock;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	spin_lock(&dquot->dq_dqb_lock);

	limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
					dquot->dq_dqb.dqb_bhardlimit);
	if (limit)
		limit >>= sb->s_blocksize_bits;

	if (limit && buf->f_blocks > limit) {
		curblock = (dquot->dq_dqb.dqb_curspace +
			    dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			 (buf->f_blocks - curblock) : 0;
	}

	limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
					dquot->dq_dqb.dqb_ihardlimit);

	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
	}

	spin_unlock(&dquot->dq_dqb_lock);
	dqput(dquot);
	return 0;
}
#endif

static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count;
	u64 avail_node_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
	buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
						sbi->current_reserved_blocks;

	spin_lock(&sbi->stat_lock);
	if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
		buf->f_bfree = 0;
	else
		buf->f_bfree -= sbi->unusable_block_count;
	spin_unlock(&sbi->stat_lock);

	if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
		buf->f_bavail = buf->f_bfree -
				F2FS_OPTION(sbi).root_reserved_blocks;
	else
		buf->f_bavail = 0;

	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;

	if (avail_node_count > user_block_count) {
		buf->f_files = user_block_count;
		buf->f_ffree = buf->f_bavail;
	} else {
		buf->f_files = avail_node_count;
		buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
					buf->f_bavail);
	}

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid = u64_to_fsid(id);

#ifdef CONFIG_QUOTA
	if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
			sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
		f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
	}
#endif
	return 0;
}

static inline void f2fs_show_quota_options(struct seq_file *seq,
					   struct super_block *sb)
{
#ifdef CONFIG_QUOTA
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (F2FS_OPTION(sbi).s_jquota_fmt) {
		char *fmtname = "";

		switch (F2FS_OPTION(sbi).s_jquota_fmt) {
		case QFMT_VFS_OLD:
			fmtname = "vfsold";
			break;
		case QFMT_VFS_V0:
			fmtname = "vfsv0";
			break;
		case QFMT_VFS_V1:
			fmtname = "vfsv1";
			break;
		}
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
		seq_show_option(seq, "usrjquota",
			F2FS_OPTION(sbi).s_qf_names[USRQUOTA]);

	if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
		seq_show_option(seq, "grpjquota",
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]);

	if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
		seq_show_option(seq, "prjjquota",
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]);
#endif
}

static inline void f2fs_show_compress_options(struct seq_file *seq,
					      struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	char *algtype = "";
	int i;

	if (!f2fs_sb_has_compression(sbi))
		return;

	switch (F2FS_OPTION(sbi).compress_algorithm) {
	case COMPRESS_LZO:
		algtype = "lzo";
		break;
	case COMPRESS_LZ4:
		algtype = "lz4";
		break;
	case COMPRESS_ZSTD:
		algtype = "zstd";
		break;
	case COMPRESS_LZORLE:
		algtype = "lzo-rle";
		break;
	}
	seq_printf(seq, ",compress_algorithm=%s", algtype);

	seq_printf(seq, ",compress_log_size=%u",
			F2FS_OPTION(sbi).compress_log_size);

	for (i = 0; i < F2FS_OPTION(sbi).compress_ext_cnt; i++) {
		seq_printf(seq, ",compress_extension=%s",
			F2FS_OPTION(sbi).extensions[i]);
	}
}

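/* Show the effective mount options in /proc/mounts. */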
static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC)
		seq_printf(seq, ",background_gc=%s", "sync");
	else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_ON)
		seq_printf(seq, ",background_gc=%s", "on");
	else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF)
		seq_printf(seq, ",background_gc=%s", "off");

	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, NORECOVERY))
		seq_puts(seq, ",norecovery");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	else
		seq_puts(seq, ",nodiscard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap");
	else
		seq_puts(seq, ",heap");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
	else
		seq_puts(seq, ",noinline_xattr");
	if (test_opt(sbi, INLINE_XATTR_SIZE))
		seq_printf(seq, ",inline_xattr_size=%u",
					F2FS_OPTION(sbi).inline_xattr_size);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
	else
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	else
		seq_puts(seq, ",noinline_dentry");
	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
	else
		seq_puts(seq, ",noextent_cache");
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");

	seq_puts(seq, ",mode=");
	if (F2FS_OPTION(sbi).fs_mode == FS_MODE_ADAPTIVE)
		seq_puts(seq, "adaptive");
	else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS)
		seq_puts(seq, "lfs");
	seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
	if (test_opt(sbi, RESERVE_ROOT))
		seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
				F2FS_OPTION(sbi).root_reserved_blocks,
				from_kuid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resuid),
				from_kgid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resgid));
	if (F2FS_IO_SIZE_BITS(sbi))
		seq_printf(seq, ",io_bits=%u",
				F2FS_OPTION(sbi).write_io_size_bits);
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (test_opt(sbi, FAULT_INJECTION)) {
		seq_printf(seq, ",fault_injection=%u",
				F2FS_OPTION(sbi).fault_info.inject_rate);
		seq_printf(seq, ",fault_type=%u",
				F2FS_OPTION(sbi).fault_info.inject_type);
	}
#endif
#ifdef CONFIG_QUOTA
	if (test_opt(sbi, QUOTA))
		seq_puts(seq, ",quota");
	if (test_opt(sbi, USRQUOTA))
		seq_puts(seq, ",usrquota");
	if (test_opt(sbi, GRPQUOTA))
		seq_puts(seq, ",grpquota");
	if (test_opt(sbi, PRJQUOTA))
		seq_puts(seq, ",prjquota");
#endif
	f2fs_show_quota_options(seq, sbi->sb);
	if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER)
		seq_printf(seq, ",whint_mode=%s", "user-based");
	else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS)
		seq_printf(seq, ",whint_mode=%s", "fs-based");

	fscrypt_show_test_dummy_encryption(seq, ',', sbi->sb);

	if (sbi->sb->s_flags & SB_INLINECRYPT)
		seq_puts(seq, ",inlinecrypt");

	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT)
		seq_printf(seq, ",alloc_mode=%s", "default");
	else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
1681 seq_printf(seq, ",alloc_mode=%s", "reuse");
1682
David Brazdil0f672f62019-12-10 10:32:29 +00001683 if (test_opt(sbi, DISABLE_CHECKPOINT))
1684 seq_printf(seq, ",checkpoint=disable:%u",
1685 F2FS_OPTION(sbi).unusable_cap);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001686 if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
1687 seq_printf(seq, ",fsync_mode=%s", "posix");
1688 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
1689 seq_printf(seq, ",fsync_mode=%s", "strict");
1690 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER)
1691 seq_printf(seq, ",fsync_mode=%s", "nobarrier");
Olivier Deprez157378f2022-04-04 15:47:50 +02001692
1693#ifdef CONFIG_F2FS_FS_COMPRESSION
1694 f2fs_show_compress_options(seq, sbi->sb);
1695#endif
1696
1697 if (test_opt(sbi, ATGC))
1698 seq_puts(seq, ",atgc");
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001699 return 0;
1700}
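
/*
 * Example (hypothetical device and mount point): with the defaults from
 * default_options() below, the /proc/mounts entry produced by this callback
 * looks roughly like the following (wrapped here for readability):
 *
 *   /dev/sdb1 /mnt f2fs rw,lazytime,background_gc=on,discard,no_heap,
 *   user_xattr,inline_xattr,acl,inline_data,inline_dentry,flush_merge,
 *   extent_cache,mode=adaptive,active_logs=6,alloc_mode=default,
 *   fsync_mode=posix 0 0
 */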
1701
1702static void default_options(struct f2fs_sb_info *sbi)
1703{
1704 /* init some FS parameters */
Olivier Deprez157378f2022-04-04 15:47:50 +02001705 F2FS_OPTION(sbi).active_logs = NR_CURSEG_PERSIST_TYPE;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001706 F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
1707 F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
1708 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
1709 F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001710 F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
1711 F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
Olivier Deprez157378f2022-04-04 15:47:50 +02001712 F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4;
1713 F2FS_OPTION(sbi).compress_log_size = MIN_COMPRESS_LOG_SIZE;
1714 F2FS_OPTION(sbi).compress_ext_cnt = 0;
1715 F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001716
Olivier Deprez157378f2022-04-04 15:47:50 +02001717 sbi->sb->s_flags &= ~SB_INLINECRYPT;
1718
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001719 set_opt(sbi, INLINE_XATTR);
1720 set_opt(sbi, INLINE_DATA);
1721 set_opt(sbi, INLINE_DENTRY);
1722 set_opt(sbi, EXTENT_CACHE);
1723 set_opt(sbi, NOHEAP);
David Brazdil0f672f62019-12-10 10:32:29 +00001724 clear_opt(sbi, DISABLE_CHECKPOINT);
1725 F2FS_OPTION(sbi).unusable_cap = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001726 sbi->sb->s_flags |= SB_LAZYTIME;
1727 set_opt(sbi, FLUSH_MERGE);
David Brazdil0f672f62019-12-10 10:32:29 +00001728 set_opt(sbi, DISCARD);
1729 if (f2fs_sb_has_blkzoned(sbi))
Olivier Deprez157378f2022-04-04 15:47:50 +02001730 F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001731 else
Olivier Deprez157378f2022-04-04 15:47:50 +02001732 F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001733
1734#ifdef CONFIG_F2FS_FS_XATTR
1735 set_opt(sbi, XATTR_USER);
1736#endif
1737#ifdef CONFIG_F2FS_FS_POSIX_ACL
1738 set_opt(sbi, POSIX_ACL);
1739#endif
1740
1741 f2fs_build_fault_attr(sbi, 0, 0);
1742}
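
/*
 * Example (hypothetical): any of the defaults above can be overridden from
 * the mount command line, e.g.
 *
 *   mount -t f2fs -o background_gc=off,noinline_data,compress_algorithm=zstd \
 *         /dev/sdb1 /mnt
 *
 * parse_options() is then run on top of these defaults and adjusts
 * sbi->mount_opt accordingly.
 */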
1743
1744#ifdef CONFIG_QUOTA
1745static int f2fs_enable_quotas(struct super_block *sb);
1746#endif
David Brazdil0f672f62019-12-10 10:32:29 +00001747
1748static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
1749{
1750 unsigned int s_flags = sbi->sb->s_flags;
1751 struct cp_control cpc;
1752 int err = 0;
1753 int ret;
1754 block_t unusable;
1755
1756 if (s_flags & SB_RDONLY) {
1757 f2fs_err(sbi, "checkpoint=disable on readonly fs");
1758 return -EINVAL;
1759 }
1760 sbi->sb->s_flags |= SB_ACTIVE;
1761
1762 f2fs_update_time(sbi, DISABLE_TIME);
1763
1764 while (!f2fs_time_over(sbi, DISABLE_TIME)) {
Olivier Deprez157378f2022-04-04 15:47:50 +02001765 down_write(&sbi->gc_lock);
1766 err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
David Brazdil0f672f62019-12-10 10:32:29 +00001767 if (err == -ENODATA) {
1768 err = 0;
1769 break;
1770 }
1771 if (err && err != -EAGAIN)
1772 break;
1773 }
1774
1775 ret = sync_filesystem(sbi->sb);
1776 if (ret || err) {
1777 err = ret ? ret : err;
1778 goto restore_flag;
1779 }
1780
1781 unusable = f2fs_get_unusable_blocks(sbi);
1782 if (f2fs_disable_cp_again(sbi, unusable)) {
1783 err = -EAGAIN;
1784 goto restore_flag;
1785 }
1786
Olivier Deprez157378f2022-04-04 15:47:50 +02001787 down_write(&sbi->gc_lock);
David Brazdil0f672f62019-12-10 10:32:29 +00001788 cpc.reason = CP_PAUSE;
1789 set_sbi_flag(sbi, SBI_CP_DISABLED);
1790 err = f2fs_write_checkpoint(sbi, &cpc);
1791 if (err)
1792 goto out_unlock;
1793
1794 spin_lock(&sbi->stat_lock);
1795 sbi->unusable_block_count = unusable;
1796 spin_unlock(&sbi->stat_lock);
1797
1798out_unlock:
Olivier Deprez157378f2022-04-04 15:47:50 +02001799 up_write(&sbi->gc_lock);
David Brazdil0f672f62019-12-10 10:32:29 +00001800restore_flag:
Olivier Deprez157378f2022-04-04 15:47:50 +02001801 sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */
David Brazdil0f672f62019-12-10 10:32:29 +00001802 return err;
1803}
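
/*
 * Usage sketch (hypothetical mount point): this path is normally entered via
 * a remount, e.g.
 *
 *   mount -o remount,checkpoint=disable:10% /mnt
 *
 * It garbage-collects until DISABLE_TIME expires or no victim is left, writes
 * a single CP_PAUSE checkpoint, and records the unusable block count that was
 * checked against the configured cap.
 */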
1804
1805static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
1806{
Olivier Deprez157378f2022-04-04 15:47:50 +02001807 int retry = DEFAULT_RETRY_IO_COUNT;
1808
1809 /* flush all dirty data to keep the filesystem consistent */
1810 do {
1811 sync_inodes_sb(sbi->sb);
1812 cond_resched();
1813 congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
1814 } while (get_pages(sbi, F2FS_DIRTY_DATA) && retry--);
1815
1816 if (unlikely(retry < 0))
1817 f2fs_warn(sbi, "checkpoint=enable has some unwritten data.");
1818
1819 down_write(&sbi->gc_lock);
David Brazdil0f672f62019-12-10 10:32:29 +00001820 f2fs_dirty_to_prefree(sbi);
1821
1822 clear_sbi_flag(sbi, SBI_CP_DISABLED);
1823 set_sbi_flag(sbi, SBI_IS_DIRTY);
Olivier Deprez157378f2022-04-04 15:47:50 +02001824 up_write(&sbi->gc_lock);
David Brazdil0f672f62019-12-10 10:32:29 +00001825
1826 f2fs_sync_fs(sbi->sb, 1);
1827}
1828
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001829static int f2fs_remount(struct super_block *sb, int *flags, char *data)
1830{
1831 struct f2fs_sb_info *sbi = F2FS_SB(sb);
1832 struct f2fs_mount_info org_mount_opt;
1833 unsigned long old_sb_flags;
1834 int err;
1835 bool need_restart_gc = false;
1836 bool need_stop_gc = false;
1837 bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
David Brazdil0f672f62019-12-10 10:32:29 +00001838 bool disable_checkpoint = test_opt(sbi, DISABLE_CHECKPOINT);
1839 bool no_io_align = !F2FS_IO_ALIGNED(sbi);
Olivier Deprez157378f2022-04-04 15:47:50 +02001840 bool no_atgc = !test_opt(sbi, ATGC);
David Brazdil0f672f62019-12-10 10:32:29 +00001841 bool checkpoint_changed;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001842#ifdef CONFIG_QUOTA
1843 int i, j;
1844#endif
1845
1846 /*
1847 * Save the old mount options in case we
1848 * need to restore them.
1849 */
1850 org_mount_opt = sbi->mount_opt;
1851 old_sb_flags = sb->s_flags;
1852
1853#ifdef CONFIG_QUOTA
1854 org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
1855 for (i = 0; i < MAXQUOTAS; i++) {
1856 if (F2FS_OPTION(sbi).s_qf_names[i]) {
1857 org_mount_opt.s_qf_names[i] =
1858 kstrdup(F2FS_OPTION(sbi).s_qf_names[i],
1859 GFP_KERNEL);
1860 if (!org_mount_opt.s_qf_names[i]) {
1861 for (j = 0; j < i; j++)
Olivier Deprez157378f2022-04-04 15:47:50 +02001862 kfree(org_mount_opt.s_qf_names[j]);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001863 return -ENOMEM;
1864 }
1865 } else {
1866 org_mount_opt.s_qf_names[i] = NULL;
1867 }
1868 }
1869#endif
1870
1871 /* recover superblocks we couldn't write due to previous RO mount */
1872 if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
1873 err = f2fs_commit_super(sbi, false);
David Brazdil0f672f62019-12-10 10:32:29 +00001874 f2fs_info(sbi, "Try to recover all the superblocks, ret: %d",
1875 err);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001876 if (!err)
1877 clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
1878 }
1879
1880 default_options(sbi);
1881
1882 /* parse mount options */
Olivier Deprez157378f2022-04-04 15:47:50 +02001883 err = parse_options(sb, data, true);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001884 if (err)
1885 goto restore_opts;
David Brazdil0f672f62019-12-10 10:32:29 +00001886 checkpoint_changed =
1887 disable_checkpoint != test_opt(sbi, DISABLE_CHECKPOINT);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001888
1889 /*
1890 * Both the previous and the new state of the filesystem are RO,
1891 * so skip checking the GC and FLUSH_MERGE conditions.
1892 */
1893 if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
1894 goto skip;
1895
1896#ifdef CONFIG_QUOTA
1897 if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
1898 err = dquot_suspend(sb, -1);
1899 if (err < 0)
1900 goto restore_opts;
David Brazdil0f672f62019-12-10 10:32:29 +00001901 } else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001902 /* dquot_resume needs RW */
1903 sb->s_flags &= ~SB_RDONLY;
1904 if (sb_any_quota_suspended(sb)) {
1905 dquot_resume(sb, -1);
David Brazdil0f672f62019-12-10 10:32:29 +00001906 } else if (f2fs_sb_has_quota_ino(sbi)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001907 err = f2fs_enable_quotas(sb);
1908 if (err)
1909 goto restore_opts;
1910 }
1911 }
1912#endif
Olivier Deprez157378f2022-04-04 15:47:50 +02001913 /* disallow enabling atgc dynamically */
1914 if (no_atgc == !!test_opt(sbi, ATGC)) {
1915 err = -EINVAL;
1916 f2fs_warn(sbi, "switch atgc option is not allowed");
1917 goto restore_opts;
1918 }
1919
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001920 /* disallow enabling/disabling extent_cache dynamically */
1921 if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
1922 err = -EINVAL;
David Brazdil0f672f62019-12-10 10:32:29 +00001923 f2fs_warn(sbi, "switch extent_cache option is not allowed");
1924 goto restore_opts;
1925 }
1926
1927 if (no_io_align == !!F2FS_IO_ALIGNED(sbi)) {
1928 err = -EINVAL;
1929 f2fs_warn(sbi, "switch io_bits option is not allowed");
1930 goto restore_opts;
1931 }
1932
1933 if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
1934 err = -EINVAL;
1935 f2fs_warn(sbi, "disabling checkpoint not compatible with read-only");
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001936 goto restore_opts;
1937 }
1938
1939 * We stop the GC thread if the FS is mounted as RO
1940 * or if background_gc=off is passed as a mount
1941 * option. Also sync the filesystem.
1942 * option. Also sync the filesystem.
1943 */
Olivier Deprez157378f2022-04-04 15:47:50 +02001944 if ((*flags & SB_RDONLY) ||
1945 F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001946 if (sbi->gc_thread) {
1947 f2fs_stop_gc_thread(sbi);
1948 need_restart_gc = true;
1949 }
1950 } else if (!sbi->gc_thread) {
1951 err = f2fs_start_gc_thread(sbi);
1952 if (err)
1953 goto restore_opts;
1954 need_stop_gc = true;
1955 }
1956
1957 if (*flags & SB_RDONLY ||
1958 F2FS_OPTION(sbi).whint_mode != org_mount_opt.whint_mode) {
1959 writeback_inodes_sb(sb, WB_REASON_SYNC);
1960 sync_inodes_sb(sb);
1961
1962 set_sbi_flag(sbi, SBI_IS_DIRTY);
1963 set_sbi_flag(sbi, SBI_IS_CLOSE);
1964 f2fs_sync_fs(sb, 1);
1965 clear_sbi_flag(sbi, SBI_IS_CLOSE);
1966 }
1967
David Brazdil0f672f62019-12-10 10:32:29 +00001968 if (checkpoint_changed) {
1969 if (test_opt(sbi, DISABLE_CHECKPOINT)) {
1970 err = f2fs_disable_checkpoint(sbi);
1971 if (err)
1972 goto restore_gc;
1973 } else {
1974 f2fs_enable_checkpoint(sbi);
1975 }
1976 }
1977
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001978 /*
1979 * We stop the issue_flush thread if the FS is mounted as RO
1980 * or if flush_merge is not passed as a mount option.
1981 */
1982 if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
1983 clear_opt(sbi, FLUSH_MERGE);
1984 f2fs_destroy_flush_cmd_control(sbi, false);
1985 } else {
1986 err = f2fs_create_flush_cmd_control(sbi);
1987 if (err)
1988 goto restore_gc;
1989 }
1990skip:
1991#ifdef CONFIG_QUOTA
1992 /* Release old quota file names */
1993 for (i = 0; i < MAXQUOTAS; i++)
Olivier Deprez157378f2022-04-04 15:47:50 +02001994 kfree(org_mount_opt.s_qf_names[i]);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001995#endif
1996 /* Update the POSIXACL Flag */
1997 sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
1998 (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
1999
2000 limit_reserve_root(sbi);
Olivier Deprez0e641232021-09-23 10:07:05 +02002001 adjust_unusable_cap_perc(sbi);
David Brazdil0f672f62019-12-10 10:32:29 +00002002 *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002003 return 0;
2004restore_gc:
2005 if (need_restart_gc) {
2006 if (f2fs_start_gc_thread(sbi))
David Brazdil0f672f62019-12-10 10:32:29 +00002007 f2fs_warn(sbi, "background gc thread has stopped");
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002008 } else if (need_stop_gc) {
2009 f2fs_stop_gc_thread(sbi);
2010 }
2011restore_opts:
2012#ifdef CONFIG_QUOTA
2013 F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
2014 for (i = 0; i < MAXQUOTAS; i++) {
Olivier Deprez157378f2022-04-04 15:47:50 +02002015 kfree(F2FS_OPTION(sbi).s_qf_names[i]);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002016 F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
2017 }
2018#endif
2019 sbi->mount_opt = org_mount_opt;
2020 sb->s_flags = old_sb_flags;
2021 return err;
2022}
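
/*
 * Example (hypothetical mount point): the guards above make some option
 * changes impossible at remount time, e.g.
 *
 *   mount -o remount,noextent_cache /mnt      fails with -EINVAL
 *   mount -o remount,background_gc=off /mnt   succeeds and stops the GC thread
 */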
2023
2024#ifdef CONFIG_QUOTA
2025/* Read data from quotafile */
2026static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
2027 size_t len, loff_t off)
2028{
2029 struct inode *inode = sb_dqopt(sb)->files[type];
2030 struct address_space *mapping = inode->i_mapping;
2031 block_t blkidx = F2FS_BYTES_TO_BLK(off);
2032 int offset = off & (sb->s_blocksize - 1);
2033 int tocopy;
2034 size_t toread;
2035 loff_t i_size = i_size_read(inode);
2036 struct page *page;
2037 char *kaddr;
2038
2039 if (off > i_size)
2040 return 0;
2041
2042 if (off + len > i_size)
2043 len = i_size - off;
2044 toread = len;
2045 while (toread > 0) {
2046 tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
2047repeat:
2048 page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
2049 if (IS_ERR(page)) {
2050 if (PTR_ERR(page) == -ENOMEM) {
Olivier Deprez157378f2022-04-04 15:47:50 +02002051 congestion_wait(BLK_RW_ASYNC,
2052 DEFAULT_IO_TIMEOUT);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002053 goto repeat;
2054 }
David Brazdil0f672f62019-12-10 10:32:29 +00002055 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002056 return PTR_ERR(page);
2057 }
2058
2059 lock_page(page);
2060
2061 if (unlikely(page->mapping != mapping)) {
2062 f2fs_put_page(page, 1);
2063 goto repeat;
2064 }
2065 if (unlikely(!PageUptodate(page))) {
2066 f2fs_put_page(page, 1);
David Brazdil0f672f62019-12-10 10:32:29 +00002067 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002068 return -EIO;
2069 }
2070
2071 kaddr = kmap_atomic(page);
2072 memcpy(data, kaddr + offset, tocopy);
2073 kunmap_atomic(kaddr);
2074 f2fs_put_page(page, 1);
2075
2076 offset = 0;
2077 toread -= tocopy;
2078 data += tocopy;
2079 blkidx++;
2080 }
2081 return len;
2082}
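
/*
 * Worked example of the arithmetic above, assuming a 4 KiB block size:
 * a read of len = 6000 bytes at off = 5000 yields blkidx = 1 (5000 >> 12)
 * and offset = 904 (5000 & 4095). The first pass copies tocopy =
 * 4096 - 904 = 3192 bytes out of block 1, the second pass copies the
 * remaining 2808 bytes from the start of block 2.
 */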
2083
2084/* Write to quotafile */
2085static ssize_t f2fs_quota_write(struct super_block *sb, int type,
2086 const char *data, size_t len, loff_t off)
2087{
2088 struct inode *inode = sb_dqopt(sb)->files[type];
2089 struct address_space *mapping = inode->i_mapping;
2090 const struct address_space_operations *a_ops = mapping->a_ops;
2091 int offset = off & (sb->s_blocksize - 1);
2092 size_t towrite = len;
2093 struct page *page;
Olivier Deprez0e641232021-09-23 10:07:05 +02002094 void *fsdata = NULL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002095 char *kaddr;
2096 int err = 0;
2097 int tocopy;
2098
2099 while (towrite > 0) {
2100 tocopy = min_t(unsigned long, sb->s_blocksize - offset,
2101 towrite);
2102retry:
2103 err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
Olivier Deprez0e641232021-09-23 10:07:05 +02002104 &page, &fsdata);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002105 if (unlikely(err)) {
2106 if (err == -ENOMEM) {
Olivier Deprez157378f2022-04-04 15:47:50 +02002107 congestion_wait(BLK_RW_ASYNC,
2108 DEFAULT_IO_TIMEOUT);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002109 goto retry;
2110 }
David Brazdil0f672f62019-12-10 10:32:29 +00002111 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002112 break;
2113 }
2114
2115 kaddr = kmap_atomic(page);
2116 memcpy(kaddr + offset, data, tocopy);
2117 kunmap_atomic(kaddr);
2118 flush_dcache_page(page);
2119
2120 a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
Olivier Deprez0e641232021-09-23 10:07:05 +02002121 page, fsdata);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002122 offset = 0;
2123 towrite -= tocopy;
2124 off += tocopy;
2125 data += tocopy;
2126 cond_resched();
2127 }
2128
2129 if (len == towrite)
2130 return err;
2131 inode->i_mtime = inode->i_ctime = current_time(inode);
2132 f2fs_mark_inode_dirty_sync(inode, false);
2133 return len - towrite;
2134}
2135
2136static struct dquot **f2fs_get_dquots(struct inode *inode)
2137{
2138 return F2FS_I(inode)->i_dquot;
2139}
2140
2141static qsize_t *f2fs_get_reserved_space(struct inode *inode)
2142{
2143 return &F2FS_I(inode)->i_reserved_quota;
2144}
2145
2146static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
2147{
David Brazdil0f672f62019-12-10 10:32:29 +00002148 if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) {
2149 f2fs_err(sbi, "quota sysfile may be corrupted, skip loading it");
2150 return 0;
2151 }
2152
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002153 return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
2154 F2FS_OPTION(sbi).s_jquota_fmt, type);
2155}
2156
2157int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
2158{
2159 int enabled = 0;
2160 int i, err;
2161
David Brazdil0f672f62019-12-10 10:32:29 +00002162 if (f2fs_sb_has_quota_ino(sbi) && rdonly) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002163 err = f2fs_enable_quotas(sbi->sb);
2164 if (err) {
David Brazdil0f672f62019-12-10 10:32:29 +00002165 f2fs_err(sbi, "Cannot turn on quota_ino: %d", err);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002166 return 0;
2167 }
2168 return 1;
2169 }
2170
2171 for (i = 0; i < MAXQUOTAS; i++) {
2172 if (F2FS_OPTION(sbi).s_qf_names[i]) {
2173 err = f2fs_quota_on_mount(sbi, i);
2174 if (!err) {
2175 enabled = 1;
2176 continue;
2177 }
David Brazdil0f672f62019-12-10 10:32:29 +00002178 f2fs_err(sbi, "Cannot turn on quotas: %d on %d",
2179 err, i);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002180 }
2181 }
2182 return enabled;
2183}
2184
2185static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
2186 unsigned int flags)
2187{
2188 struct inode *qf_inode;
2189 unsigned long qf_inum;
2190 int err;
2191
David Brazdil0f672f62019-12-10 10:32:29 +00002192 BUG_ON(!f2fs_sb_has_quota_ino(F2FS_SB(sb)));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002193
2194 qf_inum = f2fs_qf_ino(sb, type);
2195 if (!qf_inum)
2196 return -EPERM;
2197
2198 qf_inode = f2fs_iget(sb, qf_inum);
2199 if (IS_ERR(qf_inode)) {
David Brazdil0f672f62019-12-10 10:32:29 +00002200 f2fs_err(F2FS_SB(sb), "Bad quota inode %u:%lu", type, qf_inum);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002201 return PTR_ERR(qf_inode);
2202 }
2203
2204 /* Don't account quota for quota files to avoid recursion */
2205 qf_inode->i_flags |= S_NOQUOTA;
Olivier Deprez157378f2022-04-04 15:47:50 +02002206 err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002207 iput(qf_inode);
2208 return err;
2209}
2210
2211static int f2fs_enable_quotas(struct super_block *sb)
2212{
David Brazdil0f672f62019-12-10 10:32:29 +00002213 struct f2fs_sb_info *sbi = F2FS_SB(sb);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002214 int type, err = 0;
2215 unsigned long qf_inum;
2216 bool quota_mopt[MAXQUOTAS] = {
David Brazdil0f672f62019-12-10 10:32:29 +00002217 test_opt(sbi, USRQUOTA),
2218 test_opt(sbi, GRPQUOTA),
2219 test_opt(sbi, PRJQUOTA),
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002220 };
2221
David Brazdil0f672f62019-12-10 10:32:29 +00002222 if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) {
2223 f2fs_err(sbi, "quota file may be corrupted, skip loading it");
2224 return 0;
2225 }
2226
2227 sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
2228
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002229 for (type = 0; type < MAXQUOTAS; type++) {
2230 qf_inum = f2fs_qf_ino(sb, type);
2231 if (qf_inum) {
2232 err = f2fs_quota_enable(sb, type, QFMT_VFS_V1,
2233 DQUOT_USAGE_ENABLED |
2234 (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
2235 if (err) {
David Brazdil0f672f62019-12-10 10:32:29 +00002236 f2fs_err(sbi, "Failed to enable quota tracking (type=%d, err=%d). Please run fsck to fix.",
2237 type, err);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002238 for (type--; type >= 0; type--)
2239 dquot_quota_off(sb, type);
David Brazdil0f672f62019-12-10 10:32:29 +00002240 set_sbi_flag(F2FS_SB(sb),
2241 SBI_QUOTA_NEED_REPAIR);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002242 return err;
2243 }
2244 }
2245 }
2246 return 0;
2247}
2248
Olivier Deprez0e641232021-09-23 10:07:05 +02002249static int f2fs_quota_sync_file(struct f2fs_sb_info *sbi, int type)
2250{
2251 struct quota_info *dqopt = sb_dqopt(sbi->sb);
2252 struct address_space *mapping = dqopt->files[type]->i_mapping;
2253 int ret = 0;
2254
2255 ret = dquot_writeback_dquots(sbi->sb, type);
2256 if (ret)
2257 goto out;
2258
2259 ret = filemap_fdatawrite(mapping);
2260 if (ret)
2261 goto out;
2262
2263 /* if we are using journalled quota */
2264 if (is_journalled_quota(sbi))
2265 goto out;
2266
2267 ret = filemap_fdatawait(mapping);
2268
2269 truncate_inode_pages(&dqopt->files[type]->i_data, 0);
2270out:
2271 if (ret)
2272 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2273 return ret;
2274}
2275
David Brazdil0f672f62019-12-10 10:32:29 +00002276int f2fs_quota_sync(struct super_block *sb, int type)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002277{
David Brazdil0f672f62019-12-10 10:32:29 +00002278 struct f2fs_sb_info *sbi = F2FS_SB(sb);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002279 struct quota_info *dqopt = sb_dqopt(sb);
2280 int cnt;
2281 int ret = 0;
2282
David Brazdil0f672f62019-12-10 10:32:29 +00002283 /*
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002284 * Now that everything is written we can discard the pagecache so
2285 * that userspace sees the changes.
2286 */
2287 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
David Brazdil0f672f62019-12-10 10:32:29 +00002288
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002289 if (type != -1 && cnt != type)
2290 continue;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002291
Olivier Deprez0e641232021-09-23 10:07:05 +02002292 if (!sb_has_quota_active(sb, cnt))
2293 continue;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002294
2295 inode_lock(dqopt->files[cnt]);
Olivier Deprez0e641232021-09-23 10:07:05 +02002296
2297 /*
2298 * do_quotactl
2299 * f2fs_quota_sync
2300 * down_read(quota_sem)
2301 * dquot_writeback_dquots()
2302 * f2fs_dquot_commit
2303 * block_operation
2304 * down_read(quota_sem)
2305 */
2306 f2fs_lock_op(sbi);
2307 down_read(&sbi->quota_sem);
2308
2309 ret = f2fs_quota_sync_file(sbi, cnt);
2310
2311 up_read(&sbi->quota_sem);
2312 f2fs_unlock_op(sbi);
2313
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002314 inode_unlock(dqopt->files[cnt]);
Olivier Deprez0e641232021-09-23 10:07:05 +02002315
2316 if (ret)
2317 break;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002318 }
David Brazdil0f672f62019-12-10 10:32:29 +00002319 return ret;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002320}
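
/*
 * Sketch of a userspace trigger (device path hypothetical): a Q_SYNC quotactl
 * reaches this function through f2fs_quotactl_ops.quota_sync, e.g.
 *
 *   quotactl(QCMD(Q_SYNC, USRQUOTA), "/dev/sdb1", 0, NULL);
 */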
2321
2322static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
2323 const struct path *path)
2324{
2325 struct inode *inode;
2326 int err;
2327
David Brazdil0f672f62019-12-10 10:32:29 +00002328 /* if the quota sysfile exists, deny enabling quota with a specific file */
2329 if (f2fs_sb_has_quota_ino(F2FS_SB(sb))) {
2330 f2fs_err(F2FS_SB(sb), "quota sysfile already exists");
2331 return -EBUSY;
2332 }
2333
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002334 err = f2fs_quota_sync(sb, type);
2335 if (err)
2336 return err;
2337
2338 err = dquot_quota_on(sb, type, format_id, path);
2339 if (err)
2340 return err;
2341
2342 inode = d_inode(path->dentry);
2343
2344 inode_lock(inode);
2345 F2FS_I(inode)->i_flags |= F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL;
David Brazdil0f672f62019-12-10 10:32:29 +00002346 f2fs_set_inode_flags(inode);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002347 inode_unlock(inode);
2348 f2fs_mark_inode_dirty_sync(inode, false);
2349
2350 return 0;
2351}
2352
David Brazdil0f672f62019-12-10 10:32:29 +00002353static int __f2fs_quota_off(struct super_block *sb, int type)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002354{
2355 struct inode *inode = sb_dqopt(sb)->files[type];
2356 int err;
2357
2358 if (!inode || !igrab(inode))
2359 return dquot_quota_off(sb, type);
2360
2361 err = f2fs_quota_sync(sb, type);
2362 if (err)
2363 goto out_put;
2364
2365 err = dquot_quota_off(sb, type);
David Brazdil0f672f62019-12-10 10:32:29 +00002366 if (err || f2fs_sb_has_quota_ino(F2FS_SB(sb)))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002367 goto out_put;
2368
2369 inode_lock(inode);
2370 F2FS_I(inode)->i_flags &= ~(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL);
David Brazdil0f672f62019-12-10 10:32:29 +00002371 f2fs_set_inode_flags(inode);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002372 inode_unlock(inode);
2373 f2fs_mark_inode_dirty_sync(inode, false);
2374out_put:
2375 iput(inode);
2376 return err;
2377}
2378
David Brazdil0f672f62019-12-10 10:32:29 +00002379static int f2fs_quota_off(struct super_block *sb, int type)
2380{
2381 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2382 int err;
2383
2384 err = __f2fs_quota_off(sb, type);
2385
2386 /*
2387 * quotactl can shut down journalled quota, resulting in inconsistency
2388 * between the quota records and fs data after subsequent updates; tag the
2389 * flag to let fsck be aware of it.
2390 */
2391 if (is_journalled_quota(sbi))
2392 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2393 return err;
2394}
2395
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002396void f2fs_quota_off_umount(struct super_block *sb)
2397{
2398 int type;
2399 int err;
2400
2401 for (type = 0; type < MAXQUOTAS; type++) {
David Brazdil0f672f62019-12-10 10:32:29 +00002402 err = __f2fs_quota_off(sb, type);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002403 if (err) {
2404 int ret = dquot_quota_off(sb, type);
2405
David Brazdil0f672f62019-12-10 10:32:29 +00002406 f2fs_err(F2FS_SB(sb), "Fail to turn off disk quota (type: %d, err: %d, ret:%d), Please run fsck to fix it.",
2407 type, err, ret);
2408 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002409 }
2410 }
David Brazdil0f672f62019-12-10 10:32:29 +00002411 /*
2412 * In case of checkpoint=disable, we must flush quota blocks here;
2413 * otherwise end_io could hit a NULL node_inode dereference, since
2414 * put_super has already dropped it.
2415 */
2416 sync_filesystem(sb);
2417}
2418
2419static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
2420{
2421 struct quota_info *dqopt = sb_dqopt(sb);
2422 int type;
2423
2424 for (type = 0; type < MAXQUOTAS; type++) {
2425 if (!dqopt->files[type])
2426 continue;
2427 f2fs_inode_synced(dqopt->files[type]);
2428 }
2429}
2430
2431static int f2fs_dquot_commit(struct dquot *dquot)
2432{
2433 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
2434 int ret;
2435
Olivier Deprez157378f2022-04-04 15:47:50 +02002436 down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING);
David Brazdil0f672f62019-12-10 10:32:29 +00002437 ret = dquot_commit(dquot);
2438 if (ret < 0)
2439 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2440 up_read(&sbi->quota_sem);
2441 return ret;
2442}
2443
2444static int f2fs_dquot_acquire(struct dquot *dquot)
2445{
2446 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
2447 int ret;
2448
2449 down_read(&sbi->quota_sem);
2450 ret = dquot_acquire(dquot);
2451 if (ret < 0)
2452 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2453 up_read(&sbi->quota_sem);
2454 return ret;
2455}
2456
2457static int f2fs_dquot_release(struct dquot *dquot)
2458{
2459 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
Olivier Deprez157378f2022-04-04 15:47:50 +02002460 int ret = dquot_release(dquot);
David Brazdil0f672f62019-12-10 10:32:29 +00002461
David Brazdil0f672f62019-12-10 10:32:29 +00002462 if (ret < 0)
2463 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
David Brazdil0f672f62019-12-10 10:32:29 +00002464 return ret;
2465}
2466
2467static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot)
2468{
2469 struct super_block *sb = dquot->dq_sb;
2470 struct f2fs_sb_info *sbi = F2FS_SB(sb);
Olivier Deprez157378f2022-04-04 15:47:50 +02002471 int ret = dquot_mark_dquot_dirty(dquot);
David Brazdil0f672f62019-12-10 10:32:29 +00002472
2473 /* if we are using journalled quota */
2474 if (is_journalled_quota(sbi))
2475 set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
2476
David Brazdil0f672f62019-12-10 10:32:29 +00002477 return ret;
2478}
2479
2480static int f2fs_dquot_commit_info(struct super_block *sb, int type)
2481{
2482 struct f2fs_sb_info *sbi = F2FS_SB(sb);
Olivier Deprez157378f2022-04-04 15:47:50 +02002483 int ret = dquot_commit_info(sb, type);
David Brazdil0f672f62019-12-10 10:32:29 +00002484
David Brazdil0f672f62019-12-10 10:32:29 +00002485 if (ret < 0)
2486 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
David Brazdil0f672f62019-12-10 10:32:29 +00002487 return ret;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002488}
2489
2490static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
2491{
2492 *projid = F2FS_I(inode)->i_projid;
2493 return 0;
2494}
2495
2496static const struct dquot_operations f2fs_quota_operations = {
2497 .get_reserved_space = f2fs_get_reserved_space,
David Brazdil0f672f62019-12-10 10:32:29 +00002498 .write_dquot = f2fs_dquot_commit,
2499 .acquire_dquot = f2fs_dquot_acquire,
2500 .release_dquot = f2fs_dquot_release,
2501 .mark_dirty = f2fs_dquot_mark_dquot_dirty,
2502 .write_info = f2fs_dquot_commit_info,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002503 .alloc_dquot = dquot_alloc,
2504 .destroy_dquot = dquot_destroy,
2505 .get_projid = f2fs_get_projid,
2506 .get_next_id = dquot_get_next_id,
2507};
2508
2509static const struct quotactl_ops f2fs_quotactl_ops = {
2510 .quota_on = f2fs_quota_on,
2511 .quota_off = f2fs_quota_off,
2512 .quota_sync = f2fs_quota_sync,
2513 .get_state = dquot_get_state,
2514 .set_info = dquot_set_dqinfo,
2515 .get_dqblk = dquot_get_dqblk,
2516 .set_dqblk = dquot_set_dqblk,
2517 .get_nextdqblk = dquot_get_next_dqblk,
2518};
2519#else
David Brazdil0f672f62019-12-10 10:32:29 +00002520int f2fs_quota_sync(struct super_block *sb, int type)
2521{
2522 return 0;
2523}
2524
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002525void f2fs_quota_off_umount(struct super_block *sb)
2526{
2527}
2528#endif
2529
2530static const struct super_operations f2fs_sops = {
2531 .alloc_inode = f2fs_alloc_inode,
David Brazdil0f672f62019-12-10 10:32:29 +00002532 .free_inode = f2fs_free_inode,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002533 .drop_inode = f2fs_drop_inode,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002534 .write_inode = f2fs_write_inode,
2535 .dirty_inode = f2fs_dirty_inode,
2536 .show_options = f2fs_show_options,
2537#ifdef CONFIG_QUOTA
2538 .quota_read = f2fs_quota_read,
2539 .quota_write = f2fs_quota_write,
2540 .get_dquots = f2fs_get_dquots,
2541#endif
2542 .evict_inode = f2fs_evict_inode,
2543 .put_super = f2fs_put_super,
2544 .sync_fs = f2fs_sync_fs,
2545 .freeze_fs = f2fs_freeze,
2546 .unfreeze_fs = f2fs_unfreeze,
2547 .statfs = f2fs_statfs,
2548 .remount_fs = f2fs_remount,
2549};
2550
David Brazdil0f672f62019-12-10 10:32:29 +00002551#ifdef CONFIG_FS_ENCRYPTION
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002552static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
2553{
2554 return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
2555 F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
2556 ctx, len, NULL);
2557}
2558
2559static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
2560 void *fs_data)
2561{
2562 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2563
2564 /*
2565 * Encrypting the root directory is not allowed because fsck
2566 * expects the lost+found directory to exist and remain unencrypted
2567 * if the LOST_FOUND feature is enabled.
2569 */
David Brazdil0f672f62019-12-10 10:32:29 +00002570 if (f2fs_sb_has_lost_found(sbi) &&
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002571 inode->i_ino == F2FS_ROOT_INO(sbi))
2572 return -EPERM;
2573
2574 return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
2575 F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
2576 ctx, len, fs_data, XATTR_CREATE);
2577}
2578
Olivier Deprez157378f2022-04-04 15:47:50 +02002579static const union fscrypt_policy *f2fs_get_dummy_policy(struct super_block *sb)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002580{
Olivier Deprez157378f2022-04-04 15:47:50 +02002581 return F2FS_OPTION(F2FS_SB(sb)).dummy_enc_policy.policy;
2582}
2583
2584static bool f2fs_has_stable_inodes(struct super_block *sb)
2585{
2586 return true;
2587}
2588
2589static void f2fs_get_ino_and_lblk_bits(struct super_block *sb,
2590 int *ino_bits_ret, int *lblk_bits_ret)
2591{
2592 *ino_bits_ret = 8 * sizeof(nid_t);
2593 *lblk_bits_ret = 8 * sizeof(block_t);
2594}
2595
2596static int f2fs_get_num_devices(struct super_block *sb)
2597{
2598 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2599
2600 if (f2fs_is_multi_device(sbi))
2601 return sbi->s_ndevs;
2602 return 1;
2603}
2604
2605static void f2fs_get_devices(struct super_block *sb,
2606 struct request_queue **devs)
2607{
2608 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2609 int i;
2610
2611 for (i = 0; i < sbi->s_ndevs; i++)
2612 devs[i] = bdev_get_queue(FDEV(i).bdev);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002613}
2614
2615static const struct fscrypt_operations f2fs_cryptops = {
Olivier Deprez157378f2022-04-04 15:47:50 +02002616 .key_prefix = "f2fs:",
2617 .get_context = f2fs_get_context,
2618 .set_context = f2fs_set_context,
2619 .get_dummy_policy = f2fs_get_dummy_policy,
2620 .empty_dir = f2fs_empty_dir,
2621 .max_namelen = F2FS_NAME_LEN,
2622 .has_stable_inodes = f2fs_has_stable_inodes,
2623 .get_ino_and_lblk_bits = f2fs_get_ino_and_lblk_bits,
2624 .get_num_devices = f2fs_get_num_devices,
2625 .get_devices = f2fs_get_devices,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002626};
2627#endif
2628
2629static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
2630 u64 ino, u32 generation)
2631{
2632 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2633 struct inode *inode;
2634
2635 if (f2fs_check_nid_range(sbi, ino))
2636 return ERR_PTR(-ESTALE);
2637
2638 /*
2639 * f2fs_iget isn't quite right if the inode is currently unallocated!
2640 * However, f2fs_iget currently does appropriate checks to handle stale
2641 * inodes, so everything is OK.
2642 */
2643 inode = f2fs_iget(sb, ino);
2644 if (IS_ERR(inode))
2645 return ERR_CAST(inode);
2646 if (unlikely(generation && inode->i_generation != generation)) {
2647 /* we didn't find the right inode.. */
2648 iput(inode);
2649 return ERR_PTR(-ESTALE);
2650 }
2651 return inode;
2652}
2653
2654static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
2655 int fh_len, int fh_type)
2656{
2657 return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
2658 f2fs_nfs_get_inode);
2659}
2660
2661static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
2662 int fh_len, int fh_type)
2663{
2664 return generic_fh_to_parent(sb, fid, fh_len, fh_type,
2665 f2fs_nfs_get_inode);
2666}
2667
2668static const struct export_operations f2fs_export_ops = {
2669 .fh_to_dentry = f2fs_fh_to_dentry,
2670 .fh_to_parent = f2fs_fh_to_parent,
2671 .get_parent = f2fs_get_parent,
2672};
2673
2674static loff_t max_file_blocks(void)
2675{
2676 loff_t result = 0;
David Brazdil0f672f62019-12-10 10:32:29 +00002677 loff_t leaf_count = DEF_ADDRS_PER_BLOCK;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002678
2679 /*
2680 * note: previously, result was equal to (DEF_ADDRS_PER_INODE -
2681 * DEFAULT_INLINE_XATTR_ADDRS), but now f2fs tries to reserve more
2682 * space in inode.i_addr, so it is safer to start result at zero.
2684 */
2685
2686 /* two direct node blocks */
2687 result += (leaf_count * 2);
2688
2689 /* two indirect node blocks */
2690 leaf_count *= NIDS_PER_BLOCK;
2691 result += (leaf_count * 2);
2692
2693 /* one double indirect node block */
2694 leaf_count *= NIDS_PER_BLOCK;
2695 result += leaf_count;
2696
2697 return result;
2698}
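
/*
 * Worked example (assuming DEF_ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018 and
 * 4 KiB blocks): result = 2 * 1018 + 2 * 1018^2 + 1018^3 = 1,057,052,516
 * blocks, which caps a single file at roughly 3.94 TiB.
 */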
2699
2700static int __f2fs_commit_super(struct buffer_head *bh,
2701 struct f2fs_super_block *super)
2702{
2703 lock_buffer(bh);
2704 if (super)
2705 memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
2706 set_buffer_dirty(bh);
2707 unlock_buffer(bh);
2708
2709 /* it's a rare case, so we can do FUA all the time */
2710 return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
2711}
2712
2713static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
2714 struct buffer_head *bh)
2715{
2716 struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
2717 (bh->b_data + F2FS_SUPER_OFFSET);
2718 struct super_block *sb = sbi->sb;
2719 u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
2720 u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
2721 u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
2722 u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
2723 u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
2724 u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
2725 u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
2726 u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
2727 u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
2728 u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
2729 u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
2730 u32 segment_count = le32_to_cpu(raw_super->segment_count);
2731 u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
2732 u64 main_end_blkaddr = main_blkaddr +
2733 (segment_count_main << log_blocks_per_seg);
2734 u64 seg_end_blkaddr = segment0_blkaddr +
2735 (segment_count << log_blocks_per_seg);
2736
2737 if (segment0_blkaddr != cp_blkaddr) {
David Brazdil0f672f62019-12-10 10:32:29 +00002738 f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
2739 segment0_blkaddr, cp_blkaddr);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002740 return true;
2741 }
2742
2743 if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
2744 sit_blkaddr) {
David Brazdil0f672f62019-12-10 10:32:29 +00002745 f2fs_info(sbi, "Wrong CP boundary, start(%u) end(%u) blocks(%u)",
2746 cp_blkaddr, sit_blkaddr,
2747 segment_count_ckpt << log_blocks_per_seg);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002748 return true;
2749 }
2750
2751 if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
2752 nat_blkaddr) {
David Brazdil0f672f62019-12-10 10:32:29 +00002753 f2fs_info(sbi, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
2754 sit_blkaddr, nat_blkaddr,
2755 segment_count_sit << log_blocks_per_seg);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002756 return true;
2757 }
2758
2759 if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
2760 ssa_blkaddr) {
David Brazdil0f672f62019-12-10 10:32:29 +00002761 f2fs_info(sbi, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
2762 nat_blkaddr, ssa_blkaddr,
2763 segment_count_nat << log_blocks_per_seg);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002764 return true;
2765 }
2766
2767 if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
2768 main_blkaddr) {
David Brazdil0f672f62019-12-10 10:32:29 +00002769 f2fs_info(sbi, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
2770 ssa_blkaddr, main_blkaddr,
2771 segment_count_ssa << log_blocks_per_seg);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002772 return true;
2773 }
2774
2775 if (main_end_blkaddr > seg_end_blkaddr) {
Olivier Deprez157378f2022-04-04 15:47:50 +02002776 f2fs_info(sbi, "Wrong MAIN_AREA boundary, start(%u) end(%llu) block(%u)",
2777 main_blkaddr, seg_end_blkaddr,
David Brazdil0f672f62019-12-10 10:32:29 +00002778 segment_count_main << log_blocks_per_seg);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002779 return true;
2780 } else if (main_end_blkaddr < seg_end_blkaddr) {
2781 int err = 0;
2782 char *res;
2783
2784 /* fix in-memory information all the time */
2785 raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
2786 segment0_blkaddr) >> log_blocks_per_seg);
2787
2788 if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
2789 set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
2790 res = "internally";
2791 } else {
2792 err = __f2fs_commit_super(bh, NULL);
2793 res = err ? "failed" : "done";
2794 }
Olivier Deprez157378f2022-04-04 15:47:50 +02002795 f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%llu) block(%u)",
2796 res, main_blkaddr, seg_end_blkaddr,
David Brazdil0f672f62019-12-10 10:32:29 +00002797 segment_count_main << log_blocks_per_seg);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002798 if (err)
2799 return true;
2800 }
2801 return false;
2802}
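
/*
 * Summary of the layout enforced above: cp_blkaddr must equal
 * segment0_blkaddr, and each area must start exactly where the previous one
 * ends,
 *
 *   CP -> SIT -> NAT -> SSA -> MAIN
 *
 * with MAIN ending at or before seg_end_blkaddr. When MAIN ends earlier, the
 * in-memory segment_count is shrunk and the superblock is rewritten when the
 * device is writable.
 */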
2803
2804static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
2805 struct buffer_head *bh)
2806{
Olivier Deprez157378f2022-04-04 15:47:50 +02002807 block_t segment_count, segs_per_sec, secs_per_zone, segment_count_main;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002808 block_t total_sections, blocks_per_seg;
2809 struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
2810 (bh->b_data + F2FS_SUPER_OFFSET);
David Brazdil0f672f62019-12-10 10:32:29 +00002811 size_t crc_offset = 0;
2812 __u32 crc = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002813
David Brazdil0f672f62019-12-10 10:32:29 +00002814 if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) {
2815 f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
2816 F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
2817 return -EINVAL;
2818 }
2819
2820 /* Check checksum_offset and crc in superblock */
2821 if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
2822 crc_offset = le32_to_cpu(raw_super->checksum_offset);
2823 if (crc_offset !=
2824 offsetof(struct f2fs_super_block, crc)) {
2825 f2fs_info(sbi, "Invalid SB checksum offset: %zu",
2826 crc_offset);
2827 return -EFSCORRUPTED;
2828 }
2829 crc = le32_to_cpu(raw_super->crc);
2830 if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
2831 f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
2832 return -EFSCORRUPTED;
2833 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002834 }
2835
2836 /* Currently, support only 4KB page cache size */
2837 if (F2FS_BLKSIZE != PAGE_SIZE) {
David Brazdil0f672f62019-12-10 10:32:29 +00002838 f2fs_info(sbi, "Invalid page_cache_size (%lu), supports only 4KB",
2839 PAGE_SIZE);
2840 return -EFSCORRUPTED;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002841 }
2842
2843 /* Currently, support only 4KB block size */
Olivier Deprez0e641232021-09-23 10:07:05 +02002844 if (le32_to_cpu(raw_super->log_blocksize) != F2FS_BLKSIZE_BITS) {
2845 f2fs_info(sbi, "Invalid log_blocksize (%u), supports only %u",
2846 le32_to_cpu(raw_super->log_blocksize),
2847 F2FS_BLKSIZE_BITS);
David Brazdil0f672f62019-12-10 10:32:29 +00002848 return -EFSCORRUPTED;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002849 }
2850
2851 /* check log blocks per segment */
2852 if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
David Brazdil0f672f62019-12-10 10:32:29 +00002853 f2fs_info(sbi, "Invalid log blocks per segment (%u)",
2854 le32_to_cpu(raw_super->log_blocks_per_seg));
2855 return -EFSCORRUPTED;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002856 }
2857
2858 /* Currently, support 512/1024/2048/4096 bytes sector size */
2859 if (le32_to_cpu(raw_super->log_sectorsize) >
2860 F2FS_MAX_LOG_SECTOR_SIZE ||
2861 le32_to_cpu(raw_super->log_sectorsize) <
2862 F2FS_MIN_LOG_SECTOR_SIZE) {
David Brazdil0f672f62019-12-10 10:32:29 +00002863 f2fs_info(sbi, "Invalid log sectorsize (%u)",
2864 le32_to_cpu(raw_super->log_sectorsize));
2865 return -EFSCORRUPTED;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002866 }
2867 if (le32_to_cpu(raw_super->log_sectors_per_block) +
2868 le32_to_cpu(raw_super->log_sectorsize) !=
2869 F2FS_MAX_LOG_SECTOR_SIZE) {
David Brazdil0f672f62019-12-10 10:32:29 +00002870 f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)",
2871 le32_to_cpu(raw_super->log_sectors_per_block),
2872 le32_to_cpu(raw_super->log_sectorsize));
2873 return -EFSCORRUPTED;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002874 }
2875
2876 segment_count = le32_to_cpu(raw_super->segment_count);
Olivier Deprez157378f2022-04-04 15:47:50 +02002877 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002878 segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
2879 secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
2880 total_sections = le32_to_cpu(raw_super->section_count);
2881
2882 /* blocks_per_seg should be 512, given the above check */
2883 blocks_per_seg = 1 << le32_to_cpu(raw_super->log_blocks_per_seg);
2884
2885 if (segment_count > F2FS_MAX_SEGMENT ||
2886 segment_count < F2FS_MIN_SEGMENTS) {
David Brazdil0f672f62019-12-10 10:32:29 +00002887 f2fs_info(sbi, "Invalid segment count (%u)", segment_count);
2888 return -EFSCORRUPTED;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002889 }
2890
Olivier Deprez157378f2022-04-04 15:47:50 +02002891 if (total_sections > segment_count_main || total_sections < 1 ||
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002892 segs_per_sec > segment_count || !segs_per_sec) {
David Brazdil0f672f62019-12-10 10:32:29 +00002893 f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
2894 segment_count, total_sections, segs_per_sec);
2895 return -EFSCORRUPTED;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002896 }
2897
Olivier Deprez157378f2022-04-04 15:47:50 +02002898 if (segment_count_main != total_sections * segs_per_sec) {
2899 f2fs_info(sbi, "Invalid segment/section count (%u != %u * %u)",
2900 segment_count_main, total_sections, segs_per_sec);
2901 return -EFSCORRUPTED;
2902 }
2903
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002904 if ((segment_count / segs_per_sec) < total_sections) {
David Brazdil0f672f62019-12-10 10:32:29 +00002905 f2fs_info(sbi, "Small segment_count (%u < %u * %u)",
2906 segment_count, segs_per_sec, total_sections);
2907 return -EFSCORRUPTED;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002908 }
2909
David Brazdil0f672f62019-12-10 10:32:29 +00002910 if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
2911 f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)",
2912 segment_count, le64_to_cpu(raw_super->block_count));
2913 return -EFSCORRUPTED;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002914 }
2915
Olivier Deprez157378f2022-04-04 15:47:50 +02002916 if (RDEV(0).path[0]) {
2917 block_t dev_seg_count = le32_to_cpu(RDEV(0).total_segments);
2918 int i = 1;
2919
2920 while (i < MAX_DEVICES && RDEV(i).path[0]) {
2921 dev_seg_count += le32_to_cpu(RDEV(i).total_segments);
2922 i++;
2923 }
2924 if (segment_count != dev_seg_count) {
2925 f2fs_info(sbi, "Segment count (%u) mismatch with total segments from devices (%u)",
2926 segment_count, dev_seg_count);
2927 return -EFSCORRUPTED;
2928 }
2929 } else {
2930 if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_BLKZONED) &&
2931 !bdev_is_zoned(sbi->sb->s_bdev)) {
2932 f2fs_info(sbi, "Zoned block device path is missing");
2933 return -EFSCORRUPTED;
2934 }
2935 }
2936
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002937 if (secs_per_zone > total_sections || !secs_per_zone) {
David Brazdil0f672f62019-12-10 10:32:29 +00002938 f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
2939 secs_per_zone, total_sections);
2940 return -EFSCORRUPTED;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002941 }
2942 if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
2943 raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
2944 (le32_to_cpu(raw_super->extension_count) +
2945 raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) {
David Brazdil0f672f62019-12-10 10:32:29 +00002946 f2fs_info(sbi, "Corrupted extension count (%u + %u > %u)",
2947 le32_to_cpu(raw_super->extension_count),
2948 raw_super->hot_ext_count,
2949 F2FS_MAX_EXTENSION);
2950 return -EFSCORRUPTED;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002951 }
2952
Olivier Deprez157378f2022-04-04 15:47:50 +02002953 if (le32_to_cpu(raw_super->cp_payload) >=
2954 (blocks_per_seg - F2FS_CP_PACKS -
2955 NR_CURSEG_PERSIST_TYPE)) {
2956 f2fs_info(sbi, "Insane cp_payload (%u >= %u)",
David Brazdil0f672f62019-12-10 10:32:29 +00002957 le32_to_cpu(raw_super->cp_payload),
Olivier Deprez157378f2022-04-04 15:47:50 +02002958 blocks_per_seg - F2FS_CP_PACKS -
2959 NR_CURSEG_PERSIST_TYPE);
David Brazdil0f672f62019-12-10 10:32:29 +00002960 return -EFSCORRUPTED;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002961 }
2962
2963 /* check reserved ino info */
2964 if (le32_to_cpu(raw_super->node_ino) != 1 ||
2965 le32_to_cpu(raw_super->meta_ino) != 2 ||
2966 le32_to_cpu(raw_super->root_ino) != 3) {
David Brazdil0f672f62019-12-10 10:32:29 +00002967 f2fs_info(sbi, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
2968 le32_to_cpu(raw_super->node_ino),
2969 le32_to_cpu(raw_super->meta_ino),
2970 le32_to_cpu(raw_super->root_ino));
2971 return -EFSCORRUPTED;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002972 }
2973
2974 /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
2975 if (sanity_check_area_boundary(sbi, bh))
David Brazdil0f672f62019-12-10 10:32:29 +00002976 return -EFSCORRUPTED;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002977
2978 return 0;
2979}
2980
2981int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
2982{
2983 unsigned int total, fsmeta;
2984 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
2985 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2986 unsigned int ovp_segments, reserved_segments;
2987 unsigned int main_segs, blocks_per_seg;
	unsigned int sit_segs, nat_segs;
	unsigned int sit_bitmap_size, nat_bitmap_size;
	unsigned int log_blocks_per_seg;
	unsigned int segment_count_main;
	unsigned int cp_pack_start_sum, cp_payload;
	block_t user_block_count, valid_user_blocks;
	block_t avail_node_count, valid_node_count;
	unsigned int nat_blocks, nat_bits_bytes, nat_bits_blocks;
	int i, j;

	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
	sit_segs = le32_to_cpu(raw_super->segment_count_sit);
	fsmeta += sit_segs;
	nat_segs = le32_to_cpu(raw_super->segment_count_nat);
	fsmeta += nat_segs;
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

	if (unlikely(fsmeta >= total))
		return 1;

	ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);

	if (unlikely(fsmeta < F2FS_MIN_META_SEGMENTS ||
			ovp_segments == 0 || reserved_segments == 0)) {
		f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version");
		return 1;
	}

	user_block_count = le64_to_cpu(ckpt->user_block_count);
	segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	if (!user_block_count || user_block_count >=
			segment_count_main << log_blocks_per_seg) {
		f2fs_err(sbi, "Wrong user_block_count: %u",
			 user_block_count);
		return 1;
	}

	valid_user_blocks = le64_to_cpu(ckpt->valid_block_count);
	if (valid_user_blocks > user_block_count) {
		f2fs_err(sbi, "Wrong valid_user_blocks: %u, user_block_count: %u",
			 valid_user_blocks, user_block_count);
		return 1;
	}

	valid_node_count = le32_to_cpu(ckpt->valid_node_count);
	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
	if (valid_node_count > avail_node_count) {
		f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u",
			 valid_node_count, avail_node_count);
		return 1;
	}

	main_segs = le32_to_cpu(raw_super->segment_count_main);
	blocks_per_seg = sbi->blocks_per_seg;

	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
			return 1;
		for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
				le32_to_cpu(ckpt->cur_node_segno[j])) {
				f2fs_err(sbi, "Node segment (%u, %u) has the same segno: %u",
					 i, j,
					 le32_to_cpu(ckpt->cur_node_segno[i]));
				return 1;
			}
		}
	}
	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
			return 1;
		for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
				le32_to_cpu(ckpt->cur_data_segno[j])) {
				f2fs_err(sbi, "Data segment (%u, %u) has the same segno: %u",
					 i, j,
					 le32_to_cpu(ckpt->cur_data_segno[i]));
				return 1;
			}
		}
	}
	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		for (j = 0; j < NR_CURSEG_DATA_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
				le32_to_cpu(ckpt->cur_data_segno[j])) {
				f2fs_err(sbi, "Node segment (%u) and Data segment (%u) has the same segno: %u",
					 i, j,
					 le32_to_cpu(ckpt->cur_node_segno[i]));
				return 1;
			}
		}
	}

	sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
	nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);

	if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
		nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
		f2fs_err(sbi, "Wrong bitmap size: sit: %u, nat:%u",
			 sit_bitmap_size, nat_bitmap_size);
		return 1;
	}

	cp_pack_start_sum = __start_sum_addr(sbi);
	cp_payload = __cp_payload(sbi);
	if (cp_pack_start_sum < cp_payload + 1 ||
		cp_pack_start_sum > blocks_per_seg - 1 -
			NR_CURSEG_PERSIST_TYPE) {
		f2fs_err(sbi, "Wrong cp_pack_start_sum: %u",
			 cp_pack_start_sum);
		return 1;
	}

	if (__is_set_ckpt_flags(ckpt, CP_LARGE_NAT_BITMAP_FLAG) &&
		le32_to_cpu(ckpt->checksum_offset) != CP_MIN_CHKSUM_OFFSET) {
		f2fs_warn(sbi, "using deprecated layout of large_nat_bitmap, "
			  "please run fsck v1.13.0 or higher to repair, chksum_offset: %u, "
			  "fixed with patch: \"f2fs-tools: relocate chksum_offset for large_nat_bitmap feature\"",
			  le32_to_cpu(ckpt->checksum_offset));
		return 1;
	}

	nat_blocks = nat_segs << log_blocks_per_seg;
	nat_bits_bytes = nat_blocks / BITS_PER_BYTE;
	nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
	if (__is_set_ckpt_flags(ckpt, CP_NAT_BITS_FLAG) &&
		(cp_payload + F2FS_CP_PACKS +
		NR_CURSEG_PERSIST_TYPE + nat_bits_blocks >= blocks_per_seg)) {
		f2fs_warn(sbi, "Insane cp_payload: %u, nat_bits_blocks: %u",
			  cp_payload, nat_bits_blocks);
		return 1;
	}

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_err(sbi, "A bug case: need to run fsck");
		return 1;
	}
	return 0;
}

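/*
 * Initialize the in-memory superblock info from the on-disk superblock
 * and set the default tunables (GC/CP intervals, victim search depth,
 * directory level, per-type page counters and locks).
 */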
static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;
	int i;

	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
	sbi->cur_victim_sec = NULL_SECNO;
	sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
	sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
	sbi->migration_granularity = sbi->segs_per_sec;

	sbi->dir_level = DEF_DIR_LEVEL;
	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
	sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] =
				DEF_UMOUNT_DISCARD_TIMEOUT;
	clear_sbi_flag(sbi, SBI_NEED_FSCK);

	for (i = 0; i < NR_COUNT_TYPE; i++)
		atomic_set(&sbi->nr_pages[i], 0);

	for (i = 0; i < META; i++)
		atomic_set(&sbi->wb_sync_req[i], 0);

	INIT_LIST_HEAD(&sbi->s_list);
	mutex_init(&sbi->umount_mutex);
	init_rwsem(&sbi->io_order_lock);
	spin_lock_init(&sbi->cp_lock);

	sbi->dirty_device = 0;
	spin_lock_init(&sbi->dev_lock);

	init_rwsem(&sbi->sb_lock);
	init_rwsem(&sbi->pin_sem);
}

static int init_percpu_info(struct f2fs_sb_info *sbi)
{
	int err;

	err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
	if (err)
		return err;

	err = percpu_counter_init(&sbi->total_valid_inode_count, 0,
								GFP_KERNEL);
	if (err)
		percpu_counter_destroy(&sbi->alloc_valid_block_count);

	return err;
}

#ifdef CONFIG_BLK_DEV_ZONED

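/*
 * Callback for blkdev_report_zones(): mark each sequential zone in the
 * per-device bitmap, record its usable capacity in blocks, and note
 * whether any zone's capacity differs from its length.
 */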
struct f2fs_report_zones_args {
	struct f2fs_dev_info *dev;
	bool zone_cap_mismatch;
};

static int f2fs_report_zone_cb(struct blk_zone *zone, unsigned int idx,
			      void *data)
{
	struct f2fs_report_zones_args *rz_args = data;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return 0;

	set_bit(idx, rz_args->dev->blkz_seq);
	rz_args->dev->zone_capacity_blocks[idx] = zone->capacity >>
						F2FS_LOG_SECTORS_PER_BLOCK;
	if (zone->len != zone->capacity && !rz_args->zone_cap_mismatch)
		rz_args->zone_cap_mismatch = true;

	return 0;
}

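/*
 * Collect zone geometry for one device of a blkzoned mount: validate the
 * zone size against the superblock, allocate the sequential-zone bitmap
 * and per-zone capacity array, then fill them via blkdev_report_zones().
 */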
static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
{
	struct block_device *bdev = FDEV(devi).bdev;
	sector_t nr_sectors = bdev->bd_part->nr_sects;
	struct f2fs_report_zones_args rep_zone_arg;
	int ret;

	if (!f2fs_sb_has_blkzoned(sbi))
		return 0;

	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
				SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
		return -EINVAL;
	sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
	if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
				__ilog2_u32(sbi->blocks_per_blkz))
		return -EINVAL;
	sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
	FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
					sbi->log_blocks_per_blkz;
	if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
		FDEV(devi).nr_blkz++;

	FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi,
					BITS_TO_LONGS(FDEV(devi).nr_blkz)
					* sizeof(unsigned long),
					GFP_KERNEL);
	if (!FDEV(devi).blkz_seq)
		return -ENOMEM;

	/* Get block zones type and zone-capacity */
	FDEV(devi).zone_capacity_blocks = f2fs_kzalloc(sbi,
					FDEV(devi).nr_blkz * sizeof(block_t),
					GFP_KERNEL);
	if (!FDEV(devi).zone_capacity_blocks)
		return -ENOMEM;

	rep_zone_arg.dev = &FDEV(devi);
	rep_zone_arg.zone_cap_mismatch = false;

	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, f2fs_report_zone_cb,
				  &rep_zone_arg);
	if (ret < 0)
		return ret;

	if (!rep_zone_arg.zone_cap_mismatch) {
		kfree(FDEV(devi).zone_capacity_blocks);
		FDEV(devi).zone_capacity_blocks = NULL;
	}

	return 0;
}
#endif

/*
 * Read the f2fs raw super block.
 * Because we keep two copies of the super block, read both of them
 * to get the first valid one. If either copy is broken, pass the
 * recovery flag back to the caller.
 */
static int read_raw_super_block(struct f2fs_sb_info *sbi,
			struct f2fs_super_block **raw_super,
			int *valid_super_block, int *recovery)
{
	struct super_block *sb = sbi->sb;
	int block;
	struct buffer_head *bh;
	struct f2fs_super_block *super;
	int err = 0;

	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	for (block = 0; block < 2; block++) {
		bh = sb_bread(sb, block);
		if (!bh) {
			f2fs_err(sbi, "Unable to read %dth superblock",
				 block + 1);
			err = -EIO;
			*recovery = 1;
			continue;
		}

		/* sanity checking of raw super */
		err = sanity_check_raw_super(sbi, bh);
		if (err) {
			f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
				 block + 1);
			brelse(bh);
			*recovery = 1;
			continue;
		}

		if (!*raw_super) {
			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
							sizeof(*super));
			*valid_super_block = block;
			*raw_super = super;
		}
		brelse(bh);
	}

	/* No valid superblock */
	if (!*raw_super)
		kfree(super);
	else
		err = 0;

	return err;
}

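/*
 * Write the in-memory superblock to disk. The backup copy is written
 * first; on the recovery path only that copy is rewritten and the
 * known-valid copy is left untouched.
 */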
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
	struct buffer_head *bh;
	__u32 crc = 0;
	int err;

	if ((recover && f2fs_readonly(sbi->sb)) ||
				bdev_read_only(sbi->sb->s_bdev)) {
		set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
		return -EROFS;
	}

	/* we should update superblock crc here */
	if (!recover && f2fs_sb_has_sb_chksum(sbi)) {
		crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi),
				offsetof(struct f2fs_super_block, crc));
		F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc);
	}

	/* write back-up superblock first */
	bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);

	/* if we are in recovery path, skip writing valid superblock */
	if (recover || err)
		return err;

	/* write current valid superblock */
	bh = sb_bread(sbi->sb, sbi->valid_super_block);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);
	return err;
}

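/*
 * Discover the block devices backing this filesystem: either a single
 * (possibly zoned) device or the multi-device layout described in the
 * superblock, computing each device's block range and zone information.
 */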
static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	unsigned int max_devices = MAX_DEVICES;
	int i;

	/* Initialize single device information */
	if (!RDEV(0).path[0]) {
		if (!bdev_is_zoned(sbi->sb->s_bdev))
			return 0;
		max_devices = 1;
	}

	/*
	 * Initialize multiple devices information, or single
	 * zoned block device information.
	 */
	sbi->devs = f2fs_kzalloc(sbi,
				 array_size(max_devices,
					    sizeof(struct f2fs_dev_info)),
				 GFP_KERNEL);
	if (!sbi->devs)
		return -ENOMEM;

	for (i = 0; i < max_devices; i++) {

		if (i > 0 && !RDEV(i).path[0])
			break;

		if (max_devices == 1) {
			/* Single zoned block device mount */
			FDEV(0).bdev =
				blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
					sbi->sb->s_mode, sbi->sb->s_type);
		} else {
			/* Multi-device mount */
			memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
			FDEV(i).total_segments =
				le32_to_cpu(RDEV(i).total_segments);
			if (i == 0) {
				FDEV(i).start_blk = 0;
				FDEV(i).end_blk = FDEV(i).start_blk +
				    (FDEV(i).total_segments <<
				    sbi->log_blocks_per_seg) - 1 +
				    le32_to_cpu(raw_super->segment0_blkaddr);
			} else {
				FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
				FDEV(i).end_blk = FDEV(i).start_blk +
					(FDEV(i).total_segments <<
					sbi->log_blocks_per_seg) - 1;
			}
			FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
					sbi->sb->s_mode, sbi->sb->s_type);
		}
		if (IS_ERR(FDEV(i).bdev))
			return PTR_ERR(FDEV(i).bdev);

		/* to release errored devices */
		sbi->s_ndevs = i + 1;

#ifdef CONFIG_BLK_DEV_ZONED
		if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
				!f2fs_sb_has_blkzoned(sbi)) {
			f2fs_err(sbi, "Zoned block device feature not enabled\n");
			return -EINVAL;
		}
		if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
			if (init_blkz_info(sbi, i)) {
				f2fs_err(sbi, "Failed to initialize F2FS blkzone information");
				return -EINVAL;
			}
			if (max_devices == 1)
				break;
			f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
				  i, FDEV(i).path,
				  FDEV(i).total_segments,
				  FDEV(i).start_blk, FDEV(i).end_blk,
				  bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
				  "Host-aware" : "Host-managed");
			continue;
		}
#endif
		f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x",
			  i, FDEV(i).path,
			  FDEV(i).total_segments,
			  FDEV(i).start_blk, FDEV(i).end_blk);
	}
	f2fs_info(sbi,
		  "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
	return 0;
}

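/*
 * When the superblock requests casefolding, load the matching Unicode
 * table and attach it to the VFS super_block; the mount is rejected if
 * encryption is also enabled or the requested encoding is unknown.
 */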
static int f2fs_setup_casefold(struct f2fs_sb_info *sbi)
{
#ifdef CONFIG_UNICODE
	if (f2fs_sb_has_casefold(sbi) && !sbi->sb->s_encoding) {
		const struct f2fs_sb_encodings *encoding_info;
		struct unicode_map *encoding;
		__u16 encoding_flags;

		if (f2fs_sb_has_encrypt(sbi)) {
			f2fs_err(sbi,
				"Can't mount with encoding and encryption");
			return -EINVAL;
		}

		if (f2fs_sb_read_encoding(sbi->raw_super, &encoding_info,
					  &encoding_flags)) {
			f2fs_err(sbi,
				 "Encoding requested by superblock is unknown");
			return -EINVAL;
		}

		encoding = utf8_load(encoding_info->version);
		if (IS_ERR(encoding)) {
			f2fs_err(sbi,
				 "can't mount with superblock charset: %s-%s "
				 "not supported by the kernel. flags: 0x%x.",
				 encoding_info->name, encoding_info->version,
				 encoding_flags);
			return PTR_ERR(encoding);
		}
		f2fs_info(sbi, "Using encoding defined by superblock: "
			 "%s-%s with flags 0x%hx", encoding_info->name,
			 encoding_info->version?:"\b", encoding_flags);

		sbi->sb->s_encoding = encoding;
		sbi->sb->s_encoding_flags = encoding_flags;
		sbi->sb->s_d_op = &f2fs_dentry_ops;
	}
#else
	if (f2fs_sb_has_casefold(sbi)) {
		f2fs_err(sbi, "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
		return -EINVAL;
	}
#endif
	return 0;
}

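/*
 * Adjust runtime policies by volume size: small volumes prefer block
 * reuse, the finest discard granularity and forced in-place updates;
 * readdir readahead is enabled for every volume.
 */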
static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_i = SM_I(sbi);

	/* adjust parameters according to the volume size */
	if (sm_i->main_segments <= SMALL_VOLUME_SEGMENTS) {
		F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
		sm_i->dcc_info->discard_granularity = 1;
		sm_i->ipu_policy = 1 << F2FS_IPU_FORCE;
	}

	sbi->readdir_ra = 1;
}

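/*
 * Core mount routine: read and validate the superblock, parse options,
 * build the segment/node managers and metadata inodes, replay fsync'ed
 * data when needed, and start the background GC thread. On failure each
 * partially constructed piece is torn down in reverse order.
 */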
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct inode *root;
	int err;
	bool skip_recovery = false, need_fsck = false;
	char *options = NULL;
	int recovery, i, valid_super_block;
	struct curseg_info *seg_i;
	int retry_cnt = 1;

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	valid_super_block = -1;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sbi->sb = sb;

	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		f2fs_err(sbi, "Cannot load crc32 driver.");
		err = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;
		goto free_sbi;
	}

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_err(sbi, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
								&recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	sbi->raw_super = raw_super;

	/* precompute checksum seed for metadata */
	if (f2fs_sb_has_inode_chksum(sbi))
		sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
						 sizeof(raw_super->uuid));

	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options, false);
	if (err)
		goto free_options;

	sbi->max_file_blocks = max_file_blocks();
	sb->s_maxbytes = sbi->max_file_blocks <<
				le32_to_cpu(raw_super->log_blocksize);
	sb->s_max_links = F2FS_LINK_MAX;

	err = f2fs_setup_casefold(sbi);
	if (err)
		goto free_options;

#ifdef CONFIG_QUOTA
	sb->dq_op = &f2fs_quota_operations;
	sb->s_qcop = &f2fs_quotactl_ops;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;

	if (f2fs_sb_has_quota_ino(sbi)) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if (f2fs_qf_ino(sbi->sb, i))
				sbi->nquota_files++;
		}
	}
#endif

	sb->s_op = &f2fs_sops;
#ifdef CONFIG_FS_ENCRYPTION
	sb->s_cop = &f2fs_cryptops;
#endif
#ifdef CONFIG_FS_VERITY
	sb->s_vop = &f2fs_verityops;
#endif
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
	memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
	sb->s_iflags |= SB_I_CGROUPWB;

	/* init f2fs-specific super block info */
	sbi->valid_super_block = valid_super_block;
	init_rwsem(&sbi->gc_lock);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);
	init_rwsem(&sbi->node_change);

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	/* init iostat info */
	spin_lock_init(&sbi->iostat_lock);
	sbi->iostat_enable = false;
	sbi->iostat_period_ms = DEFAULT_IOSTAT_PERIOD_MS;

	for (i = 0; i < NR_PAGE_TYPE; i++) {
		int n = (i == META) ? 1 : NR_TEMP_TYPE;
		int j;

		sbi->write_io[i] =
			f2fs_kmalloc(sbi,
				     array_size(n,
						sizeof(struct f2fs_bio_info)),
				     GFP_KERNEL);
		if (!sbi->write_io[i]) {
			err = -ENOMEM;
			goto free_bio_info;
		}

		for (j = HOT; j < n; j++) {
			init_rwsem(&sbi->write_io[i][j].io_rwsem);
			sbi->write_io[i][j].sbi = sbi;
			sbi->write_io[i][j].bio = NULL;
			spin_lock_init(&sbi->write_io[i][j].io_lock);
			INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
			INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list);
			init_rwsem(&sbi->write_io[i][j].bio_list_lock);
		}
	}

	init_rwsem(&sbi->cp_rwsem);
	init_rwsem(&sbi->quota_sem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	err = init_percpu_info(sbi);
	if (err)
		goto free_bio_info;

	if (F2FS_IO_ALIGNED(sbi)) {
		sbi->write_io_dummy =
			mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
		if (!sbi->write_io_dummy) {
			err = -ENOMEM;
			goto free_percpu;
		}
	}

	/* init per sbi slab cache */
	err = f2fs_init_xattr_caches(sbi);
	if (err)
		goto free_io_dummy;
	err = f2fs_init_page_array_cache(sbi);
	if (err)
		goto free_xattr_cache;

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_err(sbi, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_page_array_cache;
	}

	err = f2fs_get_valid_checkpoint(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG))
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) {
		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
		sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL;
	}

	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FSCK_FLAG))
		set_sbi_flag(sbi, SBI_NEED_FSCK);

	/* Initialize device list */
	err = f2fs_scan_devices(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to find devices");
		goto free_devices;
	}

	err = f2fs_init_post_read_wq(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to initialize post read workqueue");
		goto free_devices;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	percpu_counter_set(&sbi->total_valid_inode_count,
				le32_to_cpu(sbi->ckpt->valid_inode_count));
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->reserved_blocks = 0;
	sbi->current_reserved_blocks = 0;
	limit_reserve_root(sbi);
	adjust_unusable_cap_perc(sbi);

	for (i = 0; i < NR_INODE_TYPE; i++) {
		INIT_LIST_HEAD(&sbi->inode_list[i]);
		spin_lock_init(&sbi->inode_lock[i]);
	}
	mutex_init(&sbi->flush_lock);

	f2fs_init_extent_cache_info(sbi);

	f2fs_init_ino_entry_info(sbi);

	f2fs_init_fsync_node_info(sbi);

	/* setup f2fs internal modules */
	err = f2fs_build_segment_manager(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)",
			 err);
		goto free_sm;
	}
	err = f2fs_build_node_manager(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)",
			 err);
		goto free_nm;
	}

	err = adjust_reserved_segment(sbi);
	if (err)
		goto free_nm;

	/* For write statistics */
	if (sb->s_bdev->bd_part)
		sbi->sectors_written_start =
			(u64)part_stat_read(sb->s_bdev->bd_part,
					    sectors[STAT_WRITE]);

	/* Read accumulated write IO statistics if exists */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	if (__exist_node_summaries(sbi))
		sbi->kbytes_written =
			le64_to_cpu(seg_i->journal->info.kbytes_written);

	f2fs_build_gc_manager(sbi);

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_nm;

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_err(sbi, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_stats;
	}

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_err(sbi, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
			!root->i_size || !root->i_nlink) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_node_inode;
	}

	err = f2fs_register_sysfs(sbi);
	if (err)
		goto free_root_inode;

#ifdef CONFIG_QUOTA
	/* Enable quota usage during mount */
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
		err = f2fs_enable_quotas(sb);
		if (err)
			f2fs_err(sbi, "Cannot turn on quotas: error %d", err);
	}
#endif
	/* if there are any orphan inodes, free them */
	err = f2fs_recover_orphan_inodes(sbi);
	if (err)
		goto free_meta;

	if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)))
		goto reset_checkpoint;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD) &&
			!test_opt(sbi, NORECOVERY)) {
		/*
		 * mount should be failed, when device has readonly mode, and
		 * previous checkpoint was not done by clean system shutdown.
		 */
		if (f2fs_hw_is_readonly(sbi)) {
			if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
				err = -EROFS;
				f2fs_err(sbi, "Need to recover fsync data, but write access unavailable");
				goto free_meta;
			}
			f2fs_info(sbi, "write access unavailable, skipping recovery");
			goto reset_checkpoint;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		if (skip_recovery)
			goto reset_checkpoint;

		err = f2fs_recover_fsync_data(sbi, false);
		if (err < 0) {
			if (err != -ENOMEM)
				skip_recovery = true;
			need_fsck = true;
			f2fs_err(sbi, "Cannot recover all fsync data errno=%d",
				 err);
			goto free_meta;
		}
	} else {
		err = f2fs_recover_fsync_data(sbi, true);

		if (!f2fs_readonly(sb) && err > 0) {
			err = -EINVAL;
			f2fs_err(sbi, "Need to recover fsync data");
			goto free_meta;
		}
	}

	/*
	 * If the f2fs is not readonly and fsync data recovery succeeds,
	 * check zoned block devices' write pointer consistency.
	 */
	if (!err && !f2fs_readonly(sb) && f2fs_sb_has_blkzoned(sbi)) {
		err = f2fs_check_write_pointer(sbi);
		if (err)
			goto free_meta;
	}

reset_checkpoint:
	f2fs_init_inmem_curseg(sbi);

	/* f2fs_recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
		err = f2fs_disable_checkpoint(sbi);
		if (err)
			goto sync_free_meta;
	} else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
		f2fs_enable_checkpoint(sbi);
	}

	/*
	 * If filesystem is not mounted as read-only then
	 * do start the gc_thread.
	 */
	if (F2FS_OPTION(sbi).bggc_mode != BGGC_MODE_OFF && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread.*/
		err = f2fs_start_gc_thread(sbi);
		if (err)
			goto sync_free_meta;
	}
	kvfree(options);

	/* recover broken superblock */
	if (recovery) {
		err = f2fs_commit_super(sbi, true);
		f2fs_info(sbi, "Try to recover %dth superblock, ret: %d",
			  sbi->valid_super_block ? 1 : 2, err);
	}

	f2fs_join_shrinker(sbi);

	f2fs_tuning_parameters(sbi);

	f2fs_notice(sbi, "Mounted with checkpoint version = %llx",
		    cur_cp_version(F2FS_CKPT(sbi)));
	f2fs_update_time(sbi, CP_TIME);
	f2fs_update_time(sbi, REQ_TIME);
	clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
	return 0;

sync_free_meta:
	/* safe to flush all the data */
	sync_filesystem(sbi->sb);
	retry_cnt = 0;

free_meta:
#ifdef CONFIG_QUOTA
	f2fs_truncate_quota_inode_pages(sb);
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb))
		f2fs_quota_off_umount(sbi->sb);
#endif
	/*
	 * Some dirty meta pages can be produced by f2fs_recover_orphan_inodes()
	 * failed by EIO. Then, iput(node_inode) can trigger balance_fs_bg()
	 * followed by f2fs_write_checkpoint() through f2fs_write_node_pages(), which
	 * falls into an infinite loop in f2fs_sync_meta_pages().
	 */
	truncate_inode_pages_final(META_MAPPING(sbi));
	/* evict some inodes being cached by GC */
	evict_inodes(sb);
	f2fs_unregister_sysfs(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	f2fs_release_ino_entry(sbi, true);
	truncate_inode_pages_final(NODE_MAPPING(sbi));
	iput(sbi->node_inode);
	sbi->node_inode = NULL;
free_stats:
	f2fs_destroy_stats(sbi);
free_nm:
	f2fs_destroy_node_manager(sbi);
free_sm:
	f2fs_destroy_segment_manager(sbi);
	f2fs_destroy_post_read_wq(sbi);
free_devices:
	destroy_device_list(sbi);
	kvfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
	sbi->meta_inode = NULL;
free_page_array_cache:
	f2fs_destroy_page_array_cache(sbi);
free_xattr_cache:
	f2fs_destroy_xattr_caches(sbi);
free_io_dummy:
	mempool_destroy(sbi->write_io_dummy);
free_percpu:
	destroy_percpu_info(sbi);
free_bio_info:
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kvfree(sbi->write_io[i]);

#ifdef CONFIG_UNICODE
	utf8_unload(sb->s_encoding);
	sb->s_encoding = NULL;
#endif
free_options:
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
	fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
	kvfree(options);
free_sb_buf:
	kfree(raw_super);
free_sbi:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi);

	/* give only one another chance */
	if (retry_cnt > 0 && skip_recovery) {
		retry_cnt--;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}

static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}

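/*
 * Unmount-time teardown: stop the GC and discard threads, write a final
 * CP_UMOUNT checkpoint if the filesystem is still dirty, then let
 * kill_block_super() finish the generic teardown.
 */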
static void kill_f2fs_super(struct super_block *sb)
{
	if (sb->s_root) {
		struct f2fs_sb_info *sbi = F2FS_SB(sb);

		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_stop_gc_thread(sbi);
		f2fs_stop_discard_thread(sbi);

		if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
				!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
			struct cp_control cpc = {
				.reason = CP_UMOUNT,
			};
			f2fs_write_checkpoint(sbi, &cpc);
		}

		if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb))
			sb->s_flags &= ~SB_RDONLY;
	}
	kill_block_super(sb);
}

static struct file_system_type f2fs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "f2fs",
	.mount		= f2fs_mount,
	.kill_sb	= kill_f2fs_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("f2fs");

static int __init init_inodecache(void)
{
	f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
			sizeof(struct f2fs_inode_info), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
	if (!f2fs_inode_cachep)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(f2fs_inode_cachep);
}

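/*
 * Module entry point: create every slab cache and subsystem f2fs needs,
 * register sysfs, the shrinker and the filesystem type, unwinding in
 * reverse order if any step fails.
 */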
static int __init init_f2fs_fs(void)
{
	int err;

	if (PAGE_SIZE != F2FS_BLKSIZE) {
		printk("F2FS not supported on PAGE_SIZE(%lu) != %d\n",
				PAGE_SIZE, F2FS_BLKSIZE);
		return -EINVAL;
	}

	f2fs_build_trace_ios();

	err = init_inodecache();
	if (err)
		goto fail;
	err = f2fs_create_node_manager_caches();
	if (err)
		goto free_inodecache;
	err = f2fs_create_segment_manager_caches();
	if (err)
		goto free_node_manager_caches;
	err = f2fs_create_checkpoint_caches();
	if (err)
		goto free_segment_manager_caches;
	err = f2fs_create_recovery_cache();
	if (err)
		goto free_checkpoint_caches;
	err = f2fs_create_extent_cache();
	if (err)
		goto free_recovery_cache;
	err = f2fs_create_garbage_collection_cache();
	if (err)
		goto free_extent_cache;
	err = f2fs_init_sysfs();
	if (err)
		goto free_garbage_collection_cache;
	err = register_shrinker(&f2fs_shrinker_info);
	if (err)
		goto free_sysfs;
	err = register_filesystem(&f2fs_fs_type);
	if (err)
		goto free_shrinker;
	f2fs_create_root_stats();
	err = f2fs_init_post_read_processing();
	if (err)
		goto free_root_stats;
	err = f2fs_init_bio_entry_cache();
	if (err)
		goto free_post_read;
	err = f2fs_init_bioset();
	if (err)
		goto free_bio_entry_cache;
	err = f2fs_init_compress_mempool();
	if (err)
		goto free_bioset;
	err = f2fs_init_compress_cache();
	if (err)
		goto free_compress_mempool;
	return 0;
free_compress_mempool:
	f2fs_destroy_compress_mempool();
free_bioset:
	f2fs_destroy_bioset();
free_bio_entry_cache:
	f2fs_destroy_bio_entry_cache();
free_post_read:
	f2fs_destroy_post_read_processing();
free_root_stats:
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
free_shrinker:
	unregister_shrinker(&f2fs_shrinker_info);
free_sysfs:
	f2fs_exit_sysfs();
free_garbage_collection_cache:
	f2fs_destroy_garbage_collection_cache();
free_extent_cache:
	f2fs_destroy_extent_cache();
free_recovery_cache:
	f2fs_destroy_recovery_cache();
free_checkpoint_caches:
	f2fs_destroy_checkpoint_caches();
free_segment_manager_caches:
	f2fs_destroy_segment_manager_caches();
free_node_manager_caches:
	f2fs_destroy_node_manager_caches();
free_inodecache:
	destroy_inodecache();
fail:
	return err;
}

static void __exit exit_f2fs_fs(void)
{
	f2fs_destroy_compress_cache();
	f2fs_destroy_compress_mempool();
	f2fs_destroy_bioset();
	f2fs_destroy_bio_entry_cache();
	f2fs_destroy_post_read_processing();
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
	unregister_shrinker(&f2fs_shrinker_info);
	f2fs_exit_sysfs();
	f2fs_destroy_garbage_collection_cache();
	f2fs_destroy_extent_cache();
	f2fs_destroy_recovery_cache();
	f2fs_destroy_checkpoint_caches();
	f2fs_destroy_segment_manager_caches();
	f2fs_destroy_node_manager_caches();
	destroy_inodecache();
	f2fs_destroy_trace_ios();
}

module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)

MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: crc32");