// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *victim_entry_slab;

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len);

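/*
 * Background GC kernel thread: it sleeps for an adaptive interval and
 * then attempts one round of garbage collection whenever the
 * filesystem is idle (or immediately in urgent mode).  The sleep time
 * shrinks when there are many invalid blocks to reclaim and grows when
 * GC keeps finding nothing to do.
 */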
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	unsigned int wait_ms;

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		bool sync_mode;

		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = 0;

		if (try_to_freeze()) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
			f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
			f2fs_stop_checkpoint(sbi, false);
		}

		if (!sb_start_write_trylock(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs frequently.
		 * Because it is possible that some segments can be
		 * invalidated soon after by user update or deletion.
		 * So, I'd like to wait some time to collect dirty segments.
		 */
		if (sbi->gc_mode == GC_URGENT_HIGH) {
			wait_ms = gc_th->urgent_sleep_time;
			down_write(&sbi->gc_lock);
			goto do_gc;
		}

		if (!down_write_trylock(&sbi->gc_lock)) {
			stat_other_skip_bggc_count(sbi);
			goto next;
		}

		if (!is_idle(sbi, GC_TIME)) {
			increase_sleep_time(gc_th, &wait_ms);
			up_write(&sbi->gc_lock);
			stat_io_skip_bggc_count(sbi);
			goto next;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);
do_gc:
		stat_inc_bggc_count(sbi->stat_info);

		sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, sync_mode, true, false, NULL_SEGNO))
			wait_ms = gc_th->no_gc_sleep_time;

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi, true);
next:
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_wake = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

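/*
 * Map the GC trigger (BG_GC/FG_GC) and the user-selected sbi->gc_mode
 * to a victim selection algorithm: greedy (fewest valid blocks),
 * cost-benefit, or age-threshold based (ATGC).
 */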
static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode;

	if (gc_type == BG_GC) {
		if (sbi->am.atgc_enabled)
			gc_mode = GC_AT;
		else
			gc_mode = GC_CB;
	} else {
		gc_mode = GC_GREEDY;
	}

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT_HIGH:
		gc_mode = GC_GREEDY;
		break;
	case GC_IDLE_AT:
		gc_mode = GC_AT;
		break;
	}

	return gc_mode;
}

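/*
 * Fill the victim selection policy for this pass: which dirty bitmap to
 * scan, how many candidates to examine (max_search), the scan unit
 * (a single segment for SSR, a whole section for LFS), and the offset
 * to resume scanning from.
 */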
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else if (p->alloc_mode == AT_SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->ofs_unit = sbi->segs_per_sec;
		if (__is_large_section(sbi)) {
			p->dirty_bitmap = dirty_i->dirty_secmap;
			p->max_search = count_bits(p->dirty_bitmap,
						0, MAIN_SECS(sbi));
		} else {
			p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
			p->max_search = dirty_i->nr_dirty[DIRTY];
		}
	}

	/*
	 * adjust candidates range, should select all dirty segments for
	 * foreground GC and urgent GC cases.
	 */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT_HIGH) &&
			(p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select beginning hot/small space first in no_heap mode */
	if (test_opt(sbi, NOHEAP) &&
		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

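/*
 * Upper bound for a victim's cost, used as the initial "minimum" cost:
 * any real candidate must cost less than this value to be selected.
 */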
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	else if (p->alloc_mode == AT_SSR)
		return UINT_MAX;

	/* LFS */
	if (p->gc_mode == GC_GREEDY)
		return 2 * sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else if (p->gc_mode == GC_AT)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can select victim segments
	 * selected by background GC before.
	 * Those segments guarantee they have small valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}

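/*
 * Classic cost-benefit value of a section.  With u the utilization in
 * percent and age normalized to 0..100 (older is larger), the benefit
 * is proportional to (100 - u) * age / (100 + u); it is stored as
 * UINT_MAX minus that product so that a smaller value means a better
 * victim.
 */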
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;
	unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno);

	for (i = 0; i < usable_segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, usable_segs_per_sec);
	vblocks = div_u64(vblocks, usable_segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle if the system time has changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else if (p->gc_mode == GC_CB)
		return get_cb_cost(sbi, segno);

	f2fs_bug_on(sbi, 1);
	return 0;
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

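/*
 * ATGC keeps every candidate in two structures at once: an rb-tree
 * ordered by section mtime (for age lookups) and a plain list (for bulk
 * release in release_victim_entry()).
 */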
static struct victim_entry *attach_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno,
				struct rb_node *parent, struct rb_node **p,
				bool left_most)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;

	ve = f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS);

	ve->mtime = mtime;
	ve->segno = segno;

	rb_link_node(&ve->rb_node, parent, p);
	rb_insert_color_cached(&ve->rb_node, &am->root, left_most);

	list_add_tail(&ve->list, &am->victim_list);

	am->victim_count++;

	return ve;
}

static void insert_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	bool left_most = true;

	p = f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, mtime, &left_most);
	attach_victim_entry(sbi, mtime, segno, parent, p, left_most);
}

static void add_victim_entry(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int i;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (p->gc_mode == GC_AT &&
			get_valid_blocks(sbi, segno, true) == 0)
			return;
	}

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	mtime = div_u64(mtime, sbi->segs_per_sec);

	/* Handle if the system time has changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (mtime < sit_i->dirty_min_mtime)
		sit_i->dirty_min_mtime = mtime;
	if (mtime > sit_i->dirty_max_mtime)
		sit_i->dirty_max_mtime = mtime;

	/* don't choose young section as candidate */
	if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
		return;

	insert_victim_entry(sbi, mtime, segno);
}

static struct rb_node *lookup_central_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node *parent = NULL;
	bool left_most;

	f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, p->age, &left_most);

	return parent;
}

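/*
 * Walk the mtime-ordered tree from the oldest entry and score each
 * candidate by a weighted sum of its normalized age (age_weight
 * percent) and its invalid-block ratio (the remaining percent); the
 * cost is UINT_MAX minus that sum, so the lowest cost wins, with the
 * older section breaking ties.  At most dirty_threshold candidates are
 * scored.
 */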
static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node *node;
	struct rb_entry *re;
	struct victim_entry *ve;
	unsigned long long total_time;
	unsigned long long age, u, accu;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int sec_blocks = BLKS_PER_SEC(sbi);
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int age_weight = am->age_weight;
	unsigned int cost;
	unsigned int iter = 0;

	if (max_mtime < min_mtime)
		return;

	max_mtime += 1;
	total_time = max_mtime - min_mtime;

	accu = div64_u64(ULLONG_MAX, total_time);
	accu = min_t(unsigned long long, div_u64(accu, 100),
					DEFAULT_ACCURACY_CLASS);

	node = rb_first_cached(root);
next:
	re = rb_entry_safe(node, struct rb_entry, rb_node);
	if (!re)
		return;

	ve = (struct victim_entry *)re;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip;

	/* age = 10000 * x% * 60 */
	age = div64_u64(accu * (max_mtime - ve->mtime), total_time) *
								age_weight;

	vblocks = get_valid_blocks(sbi, ve->segno, true);
	f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);

	/* u = 10000 * x% * 40 */
	u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) *
							(100 - age_weight);

	f2fs_bug_on(sbi, age + u >= UINT_MAX);

	cost = UINT_MAX - (age + u);
	iter++;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip:
	if (iter < dirty_threshold) {
		node = rb_next(node);
		goto next;
	}
}

/*
 * select candidates around source section in range of
 * [target - dirty_threshold, target + dirty_threshold]
 */
static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_node *node;
	struct rb_entry *re;
	struct victim_entry *ve;
	unsigned long long age;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int seg_blocks = sbi->blocks_per_seg;
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int cost;
	unsigned int iter = 0;
	int stage = 0;

	if (max_mtime < min_mtime)
		return;
	max_mtime += 1;
next_stage:
	node = lookup_central_victim(sbi, p);
next_node:
	re = rb_entry_safe(node, struct rb_entry, rb_node);
	if (!re) {
		if (stage == 0)
			goto skip_stage;
		return;
	}

	ve = (struct victim_entry *)re;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip_node;

	age = max_mtime - ve->mtime;

	vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
	f2fs_bug_on(sbi, !vblocks);

	/* rare case */
	if (vblocks == seg_blocks)
		goto skip_node;

	iter++;

	age = max_mtime - abs(p->age - age);
	cost = UINT_MAX - vblocks;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip_node:
	if (iter < dirty_threshold) {
		if (stage == 0)
			node = rb_prev(node);
		else if (stage == 1)
			node = rb_next(node);
		goto next_node;
	}
skip_stage:
	if (stage < 1) {
		stage++;
		iter = 0;
		goto next_stage;
	}
}

static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
						&sbi->am.root, true));

	if (p->gc_mode == GC_AT)
		atgc_lookup_victim(sbi, p);
	else if (p->alloc_mode == AT_SSR)
		atssr_lookup_victim(sbi, p);
	else
		f2fs_bug_on(sbi, 1);
}

static void release_victim_entry(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve, *tmp;

	list_for_each_entry_safe(ve, tmp, &am->victim_list, list) {
		list_del(&ve->list);
		kmem_cache_free(victim_entry_slab, ve);
		am->victim_count--;
	}

	am->root = RB_ROOT_CACHED;

	f2fs_bug_on(sbi, am->victim_count);
	f2fs_bug_on(sbi, !list_empty(&am->victim_list));
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
			unsigned int *result, int gc_type, int type,
			char alloc_mode, unsigned long long age)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment;
	unsigned int nsearched;
	bool is_atgc;
	int ret = 0;

	mutex_lock(&dirty_i->seglist_lock);
	last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec;

	p.alloc_mode = alloc_mode;
	p.age = age;
	p.age_threshold = sbi->am.age_threshold;

retry:
	select_policy(sbi, gc_type, type, &p);
	p.min_segno = NULL_SEGNO;
	p.oldest_age = 0;
	p.min_cost = get_max_cost(sbi, &p);

	is_atgc = (p.gc_mode == GC_AT || p.alloc_mode == AT_SSR);
	nsearched = 0;

	if (is_atgc)
		SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;

	if (*result != NULL_SEGNO) {
		if (!get_valid_blocks(sbi, *result, false)) {
			ret = -ENODATA;
			goto out;
		}

		if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			ret = -EBUSY;
		else
			p.min_segno = *result;
		goto out;
	}

	ret = -ENODATA;
	if (p.max_search == 0)
		goto out;

	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[BG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
			goto got_result;
		}
		if (gc_type == FG_GC &&
				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[FG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
			goto got_result;
		}
	}

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

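	/*
	 * Linear scan over the dirty bitmap, at most p.max_search units:
	 * each candidate is either scored right away (greedy/cost-benefit)
	 * or queued for the age-based pass below (ATGC), and last_victim
	 * remembers where to resume on the next call.
	 */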
	while (1) {
		unsigned long cost, *dirty_bitmap;
		unsigned int unit_no, segno;

		dirty_bitmap = p.dirty_bitmap;
		unit_no = find_next_bit(dirty_bitmap,
				last_segment / p.ofs_unit,
				p.offset / p.ofs_unit);
		segno = unit_no * p.ofs_unit;
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		nsearched++;

#ifdef CONFIG_F2FS_CHECK_FS
		/*
		 * skip selecting the invalid segno (that is failed due to block
		 * validity check failure during GC) to avoid endless GC loop in
		 * such cases.
		 */
		if (test_bit(segno, sm->invalid_segmap))
			goto next;
#endif

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;

		/* Don't touch checkpointed data */
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
			if (p.alloc_mode == LFS) {
				/*
				 * LFS is set to find source section during GC.
				 * The victim should have no checkpointed data.
				 */
				if (get_ckpt_valid_blocks(sbi, segno, true))
					goto next;
			} else {
				/*
				 * SSR | AT_SSR are set to find target segment
				 * for writes which can be full by checkpointed
				 * and newly written blocks.
				 */
				if (!f2fs_segment_has_free_slot(sbi, segno))
					goto next;
			}
		}

		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		if (is_atgc) {
			add_victim_entry(sbi, &p, segno);
			goto next;
		}

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] =
					last_victim + p.ofs_unit;
			else
				sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
			sm->last_victim[p.gc_mode] %=
				(MAIN_SECS(sbi) * sbi->segs_per_sec);
			break;
		}
	}

	/* get victim for GC_AT/AT_SSR */
	if (is_atgc) {
		lookup_victim_by_age(sbi, &p);
		release_victim_entry(sbi);
	}

	if (is_atgc && p.min_segno == NULL_SEGNO &&
			sm->elapsed_time < p.age_threshold) {
		p.age_threshold = 0;
		goto retry;
	}

	if (p.min_segno != NULL_SEGNO) {
got_it:
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
got_result:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		ret = 0;

	}
out:
	if (p.min_segno != NULL_SEGNO)
		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	mutex_unlock(&dirty_i->seglist_lock);

	return ret;
}


static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(f2fs_inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary block
 * with the one in the NAT. If they match, the node is still valid and is
 * migrated with cold status; otherwise (a stale node) it is ignored.
 */
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	bool fggc = (gc_type == FG_GC);
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

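	/*
	 * Three passes over the segment's summary entries, tracked by
	 * "phase": 0 readahead of the NAT blocks, 1 readahead of the node
	 * pages, 2 validation against the NAT and migration via
	 * f2fs_move_node_page().
	 */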
next_step:
	entry = sum;

	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;
		int err;

		/* stop BG_GC if there is not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = f2fs_get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during f2fs_get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (f2fs_get_node_info(sbi, nid, &ni)) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		err = f2fs_move_node_page(node_page, gc_type);
		if (!err && gc_type == FG_GC)
			submitted++;
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;

	if (fggc)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return submitted;
}


/*
 * Calculate the start block index that the given node offset refers to.
 * Be careful: callers must pass only node offsets of direct node blocks.
 * Passing an offset of any other node block type, such as an indirect or
 * double indirect node block, is a caller's bug.
 */
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
}

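/*
 * Check whether a data block is still live: read the parent node page
 * named by the summary entry, validate the node against the NAT, and
 * confirm that the block address recorded in the node still points at
 * the block being collected.
 */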
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node, max_addrs;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	if (f2fs_get_node_info(sbi, nid, dni)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (sum->version != dni->version) {
		f2fs_warn(sbi, "%s: valid data with mismatched node version.",
			  __func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	if (f2fs_check_nid_range(sbi, dni->ino)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	max_addrs = IS_INODE(node_page) ? DEF_ADDRS_PER_INODE :
						DEF_ADDRS_PER_BLOCK;
	if (ofs_in_node >= max_addrs) {
		f2fs_err(sbi, "Inconsistent ofs_in_node:%u in summary, ino:%u, nid:%u, max:%u",
			ofs_in_node, dni->ino, dni->nid, max_addrs);
		/* drop the node page reference taken above to avoid a leak */
		f2fs_put_page(node_page, 1);
		return false;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr) {
#ifdef CONFIG_F2FS_CHECK_FS
		unsigned int segno = GET_SEGNO(sbi, blkaddr);
		unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

		if (unlikely(check_valid_map(sbi, segno, offset))) {
			if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
				f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u\n",
						blkaddr, source_blkaddr, segno);
				f2fs_bug_on(sbi, 1);
			}
		}
#endif
		return false;
	}
	return true;
}

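/*
 * Readahead one data block into META_MAPPING for inodes that need
 * post-read processing (e.g. encryption), so that move_data_block()
 * can later migrate the raw block without reading it again.
 */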
static int ra_data_block(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0, 0, 0};
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page)
		return -ENOMEM;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ))) {
			err = -EFSCORRUPTED;
			goto put_page;
		}
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_page;
	f2fs_put_dnode(&dn);

	if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
		err = -ENOENT;
		goto put_page;
	}
	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE))) {
		err = -EFSCORRUPTED;
		goto put_page;
	}
got_it:
	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	/*
	 * don't cache encrypted data into meta inode until previous dirty
	 * data has been written back, to avoid racing between GC and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
					dn.data_blkaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto put_page;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_encrypted_page;
	f2fs_put_page(fio.encrypted_page, 0);
	f2fs_put_page(page, 1);

	f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
	f2fs_update_iostat(sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);

	return 0;
put_encrypted_page:
	f2fs_put_page(fio.encrypted_page, 1);
put_page:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * Move data block via META_MAPPING while keeping locked data page.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
static int move_data_block(struct inode *inode, block_t bidx,
				int gc_type, unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page, *mpage;
	block_t newaddr;
	int err = 0;
	bool lfs_mode = f2fs_lfs_mode(fio.sbi);
	int type = fio.sbi->am.atgc_enabled ?
				CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return -ENOMEM;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		err = -EAGAIN;
		goto out;
	}

	if (f2fs_is_pinned_file(inode)) {
		if (gc_type == FG_GC)
			f2fs_pin_file_control(inode, true);
		err = -EAGAIN;
		goto out;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		err = -ENOENT;
		goto put_out;
	}

	/*
	 * don't cache encrypted data into meta inode until previous dirty
	 * data has been written back, to avoid racing between GC and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	if (err)
		goto put_out;

	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	if (lfs_mode)
		down_write(&fio.sbi->io_order_lock);

	mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
					fio.old_blkaddr, false);
	if (!mpage) {
		err = -ENOMEM;
		goto up_out;
	}

	fio.encrypted_page = mpage;

	/* read source block in mpage */
	if (!PageUptodate(mpage)) {
		err = f2fs_submit_page_bio(&fio);
		if (err) {
			f2fs_put_page(mpage, 1);
			goto up_out;
		}

		f2fs_update_iostat(fio.sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
		f2fs_update_iostat(fio.sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);

		lock_page(mpage);
		if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
						!PageUptodate(mpage))) {
			err = -EIO;
			f2fs_put_page(mpage, 1);
			goto up_out;
		}
	}

	f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, type, NULL);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		f2fs_put_page(mpage, 1);
		goto recover_block;
	}

	/* write target block */
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
	memcpy(page_address(fio.encrypted_page),
				page_address(mpage), PAGE_SIZE);
	f2fs_put_page(mpage, 1);
	invalidate_mapping_pages(META_MAPPING(fio.sbi),
				fio.old_blkaddr, fio.old_blkaddr);

	set_page_dirty(fio.encrypted_page);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);
	ClearPageError(page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);
	if (fio.retry) {
		err = -EAGAIN;
		if (PageWriteback(fio.encrypted_page))
			end_page_writeback(fio.encrypted_page);
		goto put_page_out;
	}

	f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
							true, true, true);
up_out:
	if (lfs_mode)
		up_write(&fio.sbi->io_order_lock);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}

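/*
 * Migrate one data block through the page cache: background GC only
 * marks the page dirty and cold so that regular writeback moves it,
 * while foreground GC writes the page out synchronously itself.
 */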
static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
							unsigned int segno, int off)
{
	struct page *page;
	int err = 0;

	page = f2fs_get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return PTR_ERR(page);

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		err = -EAGAIN;
		goto out;
	}
	if (f2fs_is_pinned_file(inode)) {
		if (gc_type == FG_GC)
			f2fs_pin_file_control(inode, true);
		err = -EAGAIN;
		goto out;
	}

	if (gc_type == BG_GC) {
		if (PageWriteback(page)) {
			err = -EAGAIN;
			goto out;
		}
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);

retry:
		f2fs_wait_on_page_writeback(page, DATA, true, true);

		set_page_dirty(page);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}

		set_cold_data(page);

		err = f2fs_do_write_data_page(&fio);
		if (err) {
			clear_cold_data(page);
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				goto retry;
			}
			if (is_dirty)
				set_page_dirty(page);
		}
	}
out:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * This function tries to get the parent node of a victim data block and
 * checks the block's validity. If the block is valid, it is copied with
 * cold status and the parent node is updated.
 * If the parent node is not valid or the data block address differs,
 * the victim data block is ignored.
 */
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
		bool force_migrate)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

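	/*
	 * Five passes over the segment's summary entries, tracked by
	 * "phase": 0 and 1 readahead the NAT blocks and node pages, 2
	 * readaheads the owning inode after a liveness check, 3 grabs the
	 * inode and caches/readaheads its data page, and 4 actually moves
	 * the block.
	 */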
next_step:
	entry = sum;

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/*
		 * stop BG_GC if there is not enough free sections.
		 * Or, stop GC if the segment becomes fully valid caused by
		 * race condition along with SSR block allocation.
		 */
		if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
			(!force_migrate && get_valid_blocks(sbi, segno, true) ==
							BLKS_PER_SEC(sbi)))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			f2fs_ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode)) {
				set_sbi_flag(sbi, SBI_NEED_FSCK);
				continue;
			}

			if (!down_write_trylock(
				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
				iput(inode);
				sbi->skipped_gc_rwsem++;
				continue;
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
								ofs_in_node;

			if (f2fs_post_read_required(inode)) {
				int err = ra_data_block(inode, start_bidx);

				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				if (err) {
					iput(inode);
					continue;
				}
				add_gc_inode(gc_list, inode);
				continue;
			}

			data_page = f2fs_get_read_data_page(inode,
						start_bidx, REQ_RAHEAD, true);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;
			int err;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->i_gc_rwsem[READ])) {
					sbi->skipped_gc_rwsem++;
					continue;
				}
				if (!down_write_trylock(
						&fi->i_gc_rwsem[WRITE])) {
					sbi->skipped_gc_rwsem++;
					up_write(&fi->i_gc_rwsem[READ]);
					continue;
				}
				locked = true;

				/* wait for all inflight aio data */
				inode_dio_wait(inode);
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_post_read_required(inode))
				err = move_data_block(inode, start_bidx,
							gc_type, segno, off);
			else
				err = move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (!err && (gc_type == FG_GC ||
					f2fs_post_read_required(inode)))
				submitted++;

			if (locked) {
				up_write(&fi->i_gc_rwsem[WRITE]);
				up_write(&fi->i_gc_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;

	return submitted;
}

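/*
 * Victim selection entry point for GC proper: always LFS allocation
 * mode with no segment type constraint, under the SIT sentry lock.
 */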
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					NO_CHECK_TYPE, LFS, 0);
	up_write(&sit_i->sentry_lock);
	return ret;
}

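/*
 * Collect one victim section: reference the summary blocks of all
 * segments in the section, then migrate each segment's blocks with
 * gc_node_segment() or gc_data_segment() according to the summary type.
 * Returns the number of segments freed (counted for foreground GC).
 */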
1551static int do_garbage_collect(struct f2fs_sb_info *sbi,
1552 unsigned int start_segno,
Olivier Deprez157378f2022-04-04 15:47:50 +02001553 struct gc_inode_list *gc_list, int gc_type,
1554 bool force_migrate)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001555{
1556 struct page *sum_page;
1557 struct f2fs_summary_block *sum;
1558 struct blk_plug plug;
1559 unsigned int segno = start_segno;
1560 unsigned int end_segno = start_segno + sbi->segs_per_sec;
David Brazdil0f672f62019-12-10 10:32:29 +00001561 int seg_freed = 0, migrated = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001562 unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
1563 SUM_TYPE_DATA : SUM_TYPE_NODE;
David Brazdil0f672f62019-12-10 10:32:29 +00001564 int submitted = 0;
1565
1566 if (__is_large_section(sbi))
1567 end_segno = rounddown(end_segno, sbi->segs_per_sec);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001568
Olivier Deprez157378f2022-04-04 15:47:50 +02001569 /*
1570 * zone-capacity can be less than zone-size in zoned devices,
1571 * resulting in less than expected usable segments in the zone,
1572 * calculate the end segno in the zone which can be garbage collected
1573 */
1574 if (f2fs_sb_has_blkzoned(sbi))
1575 end_segno -= sbi->segs_per_sec -
1576 f2fs_usable_segs_in_sec(sbi, segno);
1577
1578 sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);
1579
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001580 /* readahead multi ssa blocks those have contiguous address */
David Brazdil0f672f62019-12-10 10:32:29 +00001581 if (__is_large_section(sbi))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001582 f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
David Brazdil0f672f62019-12-10 10:32:29 +00001583 end_segno - segno, META_SSA, true);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001584
1585 /* reference all summary page */
1586 while (segno < end_segno) {
1587 sum_page = f2fs_get_sum_page(sbi, segno++);
David Brazdil0f672f62019-12-10 10:32:29 +00001588 if (IS_ERR(sum_page)) {
1589 int err = PTR_ERR(sum_page);
1590
1591 end_segno = segno - 1;
1592 for (segno = start_segno; segno < end_segno; segno++) {
1593 sum_page = find_get_page(META_MAPPING(sbi),
1594 GET_SUM_BLOCK(sbi, segno));
1595 f2fs_put_page(sum_page, 0);
1596 f2fs_put_page(sum_page, 0);
1597 }
1598 return err;
1599 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001600 unlock_page(sum_page);
1601 }
1602
1603 blk_start_plug(&plug);
1604
1605 for (segno = start_segno; segno < end_segno; segno++) {
1606
1607 /* find segment summary of victim */
1608 sum_page = find_get_page(META_MAPPING(sbi),
1609 GET_SUM_BLOCK(sbi, segno));
1610 f2fs_put_page(sum_page, 0);
1611
David Brazdil0f672f62019-12-10 10:32:29 +00001612 if (get_valid_blocks(sbi, segno, false) == 0)
1613 goto freed;
Olivier Deprez157378f2022-04-04 15:47:50 +02001614 if (gc_type == BG_GC && __is_large_section(sbi) &&
David Brazdil0f672f62019-12-10 10:32:29 +00001615 migrated >= sbi->migration_granularity)
1616 goto skip;
1617 if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
1618 goto skip;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001619
1620 sum = page_address(sum_page);
1621 if (type != GET_SUM_TYPE((&sum->footer))) {
David Brazdil0f672f62019-12-10 10:32:29 +00001622 f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
1623 segno, type, GET_SUM_TYPE((&sum->footer)));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001624 set_sbi_flag(sbi, SBI_NEED_FSCK);
David Brazdil0f672f62019-12-10 10:32:29 +00001625 f2fs_stop_checkpoint(sbi, false);
1626 goto skip;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001627 }
1628
1629 /*
1630 * this is to avoid deadlock:
1631 * - lock_page(sum_page) - f2fs_replace_block
1632 * - check_valid_map() - down_write(sentry_lock)
1633 * - down_read(sentry_lock) - change_curseg()
1634 * - lock_page(sum_page)
1635 */
1636 if (type == SUM_TYPE_NODE)
David Brazdil0f672f62019-12-10 10:32:29 +00001637 submitted += gc_node_segment(sbi, sum->entries, segno,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001638 gc_type);
David Brazdil0f672f62019-12-10 10:32:29 +00001639 else
1640 submitted += gc_data_segment(sbi, sum->entries, gc_list,
Olivier Deprez157378f2022-04-04 15:47:50 +02001641 segno, gc_type,
1642 force_migrate);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001643
1644 stat_inc_seg_count(sbi, type, gc_type);
Olivier Deprez157378f2022-04-04 15:47:50 +02001645 migrated++;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001646
David Brazdil0f672f62019-12-10 10:32:29 +00001647freed:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001648 if (gc_type == FG_GC &&
1649 get_valid_blocks(sbi, segno, false) == 0)
1650 seg_freed++;
David Brazdil0f672f62019-12-10 10:32:29 +00001651
1652 if (__is_large_section(sbi) && segno + 1 < end_segno)
1653 sbi->next_victim_seg[gc_type] = segno + 1;
1654skip:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001655 f2fs_put_page(sum_page, 0);
1656 }
1657
David Brazdil0f672f62019-12-10 10:32:29 +00001658 if (submitted)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001659 f2fs_submit_merged_write(sbi,
1660 (type == SUM_TYPE_NODE) ? NODE : DATA);
1661
1662 blk_finish_plug(&plug);
1663
1664 stat_inc_call_count(sbi->stat_info);
1665
1666 return seg_freed;
1667}
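/*
 * Illustrative sketch: do_garbage_collect() brackets the per-segment work
 * with blk_start_plug()/blk_finish_plug() so that the many small reads and
 * writes queued by the GC passes can be merged by the block layer before
 * they are dispatched.  The generic pattern:
 */
#if 0	/* illustrative only, never compiled */
static void demo_plugged_io(void)
{
	struct blk_plug plug;

	blk_start_plug(&plug);
	/* queue many small, possibly adjacent reads/writes here */
	blk_finish_plug(&plug);	/* unplug: dispatch the batched requests */
}
#endif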
1668
1669int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
Olivier Deprez157378f2022-04-04 15:47:50 +02001670 bool background, bool force, unsigned int segno)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001671{
1672 int gc_type = sync ? FG_GC : BG_GC;
1673 int sec_freed = 0, seg_freed = 0, total_freed = 0;
1674 int ret = 0;
1675 struct cp_control cpc;
1676 unsigned int init_segno = segno;
1677 struct gc_inode_list gc_list = {
1678 .ilist = LIST_HEAD_INIT(gc_list.ilist),
1679 .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
1680 };
1681 unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
1682 unsigned long long first_skipped;
1683 unsigned int skipped_round = 0, round = 0;
1684
1685 trace_f2fs_gc_begin(sbi->sb, sync, background,
1686 get_pages(sbi, F2FS_DIRTY_NODES),
1687 get_pages(sbi, F2FS_DIRTY_DENTS),
1688 get_pages(sbi, F2FS_DIRTY_IMETA),
1689 free_sections(sbi),
1690 free_segments(sbi),
1691 reserved_segments(sbi),
1692 prefree_segments(sbi));
1693
1694 cpc.reason = __get_cp_reason(sbi);
1695 sbi->skipped_gc_rwsem = 0;
1696 first_skipped = last_skipped;
1697gc_more:
1698 if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
1699 ret = -EINVAL;
1700 goto stop;
1701 }
1702 if (unlikely(f2fs_cp_error(sbi))) {
1703 ret = -EIO;
1704 goto stop;
1705 }
1706
1707 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
1708 /*
1709		 * For example, if there are many prefree_segments below the given
1710		 * threshold, we can free them by writing a checkpoint. Then, we
1711		 * secure free segments which don't need FG_GC any more.
1712 */
David Brazdil0f672f62019-12-10 10:32:29 +00001713 if (prefree_segments(sbi) &&
1714 !is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001715 ret = f2fs_write_checkpoint(sbi, &cpc);
1716 if (ret)
1717 goto stop;
1718 }
1719 if (has_not_enough_free_secs(sbi, 0, 0))
1720 gc_type = FG_GC;
1721 }
1722
1723	/* f2fs_balance_fs doesn't need to do BG_GC in the critical path. */
1724 if (gc_type == BG_GC && !background) {
1725 ret = -EINVAL;
1726 goto stop;
1727 }
Olivier Deprez157378f2022-04-04 15:47:50 +02001728 ret = __get_victim(sbi, &segno, gc_type);
1729 if (ret)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001730 goto stop;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001731
Olivier Deprez157378f2022-04-04 15:47:50 +02001732 seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type, force);
1733 if (gc_type == FG_GC &&
1734 seg_freed == f2fs_usable_segs_in_sec(sbi, segno))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001735 sec_freed++;
1736 total_freed += seg_freed;
1737
1738 if (gc_type == FG_GC) {
1739 if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
1740 sbi->skipped_gc_rwsem)
1741 skipped_round++;
1742 last_skipped = sbi->skipped_atomic_files[FG_GC];
1743 round++;
1744 }
1745
David Brazdil0f672f62019-12-10 10:32:29 +00001746 if (gc_type == FG_GC && seg_freed)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001747 sbi->cur_victim_sec = NULL_SEGNO;
1748
1749 if (sync)
1750 goto stop;
1751
Olivier Deprez92d4c212022-12-06 15:05:30 +01001752 if (!has_not_enough_free_secs(sbi, sec_freed, 0))
1753 goto stop;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001754
Olivier Deprez92d4c212022-12-06 15:05:30 +01001755 if (skipped_round <= MAX_SKIP_GC_COUNT || skipped_round * 2 < round) {
1756
1757 /* Write checkpoint to reclaim prefree segments */
1758 if (free_sections(sbi) < NR_CURSEG_PERSIST_TYPE &&
1759 prefree_segments(sbi) &&
1760 !is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001761 ret = f2fs_write_checkpoint(sbi, &cpc);
Olivier Deprez92d4c212022-12-06 15:05:30 +01001762 if (ret)
1763 goto stop;
1764 }
1765 segno = NULL_SEGNO;
1766 goto gc_more;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001767 }
Olivier Deprez92d4c212022-12-06 15:05:30 +01001768 if (first_skipped < last_skipped &&
1769 (last_skipped - first_skipped) >
1770 sbi->skipped_gc_rwsem) {
1771 f2fs_drop_inmem_pages_all(sbi, true);
1772 segno = NULL_SEGNO;
1773 goto gc_more;
1774 }
1775 if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED))
1776 ret = f2fs_write_checkpoint(sbi, &cpc);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001777stop:
1778 SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
1779 SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;
1780
1781 trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
1782 get_pages(sbi, F2FS_DIRTY_NODES),
1783 get_pages(sbi, F2FS_DIRTY_DENTS),
1784 get_pages(sbi, F2FS_DIRTY_IMETA),
1785 free_sections(sbi),
1786 free_segments(sbi),
1787 reserved_segments(sbi),
1788 prefree_segments(sbi));
1789
Olivier Deprez157378f2022-04-04 15:47:50 +02001790 up_write(&sbi->gc_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001791
1792 put_gc_inode(&gc_list);
1793
David Brazdil0f672f62019-12-10 10:32:29 +00001794 if (sync && !ret)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001795 ret = sec_freed ? 0 : -EAGAIN;
1796 return ret;
1797}
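/*
 * Illustrative caller sketch: f2fs_gc() expects sbi->gc_lock to be held
 * for write on entry and drops it itself on every path through the stop:
 * label (see the up_write() above), so a foreground caller would look
 * roughly like this:
 */
#if 0	/* illustrative only, never compiled */
static void demo_foreground_gc(struct f2fs_sb_info *sbi)
{
	if (has_not_enough_free_secs(sbi, 0, 0)) {
		down_write(&sbi->gc_lock);
		/* sync == true requests FG_GC; gc_lock is released inside */
		f2fs_gc(sbi, true, false, false, NULL_SEGNO);
	}
}
#endif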
1798
Olivier Deprez157378f2022-04-04 15:47:50 +02001799int __init f2fs_create_garbage_collection_cache(void)
1800{
1801 victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry",
1802 sizeof(struct victim_entry));
1803 if (!victim_entry_slab)
1804 return -ENOMEM;
1805 return 0;
1806}
1807
1808void f2fs_destroy_garbage_collection_cache(void)
1809{
1810 kmem_cache_destroy(victim_entry_slab);
1811}
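/*
 * Illustrative pairing sketch: the two helpers above are intended to be
 * called once from filesystem init/exit, with the destroy path also run
 * when a later init step fails.  A hypothetical caller:
 */
#if 0	/* illustrative only, never compiled */
static int __init demo_init(void)
{
	int err = f2fs_create_garbage_collection_cache();

	if (err)
		return err;
	/* ... create other caches; on any later failure, unwind with: */
	/* f2fs_destroy_garbage_collection_cache(); */
	return 0;
}
#endif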
1812
1813static void init_atgc_management(struct f2fs_sb_info *sbi)
1814{
1815 struct atgc_management *am = &sbi->am;
1816
1817 if (test_opt(sbi, ATGC) &&
1818 SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
1819 am->atgc_enabled = true;
1820
1821 am->root = RB_ROOT_CACHED;
1822 INIT_LIST_HEAD(&am->victim_list);
1823 am->victim_count = 0;
1824
1825 am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO;
1826 am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT;
1827 am->age_weight = DEF_GC_THREAD_AGE_WEIGHT;
1828 am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD;
1829}
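/*
 * Note: the DEF_GC_THREAD_* values above only seed the ATGC heuristics.
 * Assumption to verify for the target kernel: they can also be tuned at
 * runtime through per-filesystem sysfs knobs such as atgc_candidate_ratio
 * (see the f2fs entries under Documentation/ABI).
 */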
1830
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001831void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
1832{
1833 DIRTY_I(sbi)->v_ops = &default_v_ops;
1834
1835 sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;
1836
1837	/* serve the warm/cold data area from the slower device */
David Brazdil0f672f62019-12-10 10:32:29 +00001838 if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001839 SIT_I(sbi)->last_victim[ALLOC_NEXT] =
1840 GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
Olivier Deprez157378f2022-04-04 15:47:50 +02001841
1842 init_atgc_management(sbi);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001843}
David Brazdil0f672f62019-12-10 10:32:29 +00001844
Olivier Deprez157378f2022-04-04 15:47:50 +02001845static int free_segment_range(struct f2fs_sb_info *sbi,
1846 unsigned int secs, bool gc_only)
David Brazdil0f672f62019-12-10 10:32:29 +00001847{
Olivier Deprez157378f2022-04-04 15:47:50 +02001848 unsigned int segno, next_inuse, start, end;
1849 struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
1850 int gc_mode, gc_type;
David Brazdil0f672f62019-12-10 10:32:29 +00001851 int err = 0;
Olivier Deprez157378f2022-04-04 15:47:50 +02001852 int type;
1853
1854 /* Force block allocation for GC */
1855 MAIN_SECS(sbi) -= secs;
1856 start = MAIN_SECS(sbi) * sbi->segs_per_sec;
1857 end = MAIN_SEGS(sbi) - 1;
1858
1859 mutex_lock(&DIRTY_I(sbi)->seglist_lock);
1860 for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
1861 if (SIT_I(sbi)->last_victim[gc_mode] >= start)
1862 SIT_I(sbi)->last_victim[gc_mode] = 0;
1863
1864 for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
1865 if (sbi->next_victim_seg[gc_type] >= start)
1866 sbi->next_victim_seg[gc_type] = NULL_SEGNO;
1867 mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
David Brazdil0f672f62019-12-10 10:32:29 +00001868
1869 /* Move out cursegs from the target range */
Olivier Deprez157378f2022-04-04 15:47:50 +02001870 for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++)
1871 f2fs_allocate_segment_for_resize(sbi, type, start, end);
David Brazdil0f672f62019-12-10 10:32:29 +00001872
1873 /* do GC to move out valid blocks in the range */
1874 for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
1875 struct gc_inode_list gc_list = {
1876 .ilist = LIST_HEAD_INIT(gc_list.ilist),
1877 .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
1878 };
1879
Olivier Deprez157378f2022-04-04 15:47:50 +02001880 do_garbage_collect(sbi, segno, &gc_list, FG_GC, true);
David Brazdil0f672f62019-12-10 10:32:29 +00001881 put_gc_inode(&gc_list);
1882
Olivier Deprez157378f2022-04-04 15:47:50 +02001883 if (!gc_only && get_valid_blocks(sbi, segno, true)) {
1884 err = -EAGAIN;
1885 goto out;
1886 }
1887 if (fatal_signal_pending(current)) {
1888 err = -ERESTARTSYS;
1889 goto out;
1890 }
David Brazdil0f672f62019-12-10 10:32:29 +00001891 }
Olivier Deprez157378f2022-04-04 15:47:50 +02001892 if (gc_only)
1893 goto out;
David Brazdil0f672f62019-12-10 10:32:29 +00001894
Olivier Deprez157378f2022-04-04 15:47:50 +02001895 err = f2fs_write_checkpoint(sbi, &cpc);
David Brazdil0f672f62019-12-10 10:32:29 +00001896 if (err)
Olivier Deprez157378f2022-04-04 15:47:50 +02001897 goto out;
David Brazdil0f672f62019-12-10 10:32:29 +00001898
1899 next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
1900 if (next_inuse <= end) {
1901 f2fs_err(sbi, "segno %u should be free but still inuse!",
1902 next_inuse);
1903 f2fs_bug_on(sbi, 1);
1904 }
Olivier Deprez157378f2022-04-04 15:47:50 +02001905out:
1906 MAIN_SECS(sbi) += secs;
David Brazdil0f672f62019-12-10 10:32:29 +00001907 return err;
1908}
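/*
 * free_segment_range() is deliberately run twice by f2fs_resize_fs()
 * below: first with gc_only == true, before the filesystem is frozen, to
 * migrate most valid blocks cheaply, and then with gc_only == false to
 * require that the shrunk range is completely empty and checkpointed.
 */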
1909
1910static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
1911{
1912 struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
Olivier Deprez157378f2022-04-04 15:47:50 +02001913 int section_count;
1914 int segment_count;
1915 int segment_count_main;
1916 long long block_count;
David Brazdil0f672f62019-12-10 10:32:29 +00001917 int segs = secs * sbi->segs_per_sec;
1918
Olivier Deprez157378f2022-04-04 15:47:50 +02001919 down_write(&sbi->sb_lock);
1920
1921 section_count = le32_to_cpu(raw_sb->section_count);
1922 segment_count = le32_to_cpu(raw_sb->segment_count);
1923 segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
1924 block_count = le64_to_cpu(raw_sb->block_count);
1925
David Brazdil0f672f62019-12-10 10:32:29 +00001926 raw_sb->section_count = cpu_to_le32(section_count + secs);
1927 raw_sb->segment_count = cpu_to_le32(segment_count + segs);
1928 raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
1929 raw_sb->block_count = cpu_to_le64(block_count +
1930 (long long)segs * sbi->blocks_per_seg);
Olivier Deprez157378f2022-04-04 15:47:50 +02001931 if (f2fs_is_multi_device(sbi)) {
1932 int last_dev = sbi->s_ndevs - 1;
1933 int dev_segs =
1934 le32_to_cpu(raw_sb->devs[last_dev].total_segments);
1935
1936 raw_sb->devs[last_dev].total_segments =
1937 cpu_to_le32(dev_segs + segs);
1938 }
1939
1940 up_write(&sbi->sb_lock);
David Brazdil0f672f62019-12-10 10:32:29 +00001941}
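/*
 * Illustrative sketch: every on-disk field touched above follows the same
 * endian-safe read-modify-write pattern, because f2fs_super_block stores
 * little-endian values regardless of host byte order:
 */
#if 0	/* illustrative only, never compiled */
static void demo_bump_section_count(struct f2fs_super_block *raw_sb, int secs)
{
	u32 v = le32_to_cpu(raw_sb->section_count);	/* disk (LE) -> CPU */

	raw_sb->section_count = cpu_to_le32(v + secs);	/* CPU -> disk (LE) */
}
#endif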
1942
1943static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
1944{
1945 int segs = secs * sbi->segs_per_sec;
Olivier Deprez157378f2022-04-04 15:47:50 +02001946 long long blks = (long long)segs * sbi->blocks_per_seg;
David Brazdil0f672f62019-12-10 10:32:29 +00001947 long long user_block_count =
1948 le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);
1949
1950 SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
1951 MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
Olivier Deprez157378f2022-04-04 15:47:50 +02001952 MAIN_SECS(sbi) += secs;
David Brazdil0f672f62019-12-10 10:32:29 +00001953 FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
1954 FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
Olivier Deprez157378f2022-04-04 15:47:50 +02001955 F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);
1956
1957 if (f2fs_is_multi_device(sbi)) {
1958 int last_dev = sbi->s_ndevs - 1;
1959
1960 FDEV(last_dev).total_segments =
1961 (int)FDEV(last_dev).total_segments + segs;
1962 FDEV(last_dev).end_blk =
1963 (long long)FDEV(last_dev).end_blk + blks;
1964#ifdef CONFIG_BLK_DEV_ZONED
1965 FDEV(last_dev).nr_blkz = (int)FDEV(last_dev).nr_blkz +
1966 (int)(blks >> sbi->log_blocks_per_blkz);
1967#endif
1968 }
David Brazdil0f672f62019-12-10 10:32:29 +00001969}
1970
1971int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
1972{
1973 __u64 old_block_count, shrunk_blocks;
Olivier Deprez157378f2022-04-04 15:47:50 +02001974 struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
David Brazdil0f672f62019-12-10 10:32:29 +00001975 unsigned int secs;
David Brazdil0f672f62019-12-10 10:32:29 +00001976 int err = 0;
1977 __u32 rem;
1978
1979 old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
1980 if (block_count > old_block_count)
1981 return -EINVAL;
1982
Olivier Deprez157378f2022-04-04 15:47:50 +02001983 if (f2fs_is_multi_device(sbi)) {
1984 int last_dev = sbi->s_ndevs - 1;
1985 __u64 last_segs = FDEV(last_dev).total_segments;
1986
1987 if (block_count + last_segs * sbi->blocks_per_seg <=
1988 old_block_count)
1989 return -EINVAL;
1990 }
1991
David Brazdil0f672f62019-12-10 10:32:29 +00001992	/* the new fs size must be aligned to the section size */
1993 div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
1994 if (rem)
1995 return -EINVAL;
1996
1997 if (block_count == old_block_count)
1998 return 0;
1999
2000 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
2001 f2fs_err(sbi, "Should run fsck to repair first.");
2002 return -EFSCORRUPTED;
2003 }
2004
2005 if (test_opt(sbi, DISABLE_CHECKPOINT)) {
2006 f2fs_err(sbi, "Checkpoint should be enabled.");
2007 return -EINVAL;
2008 }
2009
David Brazdil0f672f62019-12-10 10:32:29 +00002010 shrunk_blocks = old_block_count - block_count;
2011 secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));
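	/*
	 * Worked example, assuming default geometry (4 KiB blocks, 512
	 * blocks per segment, one segment per section): shrinking by 1 GiB
	 * gives shrunk_blocks == 262144 and secs == 512.
	 */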
Olivier Deprez157378f2022-04-04 15:47:50 +02002012
2013 /* stop other GC */
2014 if (!down_write_trylock(&sbi->gc_lock))
2015 return -EAGAIN;
2016
2017 /* stop CP to protect MAIN_SEC in free_segment_range */
2018 f2fs_lock_op(sbi);
2019
2020 spin_lock(&sbi->stat_lock);
2021 if (shrunk_blocks + valid_user_blocks(sbi) +
2022 sbi->current_reserved_blocks + sbi->unusable_block_count +
2023 F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
2024 err = -ENOSPC;
2025 spin_unlock(&sbi->stat_lock);
2026
2027 if (err)
2028 goto out_unlock;
2029
2030 err = free_segment_range(sbi, secs, true);
2031
2032out_unlock:
2033 f2fs_unlock_op(sbi);
2034 up_write(&sbi->gc_lock);
2035 if (err)
2036 return err;
2037
2038 set_sbi_flag(sbi, SBI_IS_RESIZEFS);
2039
2040 freeze_super(sbi->sb);
2041 down_write(&sbi->gc_lock);
2042 mutex_lock(&sbi->cp_mutex);
2043
David Brazdil0f672f62019-12-10 10:32:29 +00002044 spin_lock(&sbi->stat_lock);
2045 if (shrunk_blocks + valid_user_blocks(sbi) +
2046 sbi->current_reserved_blocks + sbi->unusable_block_count +
2047 F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
2048 err = -ENOSPC;
2049 else
2050 sbi->user_block_count -= shrunk_blocks;
2051 spin_unlock(&sbi->stat_lock);
David Brazdil0f672f62019-12-10 10:32:29 +00002052 if (err)
Olivier Deprez157378f2022-04-04 15:47:50 +02002053 goto out_err;
2054
2055 err = free_segment_range(sbi, secs, false);
2056 if (err)
2057 goto recover_out;
David Brazdil0f672f62019-12-10 10:32:29 +00002058
2059 update_sb_metadata(sbi, -secs);
2060
2061 err = f2fs_commit_super(sbi, false);
2062 if (err) {
2063 update_sb_metadata(sbi, secs);
Olivier Deprez157378f2022-04-04 15:47:50 +02002064 goto recover_out;
David Brazdil0f672f62019-12-10 10:32:29 +00002065 }
2066
2067 update_fs_metadata(sbi, -secs);
2068 clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
Olivier Deprez0e641232021-09-23 10:07:05 +02002069 set_sbi_flag(sbi, SBI_IS_DIRTY);
Olivier Deprez0e641232021-09-23 10:07:05 +02002070
Olivier Deprez157378f2022-04-04 15:47:50 +02002071 err = f2fs_write_checkpoint(sbi, &cpc);
David Brazdil0f672f62019-12-10 10:32:29 +00002072 if (err) {
2073 update_fs_metadata(sbi, secs);
2074 update_sb_metadata(sbi, secs);
2075 f2fs_commit_super(sbi, false);
2076 }
Olivier Deprez157378f2022-04-04 15:47:50 +02002077recover_out:
David Brazdil0f672f62019-12-10 10:32:29 +00002078 if (err) {
2079 set_sbi_flag(sbi, SBI_NEED_FSCK);
2080 f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");
2081
David Brazdil0f672f62019-12-10 10:32:29 +00002082 spin_lock(&sbi->stat_lock);
2083 sbi->user_block_count += shrunk_blocks;
2084 spin_unlock(&sbi->stat_lock);
2085 }
Olivier Deprez157378f2022-04-04 15:47:50 +02002086out_err:
2087 mutex_unlock(&sbi->cp_mutex);
2088 up_write(&sbi->gc_lock);
2089 thaw_super(sbi->sb);
David Brazdil0f672f62019-12-10 10:32:29 +00002090 clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
David Brazdil0f672f62019-12-10 10:32:29 +00002091 return err;
2092}
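/*
 * Illustrative user-space sketch: f2fs_resize_fs() is reached through the
 * F2FS_IOC_RESIZE_FS ioctl, which takes the new total block count.  The
 * header providing the ioctl number varies by kernel version (it moved to
 * the uapi <linux/f2fs.h> only later), so treat that as an assumption:
 */
#if 0	/* illustrative only, never compiled */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/types.h>
#include <linux/f2fs.h>		/* assumption: provides F2FS_IOC_RESIZE_FS */

static int f2fs_shrink(const char *mountpoint, __u64 new_block_count)
{
	int fd = open(mountpoint, O_RDONLY);
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, F2FS_IOC_RESIZE_FS, &new_block_count);	/* 0 on success */
	close(fd);
	return ret;
}
#endif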