// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/kthread.h>
#include <linux/swap.h>
#include <linux/timer.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define __reverse_ffz(x) __reverse_ffs(~(x))

static struct kmem_cache *discard_entry_slab;
static struct kmem_cache *discard_cmd_slab;
static struct kmem_cache *sit_entry_set_slab;
static struct kmem_cache *inmem_entry_slab;

static unsigned long __reverse_ulong(unsigned char *str)
{
	unsigned long tmp = 0;
	int shift = 24, idx = 0;

#if BITS_PER_LONG == 64
	shift = 56;
#endif
	while (shift >= 0) {
		tmp |= (unsigned long)str[idx++] << shift;
		shift -= BITS_PER_BYTE;
	}
	return tmp;
}
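
/*
 * Example (illustrative, not from the original source): on a little-endian
 * 64-bit machine, __reverse_ulong() loads the bytes of a bitmap word in
 * big-endian order, so for str = {0x80, 0x00, ..., 0x00} it returns
 * 0x8000000000000000UL, i.e. the bit set by f2fs_set_bit(0, bitmap)
 * becomes the MSB of the returned word.
 */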

/*
 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
 * MSB and LSB are reversed in a byte by f2fs_set_bit.
 */
static inline unsigned long __reverse_ffs(unsigned long word)
{
	int num = 0;

#if BITS_PER_LONG == 64
	if ((word & 0xffffffff00000000UL) == 0)
		num += 32;
	else
		word >>= 32;
#endif
	if ((word & 0xffff0000) == 0)
		num += 16;
	else
		word >>= 16;

	if ((word & 0xff00) == 0)
		num += 8;
	else
		word >>= 8;

	if ((word & 0xf0) == 0)
		num += 4;
	else
		word >>= 4;

	if ((word & 0xc) == 0)
		num += 2;
	else
		word >>= 2;

	if ((word & 0x2) == 0)
		num += 1;
	return num;
}

/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * @size must be an integral multiple of BITS_PER_LONG.
 * Example:
 *                             MSB <--> LSB
 *   f2fs_set_bit(0, bitmap) => 1000 0000
 *   f2fs_set_bit(7, bitmap) => 0000 0001
 */
static unsigned long __find_rev_next_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == 0)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		tmp &= ~0UL >> offset;
		if (size < BITS_PER_LONG)
			tmp &= (~0UL << (BITS_PER_LONG - size));
		if (tmp)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffs(tmp);
}
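
/*
 * Example (illustrative): with a bitmap where only f2fs_set_bit(5, bitmap)
 * has been called, __find_rev_next_bit(bitmap, BITS_PER_LONG, 0) returns 5,
 * and __find_rev_next_bit(bitmap, BITS_PER_LONG, 6) returns BITS_PER_LONG
 * (i.e. "not found"), mirroring the find_next_bit() contract but with the
 * reversed in-byte ordering shown above.
 */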

static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == ~0UL)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		if (offset)
			tmp |= ~0UL << (BITS_PER_LONG - offset);
		if (size < BITS_PER_LONG)
			tmp |= ~0UL >> size;
		if (tmp != ~0UL)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffz(tmp);
}

bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);

	if (f2fs_lfs_mode(sbi))
		return false;
	if (sbi->gc_mode == GC_URGENT_HIGH)
		return true;
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return true;

	return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
			SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
}
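
/*
 * Example (illustrative numbers): with node_secs = 2, dent_secs = 1,
 * imeta_secs = 0, min_ssr_sections = 4 and reserved_sections() = 10,
 * SSR is needed once free_sections(sbi) drops to 2 + 2*1 + 0 + 4 + 10 = 18
 * or below; note that dentry sections are weighted double in the threshold.
 */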

void f2fs_register_inmem_page(struct inode *inode, struct page *page)
{
	struct inmem_pages *new;

	f2fs_trace_pid(page);

	f2fs_set_page_private(page, ATOMIC_WRITTEN_PAGE);

	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);

	/* add atomic page indices to the list */
	new->page = page;
	INIT_LIST_HEAD(&new->list);

	/* increase reference count with clean state */
	get_page(page);
	mutex_lock(&F2FS_I(inode)->inmem_lock);
	list_add_tail(&new->list, &F2FS_I(inode)->inmem_pages);
	inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	mutex_unlock(&F2FS_I(inode)->inmem_lock);

	trace_f2fs_register_inmem_page(page, INMEM);
}

static int __revoke_inmem_pages(struct inode *inode,
				struct list_head *head, bool drop, bool recover,
				bool trylock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct inmem_pages *cur, *tmp;
	int err = 0;

	list_for_each_entry_safe(cur, tmp, head, list) {
		struct page *page = cur->page;

		if (drop)
			trace_f2fs_commit_inmem_page(page, INMEM_DROP);

		if (trylock) {
			/*
			 * to avoid deadlock between the page lock and
			 * inmem_lock
			 */
			if (!trylock_page(page))
				continue;
		} else {
			lock_page(page);
		}

		f2fs_wait_on_page_writeback(page, DATA, true, true);

		if (recover) {
			struct dnode_of_data dn;
			struct node_info ni;

			trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);
retry:
			set_new_dnode(&dn, inode, NULL, NULL, 0);
			err = f2fs_get_dnode_of_data(&dn, page->index,
								LOOKUP_NODE);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC,
							DEFAULT_IO_TIMEOUT);
					cond_resched();
					goto retry;
				}
				err = -EAGAIN;
				goto next;
			}

			err = f2fs_get_node_info(sbi, dn.nid, &ni);
			if (err) {
				f2fs_put_dnode(&dn);
				return err;
			}

			if (cur->old_addr == NEW_ADDR) {
				f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
			} else
				f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					cur->old_addr, ni.version, true, true);
			f2fs_put_dnode(&dn);
		}
next:
		/* we don't need to invalidate this in the successful status */
		if (drop || recover) {
			ClearPageUptodate(page);
			clear_cold_data(page);
		}
		f2fs_clear_page_private(page);
		f2fs_put_page(page, 1);

		list_del(&cur->list);
		kmem_cache_free(inmem_entry_slab, cur);
		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	}
	return err;
}

void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure)
{
	struct list_head *head = &sbi->inode_list[ATOMIC_FILE];
	struct inode *inode;
	struct f2fs_inode_info *fi;
	unsigned int count = sbi->atomic_files;
	unsigned int looped = 0;
next:
	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
	if (list_empty(head)) {
		spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
		return;
	}
	fi = list_first_entry(head, struct f2fs_inode_info, inmem_ilist);
	inode = igrab(&fi->vfs_inode);
	if (inode)
		list_move_tail(&fi->inmem_ilist, head);
	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);

	if (inode) {
		if (gc_failure) {
			if (!fi->i_gc_failures[GC_FAILURE_ATOMIC])
				goto skip;
		}
		set_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
		f2fs_drop_inmem_pages(inode);
skip:
		iput(inode);
	}
	congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
	cond_resched();
	if (gc_failure) {
		if (++looped >= count)
			return;
	}
	goto next;
}

void f2fs_drop_inmem_pages(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);

	do {
		mutex_lock(&fi->inmem_lock);
		if (list_empty(&fi->inmem_pages)) {
			fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;

			spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
			if (!list_empty(&fi->inmem_ilist))
				list_del_init(&fi->inmem_ilist);
			if (f2fs_is_atomic_file(inode)) {
				clear_inode_flag(inode, FI_ATOMIC_FILE);
				sbi->atomic_files--;
			}
			spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);

			mutex_unlock(&fi->inmem_lock);
			break;
		}
		__revoke_inmem_pages(inode, &fi->inmem_pages,
						true, false, true);
		mutex_unlock(&fi->inmem_lock);
	} while (1);
}

void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct list_head *head = &fi->inmem_pages;
	struct inmem_pages *cur = NULL;

	f2fs_bug_on(sbi, !IS_ATOMIC_WRITTEN_PAGE(page));

	mutex_lock(&fi->inmem_lock);
	list_for_each_entry(cur, head, list) {
		if (cur->page == page)
			break;
	}

	f2fs_bug_on(sbi, list_empty(head) || cur->page != page);
	list_del(&cur->list);
	mutex_unlock(&fi->inmem_lock);

	dec_page_count(sbi, F2FS_INMEM_PAGES);
	kmem_cache_free(inmem_entry_slab, cur);

	ClearPageUptodate(page);
	f2fs_clear_page_private(page);
	f2fs_put_page(page, 0);

	trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
}

static int __f2fs_commit_inmem_pages(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *cur, *tmp;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = REQ_SYNC | REQ_PRIO,
		.io_type = FS_DATA_IO,
	};
	struct list_head revoke_list;
	bool submit_bio = false;
	int err = 0;

	INIT_LIST_HEAD(&revoke_list);

	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
		struct page *page = cur->page;

		lock_page(page);
		if (page->mapping == inode->i_mapping) {
			trace_f2fs_commit_inmem_page(page, INMEM);

			f2fs_wait_on_page_writeback(page, DATA, true, true);

			set_page_dirty(page);
			if (clear_page_dirty_for_io(page)) {
				inode_dec_dirty_pages(inode);
				f2fs_remove_dirty_inode(inode);
			}
retry:
			fio.page = page;
			fio.old_blkaddr = NULL_ADDR;
			fio.encrypted_page = NULL;
			fio.need_lock = LOCK_DONE;
			err = f2fs_do_write_data_page(&fio);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC,
							DEFAULT_IO_TIMEOUT);
					cond_resched();
					goto retry;
				}
				unlock_page(page);
				break;
			}
			/* record old blkaddr for revoking */
			cur->old_addr = fio.old_blkaddr;
			submit_bio = true;
		}
		unlock_page(page);
		list_move_tail(&cur->list, &revoke_list);
	}

	if (submit_bio)
		f2fs_submit_merged_write_cond(sbi, inode, NULL, 0, DATA);

	if (err) {
		/*
		 * Try to revoke all committed pages. We could still fail due
		 * to lack of memory or some other reason; if that happens,
		 * EAGAIN is returned, meaning the transaction is no longer
		 * intact and the caller should use the journal to recover, or
		 * rewrite and commit the last transaction. For any other
		 * error number, the revoking was done by the filesystem
		 * itself.
		 */
		err = __revoke_inmem_pages(inode, &revoke_list,
						false, true, false);

		/* drop all uncommitted pages */
		__revoke_inmem_pages(inode, &fi->inmem_pages,
						true, false, false);
	} else {
		__revoke_inmem_pages(inode, &revoke_list,
						false, false, false);
	}

	return err;
}

int f2fs_commit_inmem_pages(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	int err;

	f2fs_balance_fs(sbi, true);

	down_write(&fi->i_gc_rwsem[WRITE]);

	f2fs_lock_op(sbi);
	set_inode_flag(inode, FI_ATOMIC_COMMIT);

	mutex_lock(&fi->inmem_lock);
	err = __f2fs_commit_inmem_pages(inode);
	mutex_unlock(&fi->inmem_lock);

	clear_inode_flag(inode, FI_ATOMIC_COMMIT);

	f2fs_unlock_op(sbi);
	up_write(&fi->i_gc_rwsem[WRITE]);

	return err;
}
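
/*
 * Note (summarizing the code above): the commit path takes its locks in the
 * order i_gc_rwsem[WRITE] -> f2fs_lock_op() -> inmem_lock and releases them
 * in reverse, while __revoke_inmem_pages() uses trylock_page() in the drop
 * path to avoid deadlock between the page lock and inmem_lock.
 */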

/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
	if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
		f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
		f2fs_stop_checkpoint(sbi, false);
	}

	/* balance_fs_bg may still be pending */
	if (need && excess_cached_nats(sbi))
		f2fs_balance_fs_bg(sbi, false);

	if (!f2fs_is_checkpoint_ready(sbi))
		return;

	/*
	 * We should do GC, or end up with a checkpoint, if there are too many
	 * dirty dir/node pages without enough free segments.
	 */
	if (has_not_enough_free_secs(sbi, 0, 0)) {
		down_write(&sbi->gc_lock);
		f2fs_gc(sbi, false, false, false, NULL_SEGNO);
	}
}

void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
{
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return;

	/* try to shrink the extent cache when there is not enough memory */
	if (!f2fs_available_free_memory(sbi, EXTENT_CACHE))
		f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);

	/* check the # of cached NAT entries */
	if (!f2fs_available_free_memory(sbi, NAT_ENTRIES))
		f2fs_try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);

	if (!f2fs_available_free_memory(sbi, FREE_NIDS))
		f2fs_try_to_free_nids(sbi, MAX_FREE_NIDS);
	else
		f2fs_build_free_nids(sbi, false, false);

	if (!is_idle(sbi, REQ_TIME) &&
		(!excess_dirty_nats(sbi) && !excess_dirty_nodes(sbi)))
		return;

	/* a checkpoint is the only way to shrink partially cached entries */
	if (!f2fs_available_free_memory(sbi, NAT_ENTRIES) ||
			!f2fs_available_free_memory(sbi, INO_ENTRIES) ||
			excess_prefree_segs(sbi) ||
			excess_dirty_nats(sbi) ||
			excess_dirty_nodes(sbi) ||
			f2fs_time_over(sbi, CP_TIME)) {
		if (test_opt(sbi, DATA_FLUSH) && from_bg) {
			struct blk_plug plug;

			mutex_lock(&sbi->flush_lock);

			blk_start_plug(&plug);
			f2fs_sync_dirty_inodes(sbi, FILE_INODE);
			blk_finish_plug(&plug);

			mutex_unlock(&sbi->flush_lock);
		}
		f2fs_sync_fs(sbi->sb, true);
		stat_inc_bg_cp_count(sbi->stat_info);
	}
}

static int __submit_flush_wait(struct f2fs_sb_info *sbi,
				struct block_device *bdev)
{
	struct bio *bio;
	int ret;

	bio = f2fs_bio_alloc(sbi, 0, false);
	if (!bio)
		return -ENOMEM;

	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
	bio_set_dev(bio, bdev);
	ret = submit_bio_wait(bio);
	bio_put(bio);

	trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
				test_opt(sbi, FLUSH_MERGE), ret);
	return ret;
}
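
/*
 * Note: the bio built above carries no data; REQ_OP_WRITE | REQ_PREFLUSH
 * with an empty payload asks the device to flush its volatile write cache,
 * which is what submit_bio_wait() then waits on.
 */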

static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino)
{
	int ret = 0;
	int i;

	if (!f2fs_is_multi_device(sbi))
		return __submit_flush_wait(sbi, sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++) {
		if (!f2fs_is_dirty_device(sbi, ino, i, FLUSH_INO))
			continue;
		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
		if (ret)
			break;
	}
	return ret;
}

static int issue_flush_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	wait_queue_head_t *q = &fcc->flush_wait_queue;
repeat:
	if (kthread_should_stop())
		return 0;

	sb_start_intwrite(sbi->sb);

	if (!llist_empty(&fcc->issue_list)) {
		struct flush_cmd *cmd, *next;
		int ret;

		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);

		cmd = llist_entry(fcc->dispatch_list, struct flush_cmd, llnode);

		ret = submit_flush_wait(sbi, cmd->ino);
		atomic_inc(&fcc->issued_flush);

		llist_for_each_entry_safe(cmd, next,
					  fcc->dispatch_list, llnode) {
			cmd->ret = ret;
			complete(&cmd->wait);
		}
		fcc->dispatch_list = NULL;
	}

	sb_end_intwrite(sbi->sb);

	wait_event_interruptible(*q,
		kthread_should_stop() || !llist_empty(&fcc->issue_list));
	goto repeat;
}
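
/*
 * Design note (from the code above): the flush thread drains the whole
 * issue_list, submits a single preflush, and completes every queued waiter
 * with that one result, so N concurrent fsync-style flushes collapse into
 * one device cache flush.
 */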

int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	struct flush_cmd cmd;
	int ret;

	if (test_opt(sbi, NOBARRIER))
		return 0;

	if (!test_opt(sbi, FLUSH_MERGE)) {
		atomic_inc(&fcc->queued_flush);
		ret = submit_flush_wait(sbi, ino);
		atomic_dec(&fcc->queued_flush);
		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	if (atomic_inc_return(&fcc->queued_flush) == 1 ||
			f2fs_is_multi_device(sbi)) {
		ret = submit_flush_wait(sbi, ino);
		atomic_dec(&fcc->queued_flush);

		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	cmd.ino = ino;
	init_completion(&cmd.wait);

	llist_add(&cmd.llnode, &fcc->issue_list);

	/* update issue_list before we wake up issue_flush thread */
	smp_mb();

	if (waitqueue_active(&fcc->flush_wait_queue))
		wake_up(&fcc->flush_wait_queue);

	if (fcc->f2fs_issue_flush) {
		wait_for_completion(&cmd.wait);
		atomic_dec(&fcc->queued_flush);
	} else {
		struct llist_node *list;

		list = llist_del_all(&fcc->issue_list);
		if (!list) {
			wait_for_completion(&cmd.wait);
			atomic_dec(&fcc->queued_flush);
		} else {
			struct flush_cmd *tmp, *next;

			ret = submit_flush_wait(sbi, ino);

			llist_for_each_entry_safe(tmp, next, list, llnode) {
				if (tmp == &cmd) {
					cmd.ret = ret;
					atomic_dec(&fcc->queued_flush);
					continue;
				}
				tmp->ret = ret;
				complete(&tmp->wait);
			}
		}
	}

	return cmd.ret;
}

int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct flush_cmd_control *fcc;
	int err = 0;

	if (SM_I(sbi)->fcc_info) {
		fcc = SM_I(sbi)->fcc_info;
		if (fcc->f2fs_issue_flush)
			return err;
		goto init_thread;
	}

	fcc = f2fs_kzalloc(sbi, sizeof(struct flush_cmd_control), GFP_KERNEL);
	if (!fcc)
		return -ENOMEM;
	atomic_set(&fcc->issued_flush, 0);
	atomic_set(&fcc->queued_flush, 0);
	init_waitqueue_head(&fcc->flush_wait_queue);
	init_llist_head(&fcc->issue_list);
	SM_I(sbi)->fcc_info = fcc;
	if (!test_opt(sbi, FLUSH_MERGE))
		return err;

init_thread:
	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(fcc->f2fs_issue_flush)) {
		err = PTR_ERR(fcc->f2fs_issue_flush);
		kfree(fcc);
		SM_I(sbi)->fcc_info = NULL;
		return err;
	}

	return err;
}

void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;

	if (fcc && fcc->f2fs_issue_flush) {
		struct task_struct *flush_thread = fcc->f2fs_issue_flush;

		fcc->f2fs_issue_flush = NULL;
		kthread_stop(flush_thread);
	}
	if (free) {
		kfree(fcc);
		SM_I(sbi)->fcc_info = NULL;
	}
}

int f2fs_flush_device_cache(struct f2fs_sb_info *sbi)
{
	int ret = 0, i;

	if (!f2fs_is_multi_device(sbi))
		return 0;

	if (test_opt(sbi, NOBARRIER))
		return 0;

	for (i = 1; i < sbi->s_ndevs; i++) {
		if (!f2fs_test_bit(i, (char *)&sbi->dirty_device))
			continue;
		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
		if (ret)
			break;

		spin_lock(&sbi->dev_lock);
		f2fs_clear_bit(i, (char *)&sbi->dirty_device);
		spin_unlock(&sbi->dev_lock);
	}

	return ret;
}

static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
						enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (unlikely(t >= DIRTY)) {
			f2fs_bug_on(sbi, 1);
			return;
		}
		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]++;

		if (__is_large_section(sbi)) {
			unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
			block_t valid_blocks =
				get_valid_blocks(sbi, segno, true);

			f2fs_bug_on(sbi, unlikely(!valid_blocks ||
					valid_blocks == BLKS_PER_SEC(sbi)));

			if (!IS_CURSEC(sbi, secno))
				set_bit(secno, dirty_i->dirty_secmap);
		}
	}
}

static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
						enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	block_t valid_blocks;

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]--;

		valid_blocks = get_valid_blocks(sbi, segno, true);
		if (valid_blocks == 0) {
			clear_bit(GET_SEC_FROM_SEG(sbi, segno),
						dirty_i->victim_secmap);
#ifdef CONFIG_F2FS_CHECK_FS
			clear_bit(segno, SIT_I(sbi)->invalid_segmap);
#endif
		}
		if (__is_large_section(sbi)) {
			unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

			if (!valid_blocks ||
					valid_blocks == BLKS_PER_SEC(sbi)) {
				clear_bit(secno, dirty_i->dirty_secmap);
				return;
			}

			if (!IS_CURSEC(sbi, secno))
				set_bit(secno, dirty_i->dirty_secmap);
		}
	}
}

/*
 * No error such as -ENOMEM should occur here:
 * adding a dirty entry to the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be
 * added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks, ckpt_valid_blocks;
	unsigned int usable_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	usable_blocks = f2fs_usable_blks_in_seg(sbi, segno);
	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, false);
	ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno, false);

	if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) ||
		ckpt_valid_blocks == usable_blocks)) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < usable_blocks) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}

/* This moves currently empty dirty blocks to prefree. Must hold seglist_lock */
void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
		if (get_valid_blocks(sbi, segno, false))
			continue;
		if (IS_CURSEG(sbi, segno))
			continue;
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	}
	mutex_unlock(&dirty_i->seglist_lock);
}

block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi)
{
	int ovp_hole_segs =
		(overprovision_segments(sbi) - reserved_segments(sbi));
	block_t ovp_holes = ovp_hole_segs << sbi->log_blocks_per_seg;
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	block_t holes[2] = {0, 0};	/* DATA and NODE */
	block_t unusable;
	struct seg_entry *se;
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
		se = get_seg_entry(sbi, segno);
		if (IS_NODESEG(se->type))
			holes[NODE] += f2fs_usable_blks_in_seg(sbi, segno) -
							se->valid_blocks;
		else
			holes[DATA] += f2fs_usable_blks_in_seg(sbi, segno) -
							se->valid_blocks;
	}
	mutex_unlock(&dirty_i->seglist_lock);

	unusable = holes[DATA] > holes[NODE] ? holes[DATA] : holes[NODE];
	if (unusable > ovp_holes)
		return unusable - ovp_holes;
	return 0;
}
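
/*
 * Example (illustrative numbers): with holes[DATA] = 5000 blocks,
 * holes[NODE] = 3000 blocks and ovp_holes = 4096 blocks, the unusable space
 * is max(5000, 3000) - 4096 = 904 blocks; if the larger hole fits inside the
 * overprovisioned area, the function returns 0.
 */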

int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable)
{
	int ovp_hole_segs =
		(overprovision_segments(sbi) - reserved_segments(sbi));

	if (unusable > F2FS_OPTION(sbi).unusable_cap)
		return -EAGAIN;
	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) &&
		dirty_segments(sbi) > ovp_hole_segs)
		return -EAGAIN;
	return 0;
}

/* This is only used by SBI_CP_DISABLED */
static unsigned int get_free_segment(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno = 0;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
		if (get_valid_blocks(sbi, segno, false))
			continue;
		if (get_ckpt_valid_blocks(sbi, segno, false))
			continue;
		mutex_unlock(&dirty_i->seglist_lock);
		return segno;
	}
	mutex_unlock(&dirty_i->seglist_lock);
	return NULL_SEGNO;
}

static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t lstart,
		block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc;

	f2fs_bug_on(sbi, !len);

	pend_list = &dcc->pend_list[plist_idx(len)];

	dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS);
	INIT_LIST_HEAD(&dc->list);
	dc->bdev = bdev;
	dc->lstart = lstart;
	dc->start = start;
	dc->len = len;
	dc->ref = 0;
	dc->state = D_PREP;
	dc->queued = 0;
	dc->error = 0;
	init_completion(&dc->wait);
	list_add_tail(&dc->list, pend_list);
	spin_lock_init(&dc->lock);
	dc->bio_ref = 0;
	atomic_inc(&dcc->discard_cmd_cnt);
	dcc->undiscard_blks += len;

	return dc;
}

static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len,
				struct rb_node *parent, struct rb_node **p,
				bool leftmost)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *dc;

	dc = __create_discard_cmd(sbi, bdev, lstart, start, len);

	rb_link_node(&dc->rb_node, parent, p);
	rb_insert_color_cached(&dc->rb_node, &dcc->root, leftmost);

	return dc;
}

static void __detach_discard_cmd(struct discard_cmd_control *dcc,
							struct discard_cmd *dc)
{
	if (dc->state == D_DONE)
		atomic_sub(dc->queued, &dcc->queued_discard);

	list_del(&dc->list);
	rb_erase_cached(&dc->rb_node, &dcc->root);
	dcc->undiscard_blks -= dc->len;

	kmem_cache_free(discard_cmd_slab, dc);

	atomic_dec(&dcc->discard_cmd_cnt);
}

static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
							struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	unsigned long flags;

	trace_f2fs_remove_discard(dc->bdev, dc->start, dc->len);

	spin_lock_irqsave(&dc->lock, flags);
	if (dc->bio_ref) {
		spin_unlock_irqrestore(&dc->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dc->lock, flags);

	f2fs_bug_on(sbi, dc->ref);

	if (dc->error == -EOPNOTSUPP)
		dc->error = 0;

	if (dc->error)
		printk_ratelimited(
			"%sF2FS-fs (%s): Issue discard(%u, %u, %u) failed, ret: %d",
			KERN_INFO, sbi->sb->s_id,
			dc->lstart, dc->start, dc->len, dc->error);
	__detach_discard_cmd(dcc, dc);
}

static void f2fs_submit_discard_endio(struct bio *bio)
{
	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
	unsigned long flags;

	spin_lock_irqsave(&dc->lock, flags);
	if (!dc->error)
		dc->error = blk_status_to_errno(bio->bi_status);
	dc->bio_ref--;
	if (!dc->bio_ref && dc->state == D_SUBMIT) {
		dc->state = D_DONE;
		complete_all(&dc->wait);
	}
	spin_unlock_irqrestore(&dc->lock, flags);
	bio_put(bio);
}

static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
				block_t start, block_t end)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct seg_entry *sentry;
	unsigned int segno;
	block_t blk = start;
	unsigned long offset, size, max_blocks = sbi->blocks_per_seg;
	unsigned long *map;

	while (blk < end) {
		segno = GET_SEGNO(sbi, blk);
		sentry = get_seg_entry(sbi, segno);
		offset = GET_BLKOFF_FROM_SEG0(sbi, blk);

		if (end < START_BLOCK(sbi, segno + 1))
			size = GET_BLKOFF_FROM_SEG0(sbi, end);
		else
			size = max_blocks;
		map = (unsigned long *)(sentry->cur_valid_map);
		offset = __find_rev_next_bit(map, size, offset);
		f2fs_bug_on(sbi, offset != size);
		blk = START_BLOCK(sbi, segno + 1);
	}
#endif
}

static void __init_discard_policy(struct f2fs_sb_info *sbi,
				struct discard_policy *dpolicy,
				int discard_type, unsigned int granularity)
{
	/* common policy */
	dpolicy->type = discard_type;
	dpolicy->sync = true;
	dpolicy->ordered = false;
	dpolicy->granularity = granularity;

	dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
	dpolicy->io_aware_gran = MAX_PLIST_NUM;
	dpolicy->timeout = false;

	if (discard_type == DPOLICY_BG) {
		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
		dpolicy->mid_interval = DEF_MID_DISCARD_ISSUE_TIME;
		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
		dpolicy->io_aware = true;
		dpolicy->sync = false;
		dpolicy->ordered = true;
		if (utilization(sbi) > DEF_DISCARD_URGENT_UTIL) {
			dpolicy->granularity = 1;
			dpolicy->max_interval = DEF_MIN_DISCARD_ISSUE_TIME;
		}
	} else if (discard_type == DPOLICY_FORCE) {
		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
		dpolicy->mid_interval = DEF_MID_DISCARD_ISSUE_TIME;
		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
		dpolicy->io_aware = false;
	} else if (discard_type == DPOLICY_FSTRIM) {
		dpolicy->io_aware = false;
	} else if (discard_type == DPOLICY_UMOUNT) {
		dpolicy->io_aware = false;
		/* we need to issue all to keep CP_TRIMMED_FLAG */
		dpolicy->granularity = 1;
		dpolicy->timeout = true;
	}
}
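
/*
 * Summary of the policies set up above: BG is asynchronous, I/O-aware and
 * ordered (and becomes more aggressive once utilization exceeds
 * DEF_DISCARD_URGENT_UTIL); FORCE keeps the BG intervals but ignores device
 * idleness; FSTRIM only disables I/O-awareness; UMOUNT issues everything at
 * granularity 1, under a timeout, so that CP_TRIMMED_FLAG can be kept.
 */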

static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len);
/* this function is copied from blkdev_issue_discard in block/blk-lib.c */
static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
						struct discard_policy *dpolicy,
						struct discard_cmd *dc,
						unsigned int *issued)
{
	struct block_device *bdev = dc->bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_discard_blocks =
			SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
					&(dcc->fstrim_list) : &(dcc->wait_list);
	int flag = dpolicy->sync ? REQ_SYNC : 0;
	block_t lstart, start, len, total_len;
	int err = 0;

	if (dc->state != D_PREP)
		return 0;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
		return 0;

	trace_f2fs_issue_discard(bdev, dc->start, dc->len);

	lstart = dc->lstart;
	start = dc->start;
	len = dc->len;
	total_len = len;

	dc->len = 0;

	while (total_len && *issued < dpolicy->max_requests && !err) {
		struct bio *bio = NULL;
		unsigned long flags;
		bool last = true;

		if (len > max_discard_blocks) {
			len = max_discard_blocks;
			last = false;
		}

		(*issued)++;
		if (*issued == dpolicy->max_requests)
			last = true;

		dc->len += len;

		if (time_to_inject(sbi, FAULT_DISCARD)) {
			f2fs_show_injection_info(sbi, FAULT_DISCARD);
			err = -EIO;
			goto submit;
		}
		err = __blkdev_issue_discard(bdev,
					SECTOR_FROM_BLOCK(start),
					SECTOR_FROM_BLOCK(len),
					GFP_NOFS, 0, &bio);
submit:
		if (err) {
			spin_lock_irqsave(&dc->lock, flags);
			if (dc->state == D_PARTIAL)
				dc->state = D_SUBMIT;
			spin_unlock_irqrestore(&dc->lock, flags);

			break;
		}

		f2fs_bug_on(sbi, !bio);

		/*
		 * keep this before submission to avoid hitting D_DONE
		 * right away
		 */
		spin_lock_irqsave(&dc->lock, flags);
		if (last)
			dc->state = D_SUBMIT;
		else
			dc->state = D_PARTIAL;
		dc->bio_ref++;
		spin_unlock_irqrestore(&dc->lock, flags);

		atomic_inc(&dcc->queued_discard);
		dc->queued++;
		list_move_tail(&dc->list, wait_list);

		/* sanity check on the discard range */
		__check_sit_bitmap(sbi, lstart, lstart + len);

		bio->bi_private = dc;
		bio->bi_end_io = f2fs_submit_discard_endio;
		bio->bi_opf |= flag;
		submit_bio(bio);

		atomic_inc(&dcc->issued_discard);

		f2fs_update_iostat(sbi, FS_DISCARD, 1);

		lstart += len;
		start += len;
		total_len -= len;
		len = total_len;
	}

	if (!err && len) {
		dcc->undiscard_blks -= len;
		__update_discard_tree_range(sbi, bdev, lstart, start, len);
	}
	return err;
}
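
/*
 * Example (illustrative numbers): if a command covers 24576 blocks but the
 * device caps max_discard_sectors at the equivalent of 8192 blocks, the loop
 * above issues three 8192-block bios, marking the first two D_PARTIAL and
 * the last one D_SUBMIT; any tail that could not be issued (e.g. because
 * max_requests was reached) is re-inserted via __update_discard_tree_range().
 */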

static void __insert_discard_tree(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len,
				struct rb_node **insert_p,
				struct rb_node *insert_parent)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	bool leftmost = true;

	if (insert_p && insert_parent) {
		parent = insert_parent;
		p = insert_p;
		goto do_insert;
	}

	p = f2fs_lookup_rb_tree_for_insert(sbi, &dcc->root, &parent,
							lstart, &leftmost);
do_insert:
	__attach_discard_cmd(sbi, bdev, lstart, start, len, parent,
								p, leftmost);
}

static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
						struct discard_cmd *dc)
{
	list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->len)]);
}

static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
				struct discard_cmd *dc, block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_info di = dc->di;
	bool modified = false;

	if (dc->state == D_DONE || dc->len == 1) {
		__remove_discard_cmd(sbi, dc);
		return;
	}

	dcc->undiscard_blks -= di.len;

	if (blkaddr > di.lstart) {
		dc->len = blkaddr - dc->lstart;
		dcc->undiscard_blks += dc->len;
		__relocate_discard_cmd(dcc, dc);
		modified = true;
	}

	if (blkaddr < di.lstart + di.len - 1) {
		if (modified) {
			__insert_discard_tree(sbi, dc->bdev, blkaddr + 1,
					di.start + blkaddr + 1 - di.lstart,
					di.lstart + di.len - 1 - blkaddr,
					NULL, NULL);
		} else {
			dc->lstart++;
			dc->len--;
			dc->start++;
			dcc->undiscard_blks += dc->len;
			__relocate_discard_cmd(dcc, dc);
		}
	}
}
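
/*
 * Example (illustrative): punching blkaddr 105 out of a pending command
 * covering lstart 100..109 trims the command to 100..104 and inserts a new
 * command for 106..109, so only the punched block is no longer scheduled
 * for discard.
 */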

static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
	struct discard_cmd *dc;
	struct discard_info di = {0};
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_discard_blocks =
			SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
	block_t end = lstart + len;

	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
					NULL, lstart,
					(struct rb_entry **)&prev_dc,
					(struct rb_entry **)&next_dc,
					&insert_p, &insert_parent, true, NULL);
	if (dc)
		prev_dc = dc;

	if (!prev_dc) {
		di.lstart = lstart;
		di.len = next_dc ? next_dc->lstart - lstart : len;
		di.len = min(di.len, len);
		di.start = start;
	}

	while (1) {
		struct rb_node *node;
		bool merged = false;
		struct discard_cmd *tdc = NULL;

		if (prev_dc) {
			di.lstart = prev_dc->lstart + prev_dc->len;
			if (di.lstart < lstart)
				di.lstart = lstart;
			if (di.lstart >= end)
				break;

			if (!next_dc || next_dc->lstart > end)
				di.len = end - di.lstart;
			else
				di.len = next_dc->lstart - di.lstart;
			di.start = start + di.lstart - lstart;
		}

		if (!di.len)
			goto next;

		if (prev_dc && prev_dc->state == D_PREP &&
			prev_dc->bdev == bdev &&
			__is_discard_back_mergeable(&di, &prev_dc->di,
							max_discard_blocks)) {
			prev_dc->di.len += di.len;
			dcc->undiscard_blks += di.len;
			__relocate_discard_cmd(dcc, prev_dc);
			di = prev_dc->di;
			tdc = prev_dc;
			merged = true;
		}

		if (next_dc && next_dc->state == D_PREP &&
			next_dc->bdev == bdev &&
			__is_discard_front_mergeable(&di, &next_dc->di,
							max_discard_blocks)) {
			next_dc->di.lstart = di.lstart;
			next_dc->di.len += di.len;
			next_dc->di.start = di.start;
			dcc->undiscard_blks += di.len;
			__relocate_discard_cmd(dcc, next_dc);
			if (tdc)
				__remove_discard_cmd(sbi, tdc);
			merged = true;
		}

		if (!merged) {
			__insert_discard_tree(sbi, bdev, di.lstart, di.start,
							di.len, NULL, NULL);
		}
 next:
		prev_dc = next_dc;
		if (!prev_dc)
			break;

		node = rb_next(&prev_dc->rb_node);
		next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
	}
}

static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
	block_t lblkstart = blkstart;

	if (!f2fs_bdev_support_discard(bdev))
		return 0;

	trace_f2fs_queue_discard(bdev, blkstart, blklen);

	if (f2fs_is_multi_device(sbi)) {
		int devi = f2fs_target_device_index(sbi, blkstart);

		blkstart -= FDEV(devi).start_blk;
	}
	mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
	__update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
	mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
	return 0;
}

static unsigned int __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
					struct discard_policy *dpolicy)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	struct discard_cmd *dc;
	struct blk_plug plug;
	unsigned int pos = dcc->next_pos;
	unsigned int issued = 0;
	bool io_interrupted = false;

	mutex_lock(&dcc->cmd_lock);
	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
					NULL, pos,
					(struct rb_entry **)&prev_dc,
					(struct rb_entry **)&next_dc,
					&insert_p, &insert_parent, true, NULL);
	if (!dc)
		dc = next_dc;

	blk_start_plug(&plug);

	while (dc) {
		struct rb_node *node;
		int err = 0;

		if (dc->state != D_PREP)
			goto next;

		if (dpolicy->io_aware && !is_idle(sbi, DISCARD_TIME)) {
			io_interrupted = true;
			break;
		}

		dcc->next_pos = dc->lstart + dc->len;
		err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);

		if (issued >= dpolicy->max_requests)
			break;
next:
		node = rb_next(&dc->rb_node);
		if (err)
			__remove_discard_cmd(sbi, dc);
		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
	}

	blk_finish_plug(&plug);

	if (!dc)
		dcc->next_pos = 0;

	mutex_unlock(&dcc->cmd_lock);

	if (!issued && io_interrupted)
		issued = -1;

	return issued;
}
static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
					struct discard_policy *dpolicy);

static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
					struct discard_policy *dpolicy)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc, *tmp;
	struct blk_plug plug;
	int i, issued;
	bool io_interrupted = false;

	if (dpolicy->timeout)
		f2fs_update_time(sbi, UMOUNT_DISCARD_TIMEOUT);

retry:
	issued = 0;
	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
		if (dpolicy->timeout &&
				f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
			break;

		if (i + 1 < dpolicy->granularity)
			break;

		if (i < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered)
			return __issue_discard_cmd_orderly(sbi, dpolicy);

		pend_list = &dcc->pend_list[i];

		mutex_lock(&dcc->cmd_lock);
		if (list_empty(pend_list))
			goto next;
		if (unlikely(dcc->rbtree_check))
			f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
							&dcc->root, false));
		blk_start_plug(&plug);
		list_for_each_entry_safe(dc, tmp, pend_list, list) {
			f2fs_bug_on(sbi, dc->state != D_PREP);

			if (dpolicy->timeout &&
				f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
				break;

			if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
						!is_idle(sbi, DISCARD_TIME)) {
				io_interrupted = true;
				break;
			}

			__submit_discard_cmd(sbi, dpolicy, dc, &issued);

			if (issued >= dpolicy->max_requests)
				break;
		}
		blk_finish_plug(&plug);
next:
		mutex_unlock(&dcc->cmd_lock);

		if (issued >= dpolicy->max_requests || io_interrupted)
			break;
	}

	if (dpolicy->type == DPOLICY_UMOUNT && issued) {
		__wait_all_discard_cmd(sbi, dpolicy);
		goto retry;
	}

	if (!issued && io_interrupted)
		issued = -1;

	return issued;
}

static bool __drop_discard_cmd(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc, *tmp;
	int i;
	bool dropped = false;

	mutex_lock(&dcc->cmd_lock);
	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
		pend_list = &dcc->pend_list[i];
		list_for_each_entry_safe(dc, tmp, pend_list, list) {
			f2fs_bug_on(sbi, dc->state != D_PREP);
			__remove_discard_cmd(sbi, dc);
			dropped = true;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	return dropped;
}

void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi)
{
	__drop_discard_cmd(sbi);
}

static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi,
							struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	unsigned int len = 0;

	wait_for_completion_io(&dc->wait);
	mutex_lock(&dcc->cmd_lock);
	f2fs_bug_on(sbi, dc->state != D_DONE);
	dc->ref--;
	if (!dc->ref) {
		if (!dc->error)
			len = dc->len;
		__remove_discard_cmd(sbi, dc);
	}
	mutex_unlock(&dcc->cmd_lock);

	return len;
}

static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi,
						struct discard_policy *dpolicy,
						block_t start, block_t end)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
					&(dcc->fstrim_list) : &(dcc->wait_list);
	struct discard_cmd *dc, *tmp;
	bool need_wait;
	unsigned int trimmed = 0;

next:
	need_wait = false;

	mutex_lock(&dcc->cmd_lock);
	list_for_each_entry_safe(dc, tmp, wait_list, list) {
		if (dc->lstart + dc->len <= start || end <= dc->lstart)
			continue;
		if (dc->len < dpolicy->granularity)
			continue;
		if (dc->state == D_DONE && !dc->ref) {
			wait_for_completion_io(&dc->wait);
			if (!dc->error)
				trimmed += dc->len;
			__remove_discard_cmd(sbi, dc);
		} else {
			dc->ref++;
			need_wait = true;
			break;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	if (need_wait) {
		trimmed += __wait_one_discard_bio(sbi, dc);
		goto next;
	}

	return trimmed;
}

static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
						struct discard_policy *dpolicy)
{
	struct discard_policy dp;
	unsigned int discard_blks;

	if (dpolicy)
		return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);

	/* wait all */
	__init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, 1);
	discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
	__init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, 1);
	discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);

	return discard_blks;
}

/* This should be covered by the global mutex, &sit_i->sentry_lock */
static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *dc;
	bool need_wait = false;

	mutex_lock(&dcc->cmd_lock);
	dc = (struct discard_cmd *)f2fs_lookup_rb_tree(&dcc->root,
							NULL, blkaddr);
	if (dc) {
		if (dc->state == D_PREP) {
			__punch_discard_cmd(sbi, dc, blkaddr);
		} else {
			dc->ref++;
			need_wait = true;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	if (need_wait)
		__wait_one_discard_bio(sbi, dc);
}

void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

	if (dcc && dcc->f2fs_issue_discard) {
		struct task_struct *discard_thread = dcc->f2fs_issue_discard;

		dcc->f2fs_issue_discard = NULL;
		kthread_stop(discard_thread);
	}
}

/* This comes from f2fs_put_super */
bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_policy dpolicy;
	bool dropped;

	__init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
					dcc->discard_granularity);
	__issue_discard_cmd(sbi, &dpolicy);
	dropped = __drop_discard_cmd(sbi);

	/* just to make sure there are no pending discard commands */
	__wait_all_discard_cmd(sbi, NULL);

	f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
	return dropped;
}
1734
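/*
 * Background thread that issues queued discard commands. It sleeps on
 * discard_wait_queue and adapts its wakeup interval to the issue result:
 * min_interval after progress, mid_interval when nothing could be issued
 * this round, and max_interval when there is nothing to do. Under
 * GC_URGENT_HIGH it switches to an aggressive DPOLICY_FORCE policy.
 */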
static int issue_discard_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	wait_queue_head_t *q = &dcc->discard_wait_queue;
	struct discard_policy dpolicy;
	unsigned int wait_ms = DEF_MIN_DISCARD_ISSUE_TIME;
	int issued;

	set_freezable();

	do {
		__init_discard_policy(sbi, &dpolicy, DPOLICY_BG,
					dcc->discard_granularity);

		wait_event_interruptible_timeout(*q,
				kthread_should_stop() || freezing(current) ||
				dcc->discard_wake,
				msecs_to_jiffies(wait_ms));

		if (dcc->discard_wake)
			dcc->discard_wake = 0;

		/* clean up pending candidates before going to sleep */
		if (atomic_read(&dcc->queued_discard))
			__wait_all_discard_cmd(sbi, NULL);

		if (try_to_freeze())
			continue;
		if (f2fs_readonly(sbi->sb))
			continue;
		if (kthread_should_stop())
			return 0;
		if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
			wait_ms = dpolicy.max_interval;
			continue;
		}

		if (sbi->gc_mode == GC_URGENT_HIGH)
			__init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, 1);

		sb_start_intwrite(sbi->sb);

		issued = __issue_discard_cmd(sbi, &dpolicy);
		if (issued > 0) {
			__wait_all_discard_cmd(sbi, &dpolicy);
			wait_ms = dpolicy.min_interval;
		} else if (issued == -1) {
			wait_ms = f2fs_time_to_wait(sbi, DISCARD_TIME);
			if (!wait_ms)
				wait_ms = dpolicy.mid_interval;
		} else {
			wait_ms = dpolicy.max_interval;
		}

		sb_end_intwrite(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

#ifdef CONFIG_BLK_DEV_ZONED
static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
	sector_t sector, nr_sects;
	block_t lblkstart = blkstart;
	int devi = 0;

	if (f2fs_is_multi_device(sbi)) {
		devi = f2fs_target_device_index(sbi, blkstart);
		if (blkstart < FDEV(devi).start_blk ||
		    blkstart > FDEV(devi).end_blk) {
			f2fs_err(sbi, "Invalid block %x", blkstart);
			return -EIO;
		}
		blkstart -= FDEV(devi).start_blk;
	}

	/* For sequential zones, reset the zone write pointer */
	if (f2fs_blkz_is_seq(sbi, devi, blkstart)) {
		sector = SECTOR_FROM_BLOCK(blkstart);
		nr_sects = SECTOR_FROM_BLOCK(blklen);

		if (sector & (bdev_zone_sectors(bdev) - 1) ||
				nr_sects != bdev_zone_sectors(bdev)) {
			f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)",
				 devi, sbi->s_ndevs ? FDEV(devi).path : "",
				 blkstart, blklen);
			return -EIO;
		}
		trace_f2fs_issue_reset_zone(bdev, blkstart);
		return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
					sector, nr_sects, GFP_NOFS);
	}

	/* For conventional zones, use regular discard if supported */
	return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
}
#endif

static int __issue_discard_async(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
#ifdef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev))
		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
#endif
	return __queue_discard_cmd(sbi, bdev, blkstart, blklen);
}

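/*
 * Split [blkstart, blkstart + blklen) at device boundaries and issue one
 * asynchronous discard per contiguous per-device range. Each block is
 * also marked in its segment's discard_map, decrementing
 * sbi->discard_blks when the bit was not already set.
 */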
static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
				block_t blkstart, block_t blklen)
{
	sector_t start = blkstart, len = 0;
	struct block_device *bdev;
	struct seg_entry *se;
	unsigned int offset;
	block_t i;
	int err = 0;

	bdev = f2fs_target_device(sbi, blkstart, NULL);

	for (i = blkstart; i < blkstart + blklen; i++, len++) {
		if (i != start) {
			struct block_device *bdev2 =
				f2fs_target_device(sbi, i, NULL);

			if (bdev2 != bdev) {
				err = __issue_discard_async(sbi, bdev,
						start, len);
				if (err)
					return err;
				bdev = bdev2;
				start = i;
				len = 0;
			}
		}

		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
		offset = GET_BLKOFF_FROM_SEG0(sbi, i);

		if (!f2fs_test_and_set_bit(offset, se->discard_map))
			sbi->discard_blks--;
	}

	if (len)
		err = __issue_discard_async(sbi, bdev, start, len);
	return err;
}

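/*
 * Collect discard candidates for one segment (cpc->trim_start) by
 * comparing the current and checkpointed validity bitmaps. With
 * CP_DISCARD set, whole undiscarded extents of at least trim_minlen
 * blocks are taken; otherwise only blocks invalidated since the last
 * checkpoint qualify. In check_only mode it just reports whether a
 * candidate exists.
 */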
static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
							bool check_only)
{
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	int max_blocks = sbi->blocks_per_seg;
	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *discard_map = (unsigned long *)se->discard_map;
	unsigned long *dmap = SIT_I(sbi)->tmp_map;
	unsigned int start = 0, end = -1;
	bool force = (cpc->reason & CP_DISCARD);
	struct discard_entry *de = NULL;
	struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
	int i;

	if (se->valid_blocks == max_blocks || !f2fs_hw_support_discard(sbi))
		return false;

	if (!force) {
		if (!f2fs_realtime_discard_enable(sbi) || !se->valid_blocks ||
			SM_I(sbi)->dcc_info->nr_discards >=
				SM_I(sbi)->dcc_info->max_discards)
			return false;
	}

	/* SIT_VBLOCK_MAP_SIZE should be a multiple of sizeof(unsigned long) */
	for (i = 0; i < entries; i++)
		dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
				(cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];

	while (force || SM_I(sbi)->dcc_info->nr_discards <=
				SM_I(sbi)->dcc_info->max_discards) {
		start = __find_rev_next_bit(dmap, max_blocks, end + 1);
		if (start >= max_blocks)
			break;

		end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
		if (force && start && end != max_blocks
					&& (end - start) < cpc->trim_minlen)
			continue;

		if (check_only)
			return true;

		if (!de) {
			de = f2fs_kmem_cache_alloc(discard_entry_slab,
								GFP_F2FS_ZERO);
			de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
			list_add_tail(&de->list, head);
		}

		for (i = start; i < end; i++)
			__set_bit_le(i, (void *)de->discard_map);

		SM_I(sbi)->dcc_info->nr_discards += end - start;
	}
	return false;
}

static void release_discard_addr(struct discard_entry *entry)
{
	list_del(&entry->list);
	kmem_cache_free(discard_entry_slab, entry);
}

void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi)
{
	struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
	struct discard_entry *entry, *this;

	/* drop caches */
	list_for_each_entry_safe(entry, this, head, list)
		release_discard_addr(entry);
}

/*
 * Should call f2fs_clear_prefree_segments after checkpoint is done.
 */
static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
		__set_test_and_free(sbi, segno, false);
	mutex_unlock(&dirty_i->seglist_lock);
}

void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
						struct cp_control *cpc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *head = &dcc->entry_list;
	struct discard_entry *entry, *this;
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
	unsigned int start = 0, end = -1;
	unsigned int secno, start_segno;
	bool force = (cpc->reason & CP_DISCARD);
	bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi);

	mutex_lock(&dirty_i->seglist_lock);

	while (1) {
		int i;

		if (need_align && end != -1)
			end--;
		start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
		if (start >= MAIN_SEGS(sbi))
			break;
		end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
								start + 1);

		if (need_align) {
			start = rounddown(start, sbi->segs_per_sec);
			end = roundup(end, sbi->segs_per_sec);
		}

		for (i = start; i < end; i++) {
			if (test_and_clear_bit(i, prefree_map))
				dirty_i->nr_dirty[PRE]--;
		}

		if (!f2fs_realtime_discard_enable(sbi))
			continue;

		if (force && start >= cpc->trim_start &&
					(end - 1) <= cpc->trim_end)
			continue;

		if (!f2fs_lfs_mode(sbi) || !__is_large_section(sbi)) {
			f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
				(end - start) << sbi->log_blocks_per_seg);
			continue;
		}
next:
		secno = GET_SEC_FROM_SEG(sbi, start);
		start_segno = GET_SEG_FROM_SEC(sbi, secno);
		if (!IS_CURSEC(sbi, secno) &&
			!get_valid_blocks(sbi, start, true))
			f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
				sbi->segs_per_sec << sbi->log_blocks_per_seg);

		start = start_segno + sbi->segs_per_sec;
		if (start < end)
			goto next;
		else
			end = start - 1;
	}
	mutex_unlock(&dirty_i->seglist_lock);

	/* send small discards */
	list_for_each_entry_safe(entry, this, head, list) {
		unsigned int cur_pos = 0, next_pos, len, total_len = 0;
		bool is_valid = test_bit_le(0, entry->discard_map);

find_next:
		if (is_valid) {
			next_pos = find_next_zero_bit_le(entry->discard_map,
					sbi->blocks_per_seg, cur_pos);
			len = next_pos - cur_pos;

			if (f2fs_sb_has_blkzoned(sbi) ||
			    (force && len < cpc->trim_minlen))
				goto skip;

			f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
									len);
			total_len += len;
		} else {
			next_pos = find_next_bit_le(entry->discard_map,
					sbi->blocks_per_seg, cur_pos);
		}
skip:
		cur_pos = next_pos;
		is_valid = !is_valid;

		if (cur_pos < sbi->blocks_per_seg)
			goto find_next;

		release_discard_addr(entry);
		dcc->nr_discards -= total_len;
	}

	wake_up_discard_thread(sbi, false);
}

static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct discard_cmd_control *dcc;
	int err = 0, i;

	if (SM_I(sbi)->dcc_info) {
		dcc = SM_I(sbi)->dcc_info;
		goto init_thread;
	}

	dcc = f2fs_kzalloc(sbi, sizeof(struct discard_cmd_control), GFP_KERNEL);
	if (!dcc)
		return -ENOMEM;

	dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
	INIT_LIST_HEAD(&dcc->entry_list);
	for (i = 0; i < MAX_PLIST_NUM; i++)
		INIT_LIST_HEAD(&dcc->pend_list[i]);
	INIT_LIST_HEAD(&dcc->wait_list);
	INIT_LIST_HEAD(&dcc->fstrim_list);
	mutex_init(&dcc->cmd_lock);
	atomic_set(&dcc->issued_discard, 0);
	atomic_set(&dcc->queued_discard, 0);
	atomic_set(&dcc->discard_cmd_cnt, 0);
	dcc->nr_discards = 0;
	dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
	dcc->undiscard_blks = 0;
	dcc->next_pos = 0;
	dcc->root = RB_ROOT_CACHED;
	dcc->rbtree_check = false;

	init_waitqueue_head(&dcc->discard_wait_queue);
	SM_I(sbi)->dcc_info = dcc;
init_thread:
	dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
				"f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(dcc->f2fs_issue_discard)) {
		err = PTR_ERR(dcc->f2fs_issue_discard);
		kfree(dcc);
		SM_I(sbi)->dcc_info = NULL;
		return err;
	}

	return err;
}

static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

	if (!dcc)
		return;

	f2fs_stop_discard_thread(sbi);

	/*
	 * Recovery can cache discard commands, so the error path of
	 * fill_super() needs a chance to handle them.
	 */
	if (unlikely(atomic_read(&dcc->discard_cmd_cnt)))
		f2fs_issue_discard_timeout(sbi);

	kfree(dcc);
	SM_I(sbi)->dcc_info = NULL;
}

static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);

	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
		sit_i->dirty_sentries++;
		return false;
	}

	return true;
}

static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
					unsigned int segno, int modified)
{
	struct seg_entry *se = get_seg_entry(sbi, segno);
	se->type = type;
	if (modified)
		__mark_sit_entry_dirty(sbi, segno);
}

static inline unsigned long long get_segment_mtime(struct f2fs_sb_info *sbi,
						block_t blkaddr)
{
	unsigned int segno = GET_SEGNO(sbi, blkaddr);

	if (segno == NULL_SEGNO)
		return 0;
	return get_seg_entry(sbi, segno)->mtime;
}

static void update_segment_mtime(struct f2fs_sb_info *sbi, block_t blkaddr,
						unsigned long long old_mtime)
{
	struct seg_entry *se;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned long long ctime = get_mtime(sbi, false);
	unsigned long long mtime = old_mtime ? old_mtime : ctime;

	if (segno == NULL_SEGNO)
		return;

	se = get_seg_entry(sbi, segno);

	if (!se->mtime)
		se->mtime = mtime;
	else
		se->mtime = div_u64(se->mtime * se->valid_blocks + mtime,
						se->valid_blocks + 1);

	if (ctime > SIT_I(sbi)->max_mtime)
		SIT_I(sbi)->max_mtime = ctime;
}

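/*
 * Apply a validity change of @del (+1 or -1) blocks at @blkaddr to the
 * SIT entry: adjust valid_blocks, flip the current validity bitmap (with
 * a mirror-bitmap consistency check under CONFIG_F2FS_CHECK_FS), keep
 * the discard and checkpoint bitmaps in sync, and mark the entry dirty.
 */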
static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
{
	struct seg_entry *se;
	unsigned int segno, offset;
	long int new_vblocks;
	bool exist;
#ifdef CONFIG_F2FS_CHECK_FS
	bool mir_exist;
#endif

	segno = GET_SEGNO(sbi, blkaddr);

	se = get_seg_entry(sbi, segno);
	new_vblocks = se->valid_blocks + del;
	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

	f2fs_bug_on(sbi, (new_vblocks < 0 ||
			(new_vblocks > f2fs_usable_blks_in_seg(sbi, segno))));

	se->valid_blocks = new_vblocks;

	/* Update valid block bitmap */
	if (del > 0) {
		exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
#ifdef CONFIG_F2FS_CHECK_FS
		mir_exist = f2fs_test_and_set_bit(offset,
						se->cur_valid_map_mir);
		if (unlikely(exist != mir_exist)) {
			f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
				 blkaddr, exist);
			f2fs_bug_on(sbi, 1);
		}
#endif
		if (unlikely(exist)) {
			f2fs_err(sbi, "Bitmap was wrongly set, blk:%u",
				 blkaddr);
			f2fs_bug_on(sbi, 1);
			se->valid_blocks--;
			del = 0;
		}

		if (!f2fs_test_and_set_bit(offset, se->discard_map))
			sbi->discard_blks--;

		/*
		 * SSR should never reuse block which is checkpointed
		 * or newly invalidated.
		 */
		if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
			if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
				se->ckpt_valid_blocks++;
		}
	} else {
		exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map);
#ifdef CONFIG_F2FS_CHECK_FS
		mir_exist = f2fs_test_and_clear_bit(offset,
						se->cur_valid_map_mir);
		if (unlikely(exist != mir_exist)) {
			f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d",
				 blkaddr, exist);
			f2fs_bug_on(sbi, 1);
		}
#endif
		if (unlikely(!exist)) {
			f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u",
				 blkaddr);
			f2fs_bug_on(sbi, 1);
			se->valid_blocks++;
			del = 0;
		} else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
			/*
			 * If checkpoints are off, we must not reuse data that
			 * was used in the previous checkpoint. If it was used
			 * before, we must track that to know how much space we
			 * really have.
			 */
			if (f2fs_test_bit(offset, se->ckpt_valid_map)) {
				spin_lock(&sbi->stat_lock);
				sbi->unusable_block_count++;
				spin_unlock(&sbi->stat_lock);
			}
		}

		if (f2fs_test_and_clear_bit(offset, se->discard_map))
			sbi->discard_blks++;
	}
	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
		se->ckpt_valid_blocks += del;

	__mark_sit_entry_dirty(sbi, segno);

	/* update total number of valid blocks to be written in ckpt area */
	SIT_I(sbi)->written_valid_blocks += del;

	if (__is_large_section(sbi))
		get_sec_entry(sbi, segno)->valid_blocks += del;
}

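/*
 * Invalidate one block: drop any cached meta page for it, refresh the
 * segment mtime, decrement the SIT validity info, and re-classify the
 * owning segment in the dirty seglist.
 */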
void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
{
	unsigned int segno = GET_SEGNO(sbi, addr);
	struct sit_info *sit_i = SIT_I(sbi);

	f2fs_bug_on(sbi, addr == NULL_ADDR);
	if (addr == NEW_ADDR || addr == COMPRESS_ADDR)
		return;

	invalidate_mapping_pages(META_MAPPING(sbi), addr, addr);

	/* add it into sit main buffer */
	down_write(&sit_i->sentry_lock);

	update_segment_mtime(sbi, addr, 0);
	update_sit_entry(sbi, addr, -1);

	/* add it into dirty seglist */
	locate_dirty_segment(sbi, segno);

	up_write(&sit_i->sentry_lock);
}

bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int segno, offset;
	struct seg_entry *se;
	bool is_cp = false;

	if (!__is_valid_data_blkaddr(blkaddr))
		return true;

	down_read(&sit_i->sentry_lock);

	segno = GET_SEGNO(sbi, blkaddr);
	se = get_seg_entry(sbi, segno);
	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

	if (f2fs_test_bit(offset, se->ckpt_valid_map))
		is_cp = true;

	up_read(&sit_i->sentry_lock);

	return is_cp;
}

/*
 * This function should be called while holding the curseg_mutex lock.
 */
static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
					struct f2fs_summary *sum)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	void *addr = curseg->sum_blk;
	addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
	memcpy(addr, sum, sizeof(struct f2fs_summary));
}

/*
 * Calculate the number of current summary pages for writing
 */
int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
{
	int valid_sum_count = 0;
	int i, sum_in_page;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		if (sbi->ckpt->alloc_type[i] == SSR)
			valid_sum_count += sbi->blocks_per_seg;
		else {
			if (for_ra)
				valid_sum_count += le16_to_cpu(
					F2FS_CKPT(sbi)->cur_data_blkoff[i]);
			else
				valid_sum_count += curseg_blkoff(sbi, i);
		}
	}

	sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
	if (valid_sum_count <= sum_in_page)
		return 1;
	else if ((valid_sum_count - sum_in_page) <=
		(PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
		return 2;
	return 3;
}

/*
 * Caller should put this summary page
 */
struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
{
	if (unlikely(f2fs_cp_error(sbi)))
		return ERR_PTR(-EIO);
	return f2fs_get_meta_page_retry(sbi, GET_SUM_BLOCK(sbi, segno));
}

void f2fs_update_meta_page(struct f2fs_sb_info *sbi,
					void *src, block_t blk_addr)
{
	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);

	memcpy(page_address(page), src, PAGE_SIZE);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
}

static void write_sum_page(struct f2fs_sb_info *sbi,
			struct f2fs_summary_block *sum_blk, block_t blk_addr)
{
	f2fs_update_meta_page(sbi, (void *)sum_blk, blk_addr);
}

static void write_current_sum_page(struct f2fs_sb_info *sbi,
						int type, block_t blk_addr)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
	struct f2fs_summary_block *src = curseg->sum_blk;
	struct f2fs_summary_block *dst;

	dst = (struct f2fs_summary_block *)page_address(page);
	memset(dst, 0, PAGE_SIZE);

	mutex_lock(&curseg->curseg_mutex);

	down_read(&curseg->journal_rwsem);
	memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
	up_read(&curseg->journal_rwsem);

	memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
	memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);

	mutex_unlock(&curseg->curseg_mutex);

	set_page_dirty(page);
	f2fs_put_page(page, 1);
}

static int is_next_segment_free(struct f2fs_sb_info *sbi,
				struct curseg_info *curseg, int type)
{
	unsigned int segno = curseg->segno + 1;
	struct free_segmap_info *free_i = FREE_I(sbi);

	if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
		return !test_bit(segno, free_i->free_segmap);
	return 0;
}

/*
 * Find a new segment in the free segment bitmap, honoring the requested
 * allocation direction. This function always succeeds; a failure to find
 * a free segment is a BUG.
 */
static void get_new_segment(struct f2fs_sb_info *sbi,
			unsigned int *newseg, bool new_sec, int dir)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno, secno, zoneno;
	unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
	unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
	unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
	unsigned int left_start = hint;
	bool init = true;
	int go_left = 0;
	int i;

	spin_lock(&free_i->segmap_lock);

	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
		segno = find_next_zero_bit(free_i->free_segmap,
			GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
		if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
			goto got_it;
	}
find_other_zone:
	secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
	if (secno >= MAIN_SECS(sbi)) {
		if (dir == ALLOC_RIGHT) {
			secno = find_next_zero_bit(free_i->free_secmap,
							MAIN_SECS(sbi), 0);
			f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
		} else {
			go_left = 1;
			left_start = hint - 1;
		}
	}
	if (go_left == 0)
		goto skip_left;

	while (test_bit(left_start, free_i->free_secmap)) {
		if (left_start > 0) {
			left_start--;
			continue;
		}
		left_start = find_next_zero_bit(free_i->free_secmap,
							MAIN_SECS(sbi), 0);
		f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
		break;
	}
	secno = left_start;
skip_left:
	segno = GET_SEG_FROM_SEC(sbi, secno);
	zoneno = GET_ZONE_FROM_SEC(sbi, secno);

	/* give up on finding another zone */
	if (!init)
		goto got_it;
	if (sbi->secs_per_zone == 1)
		goto got_it;
	if (zoneno == old_zoneno)
		goto got_it;
	if (dir == ALLOC_LEFT) {
		if (!go_left && zoneno + 1 >= total_zones)
			goto got_it;
		if (go_left && zoneno == 0)
			goto got_it;
	}
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		if (CURSEG_I(sbi, i)->zone == zoneno)
			break;

	if (i < NR_CURSEG_TYPE) {
		/* zone is in use, try another */
		if (go_left)
			hint = zoneno * sbi->secs_per_zone - 1;
		else if (zoneno + 1 >= total_zones)
			hint = 0;
		else
			hint = (zoneno + 1) * sbi->secs_per_zone;
		init = false;
		goto find_other_zone;
	}
got_it:
	/* set it as dirty segment in free segmap */
	f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
	__set_inuse(sbi, segno);
	*newseg = segno;
	spin_unlock(&free_i->segmap_lock);
}

static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct summary_footer *sum_footer;
	unsigned short seg_type = curseg->seg_type;

	curseg->inited = true;
	curseg->segno = curseg->next_segno;
	curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
	curseg->next_blkoff = 0;
	curseg->next_segno = NULL_SEGNO;

	sum_footer = &(curseg->sum_blk->footer);
	memset(sum_footer, 0, sizeof(struct summary_footer));

	sanity_check_seg_type(sbi, seg_type);

	if (IS_DATASEG(seg_type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
	if (IS_NODESEG(seg_type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
	__set_sit_entry_type(sbi, seg_type, curseg->segno, modified);
}

static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned short seg_type = curseg->seg_type;

	sanity_check_seg_type(sbi, seg_type);

	/* if segs_per_sec is larger than 1, we need to keep the original policy. */
	if (__is_large_section(sbi))
		return curseg->segno;

	/* the inmem log may not be located on any segment after mount */
	if (!curseg->inited)
		return 0;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return 0;

	if (test_opt(sbi, NOHEAP) &&
		(seg_type == CURSEG_HOT_DATA || IS_NODESEG(seg_type)))
		return 0;

	if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
		return SIT_I(sbi)->last_victim[ALLOC_NEXT];

	/* find segments from 0 to reuse freed segments */
	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
		return 0;

	return curseg->segno;
}

/*
 * Allocate a current working segment.
 * This function always allocates a free segment in LFS manner.
 */
static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned short seg_type = curseg->seg_type;
	unsigned int segno = curseg->segno;
	int dir = ALLOC_LEFT;

	if (curseg->inited)
		write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, segno));
	if (seg_type == CURSEG_WARM_DATA || seg_type == CURSEG_COLD_DATA)
		dir = ALLOC_RIGHT;

	if (test_opt(sbi, NOHEAP))
		dir = ALLOC_RIGHT;

	segno = __get_next_segno(sbi, type);
	get_new_segment(sbi, &segno, new_sec, dir);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 1);
	curseg->alloc_type = LFS;
}

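/*
 * Find the first free slot at or after @start in @seg's segment by OR-ing
 * the current and checkpointed validity bitmaps: a block is reusable for
 * SSR only if it is free in both views.
 */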
static void __next_free_blkoff(struct f2fs_sb_info *sbi,
			struct curseg_info *seg, block_t start)
{
	struct seg_entry *se = get_seg_entry(sbi, seg->segno);
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	unsigned long *target_map = SIT_I(sbi)->tmp_map;
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	int i, pos;

	for (i = 0; i < entries; i++)
		target_map[i] = ckpt_map[i] | cur_map[i];

	pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);

	seg->next_blkoff = pos;
}

/*
 * If a segment is written in LFS manner, the next block offset is simply
 * obtained by increasing the current block offset. However, if a segment
 * is written in SSR manner, the next block offset is obtained by calling
 * __next_free_blkoff.
 */
static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
				struct curseg_info *seg)
{
	if (seg->alloc_type == SSR)
		__next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
	else
		seg->next_blkoff++;
}

bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno)
{
	struct seg_entry *se = get_seg_entry(sbi, segno);
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	unsigned long *target_map = SIT_I(sbi)->tmp_map;
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	int i, pos;

	for (i = 0; i < entries; i++)
		target_map[i] = ckpt_map[i] | cur_map[i];

	pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, 0);

	return pos < sbi->blocks_per_seg;
}

/*
 * This function always allocates a used segment (from the dirty seglist)
 * in SSR manner, so it recovers the existing segment information of the
 * valid blocks.
 */
static void change_curseg(struct f2fs_sb_info *sbi, int type, bool flush)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int new_segno = curseg->next_segno;
	struct f2fs_summary_block *sum_node;
	struct page *sum_page;

	if (flush)
		write_sum_page(sbi, curseg->sum_blk,
					GET_SUM_BLOCK(sbi, curseg->segno));

	__set_test_and_inuse(sbi, new_segno);

	mutex_lock(&dirty_i->seglist_lock);
	__remove_dirty_segment(sbi, new_segno, PRE);
	__remove_dirty_segment(sbi, new_segno, DIRTY);
	mutex_unlock(&dirty_i->seglist_lock);

	reset_curseg(sbi, type, 1);
	curseg->alloc_type = SSR;
	__next_free_blkoff(sbi, curseg, 0);

	sum_page = f2fs_get_sum_page(sbi, new_segno);
	if (IS_ERR(sum_page)) {
		/* GC won't be able to use stale summary pages by cp_error */
		memset(curseg->sum_blk, 0, SUM_ENTRY_SIZE);
		return;
	}
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
	f2fs_put_page(sum_page, 1);
}

static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
				int alloc_mode, unsigned long long age);

static void get_atssr_segment(struct f2fs_sb_info *sbi, int type,
					int target_type, int alloc_mode,
					unsigned long long age)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);

	curseg->seg_type = target_type;

	if (get_ssr_segment(sbi, type, alloc_mode, age)) {
		struct seg_entry *se = get_seg_entry(sbi, curseg->next_segno);

		curseg->seg_type = se->type;
		change_curseg(sbi, type, true);
	} else {
		/* allocate cold segment by default */
		curseg->seg_type = CURSEG_COLD_DATA;
		new_curseg(sbi, type, true);
	}
	stat_inc_seg_type(sbi, curseg);
}

static void __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC);

	if (!sbi->am.atgc_enabled)
		return;

	down_read(&SM_I(sbi)->curseg_lock);

	mutex_lock(&curseg->curseg_mutex);
	down_write(&SIT_I(sbi)->sentry_lock);

	get_atssr_segment(sbi, CURSEG_ALL_DATA_ATGC, CURSEG_COLD_DATA, SSR, 0);

	up_write(&SIT_I(sbi)->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);

	up_read(&SM_I(sbi)->curseg_lock);
}

void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi)
{
	__f2fs_init_atgc_curseg(sbi);
}

static void __f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	if (!curseg->inited)
		goto out;

	if (get_valid_blocks(sbi, curseg->segno, false)) {
		write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, curseg->segno));
	} else {
		mutex_lock(&DIRTY_I(sbi)->seglist_lock);
		__set_test_and_free(sbi, curseg->segno, true);
		mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
	}
out:
	mutex_unlock(&curseg->curseg_mutex);
}

void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi)
{
	__f2fs_save_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);

	if (sbi->am.atgc_enabled)
		__f2fs_save_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
}

static void __f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	if (!curseg->inited)
		goto out;
	if (get_valid_blocks(sbi, curseg->segno, false))
		goto out;

	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
	__set_test_and_inuse(sbi, curseg->segno);
	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
out:
	mutex_unlock(&curseg->curseg_mutex);
}

void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi)
{
	__f2fs_restore_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);

	if (sbi->am.atgc_enabled)
		__f2fs_restore_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
}

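/*
 * Pick a victim segment for SSR allocation. It first asks the victim
 * selector for the current log type, then scans the other logs of the
 * same kind (data or node), and finally, with checkpointing disabled,
 * falls back to any free segment in the dirty list. Returns 1 and sets
 * curseg->next_segno on success, 0 otherwise.
 */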
static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
				int alloc_mode, unsigned long long age)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
	unsigned segno = NULL_SEGNO;
	unsigned short seg_type = curseg->seg_type;
	int i, cnt;
	bool reversed = false;

	sanity_check_seg_type(sbi, seg_type);

	/* f2fs_need_SSR() already forces to do this */
	if (!v_ops->get_victim(sbi, &segno, BG_GC, seg_type, alloc_mode, age)) {
		curseg->next_segno = segno;
		return 1;
	}

	/* For node segments, let's do SSR more intensively */
	if (IS_NODESEG(seg_type)) {
		if (seg_type >= CURSEG_WARM_NODE) {
			reversed = true;
			i = CURSEG_COLD_NODE;
		} else {
			i = CURSEG_HOT_NODE;
		}
		cnt = NR_CURSEG_NODE_TYPE;
	} else {
		if (seg_type >= CURSEG_WARM_DATA) {
			reversed = true;
			i = CURSEG_COLD_DATA;
		} else {
			i = CURSEG_HOT_DATA;
		}
		cnt = NR_CURSEG_DATA_TYPE;
	}

	for (; cnt-- > 0; reversed ? i-- : i++) {
		if (i == seg_type)
			continue;
		if (!v_ops->get_victim(sbi, &segno, BG_GC, i, alloc_mode, age)) {
			curseg->next_segno = segno;
			return 1;
		}
	}

	/* find valid_blocks=0 in dirty list */
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		segno = get_free_segment(sbi);
		if (segno != NULL_SEGNO) {
			curseg->next_segno = segno;
			return 1;
		}
	}
	return 0;
}

/*
 * Flush out the current segment and replace it with a new one.
 * This function always succeeds; a failure to allocate is a BUG.
 */
static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
						int type, bool force)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);

	if (force)
		new_curseg(sbi, type, true);
	else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
					curseg->seg_type == CURSEG_WARM_NODE)
		new_curseg(sbi, type, false);
	else if (curseg->alloc_type == LFS &&
			is_next_segment_free(sbi, curseg, type) &&
			likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		new_curseg(sbi, type, false);
	else if (f2fs_need_SSR(sbi) &&
			get_ssr_segment(sbi, type, SSR, 0))
		change_curseg(sbi, type, true);
	else
		new_curseg(sbi, type, false);

	stat_inc_seg_type(sbi, curseg);
}

void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
					unsigned int start, unsigned int end)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno;

	down_read(&SM_I(sbi)->curseg_lock);
	mutex_lock(&curseg->curseg_mutex);
	down_write(&SIT_I(sbi)->sentry_lock);

	segno = CURSEG_I(sbi, type)->segno;
	if (segno < start || segno > end)
		goto unlock;

	if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type, SSR, 0))
		change_curseg(sbi, type, true);
	else
		new_curseg(sbi, type, true);

	stat_inc_seg_type(sbi, curseg);

	locate_dirty_segment(sbi, segno);
unlock:
	up_write(&SIT_I(sbi)->sentry_lock);

	if (segno != curseg->segno)
		f2fs_notice(sbi, "For resize: curseg of type %d: %u ==> %u",
			    type, segno, curseg->segno);

	mutex_unlock(&curseg->curseg_mutex);
	up_read(&SM_I(sbi)->curseg_lock);
}

static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
								bool new_sec)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int old_segno;

	if (!curseg->inited)
		goto alloc;

	if (curseg->next_blkoff ||
		get_valid_blocks(sbi, curseg->segno, new_sec))
		goto alloc;

	if (!get_ckpt_valid_blocks(sbi, curseg->segno, new_sec))
		return;
alloc:
	old_segno = curseg->segno;
	SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true);
	locate_dirty_segment(sbi, old_segno);
}

static void __allocate_new_section(struct f2fs_sb_info *sbi, int type)
{
	__allocate_new_segment(sbi, type, true);
}

void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type)
{
	down_read(&SM_I(sbi)->curseg_lock);
	down_write(&SIT_I(sbi)->sentry_lock);
	__allocate_new_section(sbi, type);
	up_write(&SIT_I(sbi)->sentry_lock);
	up_read(&SM_I(sbi)->curseg_lock);
}

void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
{
	int i;

	down_read(&SM_I(sbi)->curseg_lock);
	down_write(&SIT_I(sbi)->sentry_lock);
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
		__allocate_new_segment(sbi, i, false);
	up_write(&SIT_I(sbi)->sentry_lock);
	up_read(&SM_I(sbi)->curseg_lock);
}

static const struct segment_allocation default_salloc_ops = {
	.allocate_segment = allocate_segment_by_default,
};

bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
						struct cp_control *cpc)
{
	__u64 trim_start = cpc->trim_start;
	bool has_candidate = false;

	down_write(&SIT_I(sbi)->sentry_lock);
	for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
		if (add_discard_addrs(sbi, cpc, true)) {
			has_candidate = true;
			break;
		}
	}
	up_write(&SIT_I(sbi)->sentry_lock);

	cpc->trim_start = trim_start;
	return has_candidate;
}

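/*
 * Issue pending discard commands whose logical range falls in
 * [start, end), batching bios under a blk_plug. After max_requests
 * submissions it drops cmd_lock, waits for the issued commands, and
 * resumes from where it stopped. Returns the number of blocks trimmed.
 */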
static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
					struct discard_policy *dpolicy,
					unsigned int start, unsigned int end)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	struct discard_cmd *dc;
	struct blk_plug plug;
	int issued;
	unsigned int trimmed = 0;

next:
	issued = 0;

	mutex_lock(&dcc->cmd_lock);
	if (unlikely(dcc->rbtree_check))
		f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
							&dcc->root, false));

	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
					NULL, start,
					(struct rb_entry **)&prev_dc,
					(struct rb_entry **)&next_dc,
					&insert_p, &insert_parent, true, NULL);
	if (!dc)
		dc = next_dc;

	blk_start_plug(&plug);

	while (dc && dc->lstart <= end) {
		struct rb_node *node;
		int err = 0;

		if (dc->len < dpolicy->granularity)
			goto skip;

		if (dc->state != D_PREP) {
			list_move_tail(&dc->list, &dcc->fstrim_list);
			goto skip;
		}

		err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);

		if (issued >= dpolicy->max_requests) {
			start = dc->lstart + dc->len;

			if (err)
				__remove_discard_cmd(sbi, dc);

			blk_finish_plug(&plug);
			mutex_unlock(&dcc->cmd_lock);
			trimmed += __wait_all_discard_cmd(sbi, NULL);
			congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
			goto next;
		}
skip:
		node = rb_next(&dc->rb_node);
		if (err)
			__remove_discard_cmd(sbi, dc);
		dc = rb_entry_safe(node, struct discard_cmd, rb_node);

		if (fatal_signal_pending(current))
			break;
	}

	blk_finish_plug(&plug);
	mutex_unlock(&dcc->cmd_lock);

	return trimmed;
}

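/*
 * Handle FITRIM: clamp the byte range to main area segments, write a
 * CP_DISCARD checkpoint to turn prefree segments into discard candidates,
 * and, unless runtime discard is enabled, issue and wait for the
 * candidates with a DPOLICY_FSTRIM policy, reporting trimmed bytes back
 * through range->len.
 */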
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
{
	__u64 start = F2FS_BYTES_TO_BLK(range->start);
	__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
	unsigned int start_segno, end_segno;
	block_t start_block, end_block;
	struct cp_control cpc;
	struct discard_policy dpolicy;
	unsigned long long trimmed = 0;
	int err = 0;
	bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi);

	if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
		return -EINVAL;

	if (end < MAIN_BLKADDR(sbi))
		goto out;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
		f2fs_warn(sbi, "Found FS corruption, run fsck to fix.");
		return -EFSCORRUPTED;
	}

	/* start/end segment number in main_area */
	start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
	end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
						GET_SEGNO(sbi, end);
	if (need_align) {
		start_segno = rounddown(start_segno, sbi->segs_per_sec);
		end_segno = roundup(end_segno + 1, sbi->segs_per_sec) - 1;
	}

	cpc.reason = CP_DISCARD;
	cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
	cpc.trim_start = start_segno;
	cpc.trim_end = end_segno;

	if (sbi->discard_blks == 0)
		goto out;

	down_write(&sbi->gc_lock);
	err = f2fs_write_checkpoint(sbi, &cpc);
	up_write(&sbi->gc_lock);
	if (err)
		goto out;

	/*
	 * We filed discard candidates, but we don't actually need to wait
	 * for all of them: they will be issued at idle time by the runtime
	 * discard option. Users are expected to rely on runtime discard or
	 * periodic fstrim rather than waiting here.
	 */
	if (f2fs_realtime_discard_enable(sbi))
		goto out;

	start_block = START_BLOCK(sbi, start_segno);
	end_block = START_BLOCK(sbi, end_segno + 1);

	__init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
	trimmed = __issue_discard_cmd_range(sbi, &dpolicy,
					start_block, end_block);

	trimmed += __wait_discard_cmd_range(sbi, &dpolicy,
					start_block, end_block);
out:
	if (!err)
		range->len = F2FS_BLK_TO_BYTES(trimmed);
	return err;
}

static bool __has_curseg_space(struct f2fs_sb_info *sbi,
					struct curseg_info *curseg)
{
	return curseg->next_blkoff < f2fs_usable_blks_in_seg(sbi,
							curseg->segno);
}

int f2fs_rw_hint_to_seg_type(enum rw_hint hint)
{
	switch (hint) {
	case WRITE_LIFE_SHORT:
		return CURSEG_HOT_DATA;
	case WRITE_LIFE_EXTREME:
		return CURSEG_COLD_DATA;
	default:
		return CURSEG_WARM_DATA;
	}
}

/* This returns write hints for each segment type. These hints will be
 * passed down to the block layer. There are mapping tables which depend
 * on the mount option 'whint_mode'.
 *
 * 1) whint_mode=off. F2FS only passes down WRITE_LIFE_NOT_SET.
 *
 * 2) whint_mode=user-based. F2FS tries to pass down hints given by users.
 *
 * User                  F2FS                     Block
 * ----                  ----                     -----
 *                       META                     WRITE_LIFE_NOT_SET
 *                       HOT_NODE                 "
 *                       WARM_NODE                "
 *                       COLD_NODE                "
 * ioctl(COLD)           COLD_DATA                WRITE_LIFE_EXTREME
 * extension list        "                        "
 *
 * -- buffered io
 * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
 * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
 * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_NOT_SET
 * WRITE_LIFE_NONE       "                        "
 * WRITE_LIFE_MEDIUM     "                        "
 * WRITE_LIFE_LONG       "                        "
 *
 * -- direct io
 * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
 * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
 * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_NOT_SET
 * WRITE_LIFE_NONE       "                        WRITE_LIFE_NONE
 * WRITE_LIFE_MEDIUM     "                        WRITE_LIFE_MEDIUM
 * WRITE_LIFE_LONG       "                        WRITE_LIFE_LONG
 *
 * 3) whint_mode=fs-based. F2FS passes down hints with its policy.
 *
 * User                  F2FS                     Block
 * ----                  ----                     -----
 *                       META                     WRITE_LIFE_MEDIUM;
 *                       HOT_NODE                 WRITE_LIFE_NOT_SET
 *                       WARM_NODE                "
 *                       COLD_NODE                WRITE_LIFE_NONE
 * ioctl(COLD)           COLD_DATA                WRITE_LIFE_EXTREME
 * extension list        "                        "
 *
 * -- buffered io
 * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
 * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
 * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_LONG
 * WRITE_LIFE_NONE       "                        "
 * WRITE_LIFE_MEDIUM     "                        "
 * WRITE_LIFE_LONG       "                        "
 *
 * -- direct io
 * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
 * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
 * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_NOT_SET
 * WRITE_LIFE_NONE       "                        WRITE_LIFE_NONE
 * WRITE_LIFE_MEDIUM     "                        WRITE_LIFE_MEDIUM
 * WRITE_LIFE_LONG       "                        WRITE_LIFE_LONG
 */

enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
				enum page_type type, enum temp_type temp)
{
	if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER) {
		if (type == DATA) {
			if (temp == WARM)
				return WRITE_LIFE_NOT_SET;
			else if (temp == HOT)
				return WRITE_LIFE_SHORT;
			else if (temp == COLD)
				return WRITE_LIFE_EXTREME;
		} else {
			return WRITE_LIFE_NOT_SET;
		}
	} else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS) {
		if (type == DATA) {
			if (temp == WARM)
				return WRITE_LIFE_LONG;
			else if (temp == HOT)
				return WRITE_LIFE_SHORT;
			else if (temp == COLD)
				return WRITE_LIFE_EXTREME;
		} else if (type == NODE) {
			if (temp == WARM || temp == HOT)
				return WRITE_LIFE_NOT_SET;
			else if (temp == COLD)
				return WRITE_LIFE_NONE;
		} else if (type == META) {
			return WRITE_LIFE_MEDIUM;
		}
	}
	return WRITE_LIFE_NOT_SET;
}

static int __get_segment_type_2(struct f2fs_io_info *fio)
{
	if (fio->type == DATA)
		return CURSEG_HOT_DATA;
	else
		return CURSEG_HOT_NODE;
}

static int __get_segment_type_4(struct f2fs_io_info *fio)
{
	if (fio->type == DATA) {
		struct inode *inode = fio->page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else
			return CURSEG_COLD_DATA;
	} else {
		if (IS_DNODE(fio->page) && is_cold_node(fio->page))
			return CURSEG_WARM_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}

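/*
 * Full six-log temperature policy: cold data goes to the ATGC log when
 * available, hot/cold file attributes and write hints pick the data log,
 * and direct node blocks are separated from indirect ones on the node
 * side.
 */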
static int __get_segment_type_6(struct f2fs_io_info *fio)
{
	if (fio->type == DATA) {
		struct inode *inode = fio->page->mapping->host;

		if (is_cold_data(fio->page)) {
			if (fio->sbi->am.atgc_enabled)
				return CURSEG_ALL_DATA_ATGC;
			else
				return CURSEG_COLD_DATA;
		}
		if (file_is_cold(inode) || f2fs_compressed_file(inode))
			return CURSEG_COLD_DATA;
		if (file_is_hot(inode) ||
				is_inode_flag_set(inode, FI_HOT_DATA) ||
				f2fs_is_atomic_file(inode) ||
				f2fs_is_volatile_file(inode))
			return CURSEG_HOT_DATA;
		return f2fs_rw_hint_to_seg_type(inode->i_write_hint);
	} else {
		if (IS_DNODE(fio->page))
			return is_cold_node(fio->page) ? CURSEG_WARM_NODE :
						CURSEG_HOT_NODE;
		return CURSEG_COLD_NODE;
	}
}

static int __get_segment_type(struct f2fs_io_info *fio)
{
	int type = 0;

	switch (F2FS_OPTION(fio->sbi).active_logs) {
	case 2:
		type = __get_segment_type_2(fio);
		break;
	case 4:
		type = __get_segment_type_4(fio);
		break;
	case 6:
		type = __get_segment_type_6(fio);
		break;
	default:
		f2fs_bug_on(fio->sbi, true);
	}

	if (IS_HOT(type))
		fio->temp = HOT;
	else if (IS_WARM(type))
		fio->temp = WARM;
	else
		fio->temp = COLD;
	return type;
}

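/*
 * Core block allocator: reserve the next free slot of the chosen log,
 * record the summary entry, update SIT info for the new (and any old)
 * block address, move to a fresh segment when the current one is full,
 * and, if @fio is given, queue the write in temperature order.
 */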
void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
		block_t old_blkaddr, block_t *new_blkaddr,
		struct f2fs_summary *sum, int type,
		struct f2fs_io_info *fio)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned long long old_mtime;
	bool from_gc = (type == CURSEG_ALL_DATA_ATGC);
	struct seg_entry *se = NULL;

	down_read(&SM_I(sbi)->curseg_lock);

	mutex_lock(&curseg->curseg_mutex);
	down_write(&sit_i->sentry_lock);

	if (from_gc) {
		f2fs_bug_on(sbi, GET_SEGNO(sbi, old_blkaddr) == NULL_SEGNO);
		se = get_seg_entry(sbi, GET_SEGNO(sbi, old_blkaddr));
		sanity_check_seg_type(sbi, se->type);
		f2fs_bug_on(sbi, IS_NODESEG(se->type));
	}
	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	f2fs_bug_on(sbi, curseg->next_blkoff >= sbi->blocks_per_seg);

	f2fs_wait_discard_bio(sbi, *new_blkaddr);

	/*
	 * __add_sum_entry should be called under the curseg_mutex, because
	 * it updates a summary entry in the current summary block.
	 */
3365 __add_sum_entry(sbi, type, sum);
3366
3367 __refresh_next_blkoff(sbi, curseg);
3368
3369 stat_inc_block_count(sbi, curseg);
3370
3371	if (from_gc) {
3372 old_mtime = get_segment_mtime(sbi, old_blkaddr);
3373 } else {
3374 update_segment_mtime(sbi, old_blkaddr, 0);
3375 old_mtime = 0;
3376 }
3377 update_segment_mtime(sbi, *new_blkaddr, old_mtime);
3378
3379	/*
3380	 * SIT information should be updated before segment allocation,
3381	 * since SSR needs the latest valid block information.
3382 */
3383 update_sit_entry(sbi, *new_blkaddr, 1);
3384 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
3385 update_sit_entry(sbi, old_blkaddr, -1);
3386
3387	if (!__has_curseg_space(sbi, curseg)) {
3388 if (from_gc)
3389 get_atssr_segment(sbi, type, se->type,
3390 AT_SSR, se->mtime);
3391 else
3392 sit_i->s_ops->allocate_segment(sbi, type, false);
3393 }
3394	/*
3395	 * The segment's dirty status should be updated after segment
3396	 * allocation, so we only need to update it once, right after
3397	 * the previous segment has been closed.
3398 */
3399 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3400 locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));
3401
3402 up_write(&sit_i->sentry_lock);
3403
3404 if (page && IS_NODESEG(type)) {
3405 fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
3406
3407 f2fs_inode_chksum_set(sbi, page);
3408 }
3409
3410	if (fio) {
3411		struct f2fs_bio_info *io;
3412
3413		if (F2FS_IO_ALIGNED(sbi))
3414 fio->retry = false;
3415
3416		INIT_LIST_HEAD(&fio->list);
3417		fio->in_list = true;
3418		io = sbi->write_io[fio->type] + fio->temp;
3419 spin_lock(&io->io_lock);
3420 list_add_tail(&fio->list, &io->io_list);
3421 spin_unlock(&io->io_lock);
3422 }
3423
3424 mutex_unlock(&curseg->curseg_mutex);
3425
3426 up_read(&SM_I(sbi)->curseg_lock);
3427}
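/*
 * Editor's note (derived from the locking calls above, kept as a hedge
 * against misreading): the order taken here is
 *
 *   SM_I(sbi)->curseg_lock (read)
 *     -> curseg->curseg_mutex
 *       -> sit_i->sentry_lock (write)
 *
 * f2fs_do_replace_block() below takes curseg_lock for write instead,
 * since it may switch the current segment itself.
 */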
3428
3429static void update_device_state(struct f2fs_io_info *fio)
3430{
3431 struct f2fs_sb_info *sbi = fio->sbi;
3432 unsigned int devidx;
3433
3434	if (!f2fs_is_multi_device(sbi))
3435		return;
3436
3437 devidx = f2fs_target_device_index(sbi, fio->new_blkaddr);
3438
3439 /* update device state for fsync */
3440 f2fs_set_dirty_device(sbi, fio->ino, devidx, FLUSH_INO);
3441
3442 /* update device state for checkpoint */
3443 if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) {
3444 spin_lock(&sbi->dev_lock);
3445 f2fs_set_bit(devidx, (char *)&sbi->dirty_device);
3446 spin_unlock(&sbi->dev_lock);
3447 }
3448}
3449
3450static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
3451{
3452 int type = __get_segment_type(fio);
3453	bool keep_order = (f2fs_lfs_mode(fio->sbi) && type == CURSEG_COLD_DATA);
3454
3455 if (keep_order)
3456 down_read(&fio->sbi->io_order_lock);
3457reallocate:
3458 f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
3459			&fio->new_blkaddr, sum, type, fio);
3460	if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
3461 invalidate_mapping_pages(META_MAPPING(fio->sbi),
3462 fio->old_blkaddr, fio->old_blkaddr);
3463
3464 /* writeout dirty page into bdev */
3465 f2fs_submit_page_write(fio);
3466 if (fio->retry) {
3467 fio->old_blkaddr = fio->new_blkaddr;
3468 goto reallocate;
3469 }
3470
3471 update_device_state(fio);
3472
3473 if (keep_order)
3474 up_read(&fio->sbi->io_order_lock);
3475}
3476
3477void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
3478 enum iostat_type io_type)
3479{
3480 struct f2fs_io_info fio = {
3481 .sbi = sbi,
3482 .type = META,
3483 .temp = HOT,
3484 .op = REQ_OP_WRITE,
3485 .op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
3486 .old_blkaddr = page->index,
3487 .new_blkaddr = page->index,
3488 .page = page,
3489 .encrypted_page = NULL,
3490 .in_list = false,
3491 };
3492
3493 if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
3494 fio.op_flags &= ~REQ_META;
3495
3496 set_page_writeback(page);
3497 ClearPageError(page);
3498 f2fs_submit_page_write(&fio);
3499
3500	stat_inc_meta_count(sbi, page->index);
3501	f2fs_update_iostat(sbi, io_type, F2FS_BLKSIZE);
3502}
3503
3504void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio)
3505{
3506 struct f2fs_summary sum;
3507
3508 set_summary(&sum, nid, 0, 0);
3509 do_write_page(&sum, fio);
3510
3511 f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
3512}
3513
3514void f2fs_outplace_write_data(struct dnode_of_data *dn,
3515 struct f2fs_io_info *fio)
3516{
3517 struct f2fs_sb_info *sbi = fio->sbi;
3518 struct f2fs_summary sum;
3519
3520 f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
3521 set_summary(&sum, dn->nid, dn->ofs_in_node, fio->version);
3522 do_write_page(&sum, fio);
3523 f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
3524
3525 f2fs_update_iostat(sbi, fio->io_type, F2FS_BLKSIZE);
3526}
3527
3528int f2fs_inplace_write_data(struct f2fs_io_info *fio)
3529{
3530 int err;
3531 struct f2fs_sb_info *sbi = fio->sbi;
3532	unsigned int segno;
3533
3534 fio->new_blkaddr = fio->old_blkaddr;
3535 /* i/o temperature is needed for passing down write hints */
3536 __get_segment_type(fio);
3537
3538	segno = GET_SEGNO(sbi, fio->new_blkaddr);
3539
3540 if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) {
3541 set_sbi_flag(sbi, SBI_NEED_FSCK);
3542 f2fs_warn(sbi, "%s: incorrect segment(%u) type, run fsck to fix.",
3543 __func__, segno);
3544 return -EFSCORRUPTED;
3545 }
3546
3547 stat_inc_inplace_blocks(fio->sbi);
3548
3549	if (fio->bio && !(SM_I(sbi)->ipu_policy & (1 << F2FS_IPU_NOCACHE)))
3550		err = f2fs_merge_page_bio(fio);
3551 else
3552 err = f2fs_submit_page_bio(fio);
3553 if (!err) {
3554		update_device_state(fio);
3555		f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
3556	}
3557
3558 return err;
3559}
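/*
 * A minimal sketch (editorial, hypothetical helper name) of the IPU
 * policy test used above: ipu_policy is a bitmask, so a policy such as
 * F2FS_IPU_NOCACHE is tested with (1 << policy).
 */
#if 0
static bool example_ipu_policy_set(struct f2fs_sb_info *sbi, int policy)
{
	return SM_I(sbi)->ipu_policy & (1 << policy);
}
#endif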
3560
3561static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi,
3562 unsigned int segno)
3563{
3564 int i;
3565
3566 for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) {
3567 if (CURSEG_I(sbi, i)->segno == segno)
3568 break;
3569 }
3570 return i;
3571}
3572
3573void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
3574 block_t old_blkaddr, block_t new_blkaddr,
3575				bool recover_curseg, bool recover_newaddr,
3576				bool from_gc)
3577{
3578 struct sit_info *sit_i = SIT_I(sbi);
3579 struct curseg_info *curseg;
3580 unsigned int segno, old_cursegno;
3581 struct seg_entry *se;
3582 int type;
3583 unsigned short old_blkoff;
3584
3585 segno = GET_SEGNO(sbi, new_blkaddr);
3586 se = get_seg_entry(sbi, segno);
3587 type = se->type;
3588
3589 down_write(&SM_I(sbi)->curseg_lock);
3590
3591 if (!recover_curseg) {
3592 /* for recovery flow */
3593 if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
3594 if (old_blkaddr == NULL_ADDR)
3595 type = CURSEG_COLD_DATA;
3596 else
3597 type = CURSEG_WARM_DATA;
3598 }
3599 } else {
3600 if (IS_CURSEG(sbi, segno)) {
3601			/* se->type is volatile due to SSR allocation */
3602 type = __f2fs_get_curseg(sbi, segno);
3603 f2fs_bug_on(sbi, type == NO_CHECK_TYPE);
3604 } else {
3605 type = CURSEG_WARM_DATA;
3606 }
3607 }
3608
3609 f2fs_bug_on(sbi, !IS_DATASEG(type));
3610 curseg = CURSEG_I(sbi, type);
3611
3612 mutex_lock(&curseg->curseg_mutex);
3613 down_write(&sit_i->sentry_lock);
3614
3615 old_cursegno = curseg->segno;
3616 old_blkoff = curseg->next_blkoff;
3617
3618 /* change the current segment */
3619 if (segno != curseg->segno) {
3620 curseg->next_segno = segno;
3621		change_curseg(sbi, type, true);
3622	}
3623
3624 curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
3625 __add_sum_entry(sbi, type, sum);
3626
3627	if (!recover_curseg || recover_newaddr) {
3628 if (!from_gc)
3629 update_segment_mtime(sbi, new_blkaddr, 0);
3630		update_sit_entry(sbi, new_blkaddr, 1);
3631	}
3632	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
3633 invalidate_mapping_pages(META_MAPPING(sbi),
3634 old_blkaddr, old_blkaddr);
3635		if (!from_gc)
3636			update_segment_mtime(sbi, old_blkaddr, 0);
3637		update_sit_entry(sbi, old_blkaddr, -1);
3638 }
3639
3640 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3641 locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
3642
3643 locate_dirty_segment(sbi, old_cursegno);
3644
3645 if (recover_curseg) {
3646 if (old_cursegno != curseg->segno) {
3647 curseg->next_segno = old_cursegno;
3648			change_curseg(sbi, type, true);
3649		}
3650 curseg->next_blkoff = old_blkoff;
3651 }
3652
3653 up_write(&sit_i->sentry_lock);
3654 mutex_unlock(&curseg->curseg_mutex);
3655 up_write(&SM_I(sbi)->curseg_lock);
3656}
3657
3658void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
3659 block_t old_addr, block_t new_addr,
3660 unsigned char version, bool recover_curseg,
3661 bool recover_newaddr)
3662{
3663 struct f2fs_summary sum;
3664
3665 set_summary(&sum, dn->nid, dn->ofs_in_node, version);
3666
3667 f2fs_do_replace_block(sbi, &sum, old_addr, new_addr,
3668				recover_curseg, recover_newaddr, false);
3669
3670 f2fs_update_data_blkaddr(dn, new_addr);
3671}
3672
3673void f2fs_wait_on_page_writeback(struct page *page,
3674			enum page_type type, bool ordered, bool locked)
3675{
3676 if (PageWriteback(page)) {
3677 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
3678
3679		/* submit cached LFS IO */
3680		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type);
3681		/* submit cached IPU IO */
3682		f2fs_submit_merged_ipu_write(sbi, NULL, page);
3683		if (ordered) {
3684			wait_on_page_writeback(page);
3685			f2fs_bug_on(sbi, locked && PageWriteback(page));
3686		} else {
3687			wait_for_stable_page(page);
3688		}
3689	}
3690}
3691
3692void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr)
3693{
3694	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3695	struct page *cpage;
3696
3697	if (!f2fs_post_read_required(inode))
3698 return;
3699
3700 if (!__is_valid_data_blkaddr(blkaddr))
3701		return;
3702
3703 cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
3704 if (cpage) {
3705		f2fs_wait_on_page_writeback(cpage, DATA, true, true);
3706		f2fs_put_page(cpage, 1);
3707 }
3708}
3709
3710void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
3711 block_t len)
3712{
3713 block_t i;
3714
3715 for (i = 0; i < len; i++)
3716 f2fs_wait_on_block_writeback(inode, blkaddr + i);
3717}
3718
3719static int read_compacted_summaries(struct f2fs_sb_info *sbi)
3720{
3721 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3722 struct curseg_info *seg_i;
3723 unsigned char *kaddr;
3724 struct page *page;
3725 block_t start;
3726 int i, j, offset;
3727
3728 start = start_sum_block(sbi);
3729
3730 page = f2fs_get_meta_page(sbi, start++);
3731 if (IS_ERR(page))
3732 return PTR_ERR(page);
3733 kaddr = (unsigned char *)page_address(page);
3734
3735 /* Step 1: restore nat cache */
3736 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
3737 memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);
3738
3739 /* Step 2: restore sit cache */
3740 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
3741 memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
3742 offset = 2 * SUM_JOURNAL_SIZE;
3743
3744 /* Step 3: restore summary entries */
3745 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
3746 unsigned short blk_off;
3747 unsigned int segno;
3748
3749 seg_i = CURSEG_I(sbi, i);
3750 segno = le32_to_cpu(ckpt->cur_data_segno[i]);
3751 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
3752 seg_i->next_segno = segno;
3753 reset_curseg(sbi, i, 0);
3754 seg_i->alloc_type = ckpt->alloc_type[i];
3755 seg_i->next_blkoff = blk_off;
3756
3757 if (seg_i->alloc_type == SSR)
3758 blk_off = sbi->blocks_per_seg;
3759
3760 for (j = 0; j < blk_off; j++) {
3761 struct f2fs_summary *s;
3762 s = (struct f2fs_summary *)(kaddr + offset);
3763 seg_i->sum_blk->entries[j] = *s;
3764 offset += SUMMARY_SIZE;
3765 if (offset + SUMMARY_SIZE <= PAGE_SIZE -
3766 SUM_FOOTER_SIZE)
3767 continue;
3768
3769 f2fs_put_page(page, 1);
3770 page = NULL;
3771
3772 page = f2fs_get_meta_page(sbi, start++);
3773 if (IS_ERR(page))
3774 return PTR_ERR(page);
3775 kaddr = (unsigned char *)page_address(page);
3776 offset = 0;
3777 }
3778 }
3779 f2fs_put_page(page, 1);
3780 return 0;
3781}
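/*
 * Editor's sketch of the compacted summary layout assumed by the reader
 * above (inferred from the offsets in the code, not from a spec):
 *
 *   block 0:  [ NAT journal | SIT journal | packed summary entries ... ]
 *   block 1+: [ packed summary entries ... ]
 *
 * Entries for the three data logs are packed back to back; a new meta
 * page is fetched whenever the next entry would run into the
 * SUM_FOOTER_SIZE tail of the current page.  An SSR log restores a full
 * segment's worth of entries regardless of next_blkoff.
 */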
3782
3783static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
3784{
3785 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3786 struct f2fs_summary_block *sum;
3787 struct curseg_info *curseg;
3788 struct page *new;
3789 unsigned short blk_off;
3790 unsigned int segno = 0;
3791 block_t blk_addr = 0;
3792 int err = 0;
3793
3794 /* get segment number and block addr */
3795 if (IS_DATASEG(type)) {
3796 segno = le32_to_cpu(ckpt->cur_data_segno[type]);
3797 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
3798 CURSEG_HOT_DATA]);
3799 if (__exist_node_summaries(sbi))
3800			blk_addr = sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type);
3801		else
3802 blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
3803 } else {
3804 segno = le32_to_cpu(ckpt->cur_node_segno[type -
3805 CURSEG_HOT_NODE]);
3806 blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
3807 CURSEG_HOT_NODE]);
3808 if (__exist_node_summaries(sbi))
3809 blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
3810 type - CURSEG_HOT_NODE);
3811 else
3812 blk_addr = GET_SUM_BLOCK(sbi, segno);
3813 }
3814
3815 new = f2fs_get_meta_page(sbi, blk_addr);
3816 if (IS_ERR(new))
3817 return PTR_ERR(new);
3818 sum = (struct f2fs_summary_block *)page_address(new);
3819
3820 if (IS_NODESEG(type)) {
3821 if (__exist_node_summaries(sbi)) {
3822 struct f2fs_summary *ns = &sum->entries[0];
3823 int i;
3824 for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
3825 ns->version = 0;
3826 ns->ofs_in_node = 0;
3827 }
3828 } else {
3829 err = f2fs_restore_node_summary(sbi, segno, sum);
3830 if (err)
3831 goto out;
3832 }
3833 }
3834
3835 /* set uncompleted segment to curseg */
3836 curseg = CURSEG_I(sbi, type);
3837 mutex_lock(&curseg->curseg_mutex);
3838
3839 /* update journal info */
3840 down_write(&curseg->journal_rwsem);
3841 memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
3842 up_write(&curseg->journal_rwsem);
3843
3844 memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
3845 memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
3846 curseg->next_segno = segno;
3847 reset_curseg(sbi, type, 0);
3848 curseg->alloc_type = ckpt->alloc_type[type];
3849 curseg->next_blkoff = blk_off;
3850 mutex_unlock(&curseg->curseg_mutex);
3851out:
3852 f2fs_put_page(new, 1);
3853 return err;
3854}
3855
3856static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
3857{
3858 struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal;
3859 struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal;
3860 int type = CURSEG_HOT_DATA;
3861 int err;
3862
3863 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
3864 int npages = f2fs_npages_for_summary_flush(sbi, true);
3865
3866 if (npages >= 2)
3867 f2fs_ra_meta_pages(sbi, start_sum_block(sbi), npages,
3868 META_CP, true);
3869
3870 /* restore for compacted data summary */
3871 err = read_compacted_summaries(sbi);
3872 if (err)
3873 return err;
3874 type = CURSEG_HOT_NODE;
3875 }
3876
3877 if (__exist_node_summaries(sbi))
3878		f2fs_ra_meta_pages(sbi,
3879 sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type),
3880 NR_CURSEG_PERSIST_TYPE - type, META_CP, true);
3881
3882 for (; type <= CURSEG_COLD_NODE; type++) {
3883 err = read_normal_summaries(sbi, type);
3884 if (err)
3885 return err;
3886 }
3887
3888 /* sanity check for summary blocks */
3889 if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES ||
3890			sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES) {
3891 f2fs_err(sbi, "invalid journal entries nats %u sits %u\n",
3892 nats_in_cursum(nat_j), sits_in_cursum(sit_j));
3893		return -EINVAL;
3894	}
3895
3896 return 0;
3897}
3898
3899static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
3900{
3901 struct page *page;
3902 unsigned char *kaddr;
3903 struct f2fs_summary *summary;
3904 struct curseg_info *seg_i;
3905 int written_size = 0;
3906 int i, j;
3907
3908 page = f2fs_grab_meta_page(sbi, blkaddr++);
3909 kaddr = (unsigned char *)page_address(page);
3910 memset(kaddr, 0, PAGE_SIZE);
3911
3912 /* Step 1: write nat cache */
3913 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
3914 memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
3915 written_size += SUM_JOURNAL_SIZE;
3916
3917 /* Step 2: write sit cache */
3918 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
3919 memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
3920 written_size += SUM_JOURNAL_SIZE;
3921
3922 /* Step 3: write summary entries */
3923 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
3924 unsigned short blkoff;
3925 seg_i = CURSEG_I(sbi, i);
3926 if (sbi->ckpt->alloc_type[i] == SSR)
3927 blkoff = sbi->blocks_per_seg;
3928 else
3929 blkoff = curseg_blkoff(sbi, i);
3930
3931 for (j = 0; j < blkoff; j++) {
3932 if (!page) {
3933 page = f2fs_grab_meta_page(sbi, blkaddr++);
3934 kaddr = (unsigned char *)page_address(page);
3935 memset(kaddr, 0, PAGE_SIZE);
3936 written_size = 0;
3937 }
3938 summary = (struct f2fs_summary *)(kaddr + written_size);
3939 *summary = seg_i->sum_blk->entries[j];
3940 written_size += SUMMARY_SIZE;
3941
3942 if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
3943 SUM_FOOTER_SIZE)
3944 continue;
3945
3946 set_page_dirty(page);
3947 f2fs_put_page(page, 1);
3948 page = NULL;
3949 }
3950 }
3951 if (page) {
3952 set_page_dirty(page);
3953 f2fs_put_page(page, 1);
3954 }
3955}
3956
3957static void write_normal_summaries(struct f2fs_sb_info *sbi,
3958 block_t blkaddr, int type)
3959{
3960 int i, end;
3961 if (IS_DATASEG(type))
3962 end = type + NR_CURSEG_DATA_TYPE;
3963 else
3964 end = type + NR_CURSEG_NODE_TYPE;
3965
3966 for (i = type; i < end; i++)
3967 write_current_sum_page(sbi, i, blkaddr + (i - type));
3968}
3969
3970void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
3971{
3972 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
3973 write_compacted_summaries(sbi, start_blk);
3974 else
3975 write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
3976}
3977
3978void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
3979{
3980 write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
3981}
3982
3983int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
3984 unsigned int val, int alloc)
3985{
3986 int i;
3987
3988 if (type == NAT_JOURNAL) {
3989 for (i = 0; i < nats_in_cursum(journal); i++) {
3990 if (le32_to_cpu(nid_in_journal(journal, i)) == val)
3991 return i;
3992 }
3993 if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
3994 return update_nats_in_cursum(journal, 1);
3995 } else if (type == SIT_JOURNAL) {
3996 for (i = 0; i < sits_in_cursum(journal); i++)
3997 if (le32_to_cpu(segno_in_journal(journal, i)) == val)
3998 return i;
3999 if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
4000 return update_sits_in_cursum(journal, 1);
4001 }
4002 return -1;
4003}
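/*
 * Usage sketch (editorial; mirrors the call made from
 * f2fs_flush_sit_entries() below): find or allocate a SIT journal slot
 * for @segno.  A negative return means the journal has no room and the
 * entry must go to a SIT page instead.
 */
#if 0
static int example_sit_journal_slot(struct f2fs_journal *journal,
					unsigned int segno)
{
	/* non-zero last argument: allocate a slot if none matches */
	return f2fs_lookup_journal_in_cursum(journal, SIT_JOURNAL, segno, 1);
}
#endif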
4004
4005static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
4006 unsigned int segno)
4007{
4008	return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno));
4009}
4010
4011static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
4012 unsigned int start)
4013{
4014 struct sit_info *sit_i = SIT_I(sbi);
4015 struct page *page;
4016 pgoff_t src_off, dst_off;
4017
4018 src_off = current_sit_addr(sbi, start);
4019 dst_off = next_sit_addr(sbi, src_off);
4020
4021 page = f2fs_grab_meta_page(sbi, dst_off);
4022 seg_info_to_sit_page(sbi, page, start);
4023
4024 set_page_dirty(page);
4025 set_to_next_sit(sit_i, start);
4026
4027 return page;
4028}
4029
4030static struct sit_entry_set *grab_sit_entry_set(void)
4031{
4032 struct sit_entry_set *ses =
4033 f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS);
4034
4035 ses->entry_cnt = 0;
4036 INIT_LIST_HEAD(&ses->set_list);
4037 return ses;
4038}
4039
4040static void release_sit_entry_set(struct sit_entry_set *ses)
4041{
4042 list_del(&ses->set_list);
4043 kmem_cache_free(sit_entry_set_slab, ses);
4044}
4045
4046static void adjust_sit_entry_set(struct sit_entry_set *ses,
4047 struct list_head *head)
4048{
4049 struct sit_entry_set *next = ses;
4050
4051 if (list_is_last(&ses->set_list, head))
4052 return;
4053
4054 list_for_each_entry_continue(next, head, set_list)
4055 if (ses->entry_cnt <= next->entry_cnt)
4056 break;
4057
4058 list_move_tail(&ses->set_list, &next->set_list);
4059}
4060
4061static void add_sit_entry(unsigned int segno, struct list_head *head)
4062{
4063 struct sit_entry_set *ses;
4064 unsigned int start_segno = START_SEGNO(segno);
4065
4066 list_for_each_entry(ses, head, set_list) {
4067 if (ses->start_segno == start_segno) {
4068 ses->entry_cnt++;
4069 adjust_sit_entry_set(ses, head);
4070 return;
4071 }
4072 }
4073
4074 ses = grab_sit_entry_set();
4075
4076 ses->start_segno = start_segno;
4077 ses->entry_cnt++;
4078 list_add(&ses->set_list, head);
4079}
4080
4081static void add_sits_in_set(struct f2fs_sb_info *sbi)
4082{
4083 struct f2fs_sm_info *sm_info = SM_I(sbi);
4084 struct list_head *set_list = &sm_info->sit_entry_set;
4085 unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
4086 unsigned int segno;
4087
4088 for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
4089 add_sit_entry(segno, set_list);
4090}
4091
4092static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
4093{
4094 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4095 struct f2fs_journal *journal = curseg->journal;
4096 int i;
4097
4098 down_write(&curseg->journal_rwsem);
4099 for (i = 0; i < sits_in_cursum(journal); i++) {
4100 unsigned int segno;
4101 bool dirtied;
4102
4103 segno = le32_to_cpu(segno_in_journal(journal, i));
4104 dirtied = __mark_sit_entry_dirty(sbi, segno);
4105
4106 if (!dirtied)
4107 add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
4108 }
4109 update_sits_in_cursum(journal, -i);
4110 up_write(&curseg->journal_rwsem);
4111}
4112
4113/*
4114 * CP calls this function, which flushes SIT entries including sit_journal,
4115 * and moves prefree segs to free segs.
4116 */
4117void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
4118{
4119 struct sit_info *sit_i = SIT_I(sbi);
4120 unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
4121 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4122 struct f2fs_journal *journal = curseg->journal;
4123 struct sit_entry_set *ses, *tmp;
4124 struct list_head *head = &SM_I(sbi)->sit_entry_set;
4125	bool to_journal = !is_sbi_flag_set(sbi, SBI_IS_RESIZEFS);
4126	struct seg_entry *se;
4127
4128 down_write(&sit_i->sentry_lock);
4129
4130 if (!sit_i->dirty_sentries)
4131 goto out;
4132
4133 /*
4134 * add and account sit entries of dirty bitmap in sit entry
4135 * set temporarily
4136 */
4137 add_sits_in_set(sbi);
4138
4139 /*
4140	 * if there is not enough space in the journal to store dirty sit
4141	 * entries, remove all entries from the journal and account
4142	 * them in the sit entry set.
4143 */
4144	if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL) ||
4145								!to_journal)
4146		remove_sits_in_journal(sbi);
4147
4148 /*
4149 * there are two steps to flush sit entries:
4150 * #1, flush sit entries to journal in current cold data summary block.
4151 * #2, flush sit entries to sit page.
4152 */
4153 list_for_each_entry_safe(ses, tmp, head, set_list) {
4154 struct page *page = NULL;
4155 struct f2fs_sit_block *raw_sit = NULL;
4156 unsigned int start_segno = ses->start_segno;
4157 unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
4158 (unsigned long)MAIN_SEGS(sbi));
4159 unsigned int segno = start_segno;
4160
4161 if (to_journal &&
4162 !__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
4163 to_journal = false;
4164
4165 if (to_journal) {
4166 down_write(&curseg->journal_rwsem);
4167 } else {
4168 page = get_next_sit_page(sbi, start_segno);
4169 raw_sit = page_address(page);
4170 }
4171
4172 /* flush dirty sit entries in region of current sit set */
4173 for_each_set_bit_from(segno, bitmap, end) {
4174 int offset, sit_offset;
4175
4176 se = get_seg_entry(sbi, segno);
4177#ifdef CONFIG_F2FS_CHECK_FS
4178 if (memcmp(se->cur_valid_map, se->cur_valid_map_mir,
4179 SIT_VBLOCK_MAP_SIZE))
4180 f2fs_bug_on(sbi, 1);
4181#endif
4182
4183 /* add discard candidates */
4184 if (!(cpc->reason & CP_DISCARD)) {
4185 cpc->trim_start = segno;
4186 add_discard_addrs(sbi, cpc, false);
4187 }
4188
4189 if (to_journal) {
4190 offset = f2fs_lookup_journal_in_cursum(journal,
4191 SIT_JOURNAL, segno, 1);
4192 f2fs_bug_on(sbi, offset < 0);
4193 segno_in_journal(journal, offset) =
4194 cpu_to_le32(segno);
4195 seg_info_to_raw_sit(se,
4196 &sit_in_journal(journal, offset));
4197 check_block_count(sbi, segno,
4198 &sit_in_journal(journal, offset));
4199 } else {
4200 sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
4201 seg_info_to_raw_sit(se,
4202 &raw_sit->entries[sit_offset]);
4203 check_block_count(sbi, segno,
4204 &raw_sit->entries[sit_offset]);
4205 }
4206
4207 __clear_bit(segno, bitmap);
4208 sit_i->dirty_sentries--;
4209 ses->entry_cnt--;
4210 }
4211
4212 if (to_journal)
4213 up_write(&curseg->journal_rwsem);
4214 else
4215 f2fs_put_page(page, 1);
4216
4217 f2fs_bug_on(sbi, ses->entry_cnt);
4218 release_sit_entry_set(ses);
4219 }
4220
4221 f2fs_bug_on(sbi, !list_empty(head));
4222 f2fs_bug_on(sbi, sit_i->dirty_sentries);
4223out:
4224 if (cpc->reason & CP_DISCARD) {
4225 __u64 trim_start = cpc->trim_start;
4226
4227 for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
4228 add_discard_addrs(sbi, cpc, false);
4229
4230 cpc->trim_start = trim_start;
4231 }
4232 up_write(&sit_i->sentry_lock);
4233
4234 set_prefree_as_free_segments(sbi);
4235}
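/*
 * Editor's condensation of the journal-vs-page decision above (a sketch
 * with a hypothetical name, not kernel code): a set is written to the
 * journal only while the journal still has room for its entries.
 */
#if 0
static bool example_flush_to_journal(struct f2fs_journal *journal,
					struct sit_entry_set *ses,
					bool to_journal)
{
	return to_journal &&
		__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL);
}
#endif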
4236
4237static int build_sit_info(struct f2fs_sb_info *sbi)
4238{
4239 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
4240 struct sit_info *sit_i;
4241 unsigned int sit_segs, start;
4242	char *src_bitmap, *bitmap;
4243 unsigned int bitmap_size, main_bitmap_size, sit_bitmap_size;
4244
4245 /* allocate memory for SIT information */
4246 sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL);
4247 if (!sit_i)
4248 return -ENOMEM;
4249
4250 SM_I(sbi)->sit_info = sit_i;
4251
4252 sit_i->sentries =
4253 f2fs_kvzalloc(sbi, array_size(sizeof(struct seg_entry),
4254 MAIN_SEGS(sbi)),
4255 GFP_KERNEL);
4256 if (!sit_i->sentries)
4257 return -ENOMEM;
4258
4259	main_bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4260	sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, main_bitmap_size,
4261								GFP_KERNEL);
4262 if (!sit_i->dirty_sentries_bitmap)
4263 return -ENOMEM;
4264
4265#ifdef CONFIG_F2FS_CHECK_FS
4266 bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * 4;
4267#else
4268 bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * 3;
4269#endif
4270 sit_i->bitmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4271 if (!sit_i->bitmap)
4272 return -ENOMEM;
4273
4274 bitmap = sit_i->bitmap;
4275
4276	for (start = 0; start < MAIN_SEGS(sbi); start++) {
4277		sit_i->sentries[start].cur_valid_map = bitmap;
4278 bitmap += SIT_VBLOCK_MAP_SIZE;
4279
4280 sit_i->sentries[start].ckpt_valid_map = bitmap;
4281 bitmap += SIT_VBLOCK_MAP_SIZE;
4282
4283#ifdef CONFIG_F2FS_CHECK_FS
4284		sit_i->sentries[start].cur_valid_map_mir = bitmap;
4285		bitmap += SIT_VBLOCK_MAP_SIZE;
4286#endif
4287
4288		sit_i->sentries[start].discard_map = bitmap;
4289		bitmap += SIT_VBLOCK_MAP_SIZE;
4290	}
4291
4292 sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
4293 if (!sit_i->tmp_map)
4294 return -ENOMEM;
4295
4296	if (__is_large_section(sbi)) {
4297		sit_i->sec_entries =
4298 f2fs_kvzalloc(sbi, array_size(sizeof(struct sec_entry),
4299 MAIN_SECS(sbi)),
4300 GFP_KERNEL);
4301 if (!sit_i->sec_entries)
4302 return -ENOMEM;
4303 }
4304
4305 /* get information related with SIT */
4306 sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
4307
4308	/* set up the SIT bitmap from the checkpoint pack */
4309	sit_bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
4310	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
4311
4312	sit_i->sit_bitmap = kmemdup(src_bitmap, sit_bitmap_size, GFP_KERNEL);
4313	if (!sit_i->sit_bitmap)
4314 return -ENOMEM;
4315
4316#ifdef CONFIG_F2FS_CHECK_FS
4317	sit_i->sit_bitmap_mir = kmemdup(src_bitmap,
4318					sit_bitmap_size, GFP_KERNEL);
4319	if (!sit_i->sit_bitmap_mir)
4320		return -ENOMEM;
4321
4322 sit_i->invalid_segmap = f2fs_kvzalloc(sbi,
4323 main_bitmap_size, GFP_KERNEL);
4324 if (!sit_i->invalid_segmap)
4325 return -ENOMEM;
4326#endif
4327
4328 /* init SIT information */
4329 sit_i->s_ops = &default_salloc_ops;
4330
4331 sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
4332 sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
4333 sit_i->written_valid_blocks = 0;
4334	sit_i->bitmap_size = sit_bitmap_size;
4335	sit_i->dirty_sentries = 0;
4336 sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
4337 sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
4338	sit_i->mounted_time = ktime_get_boottime_seconds();
4339	init_rwsem(&sit_i->sentry_lock);
4340 return 0;
4341}
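/*
 * Editor's sketch of the single-allocation bitmap carving above: each
 * main segment takes consecutive SIT_VBLOCK_MAP_SIZE chunks from
 * sit_i->bitmap, in this order (the mirror only with
 * CONFIG_F2FS_CHECK_FS):
 *
 *   [cur_valid_map][ckpt_valid_map][cur_valid_map_mir][discard_map]
 *
 * example_seg_bitmap() is hypothetical and only restates that arithmetic.
 */
#if 0
static char *example_seg_bitmap(struct sit_info *sit_i,
				unsigned int segno, int maps_per_seg)
{
	/* maps_per_seg is 3, or 4 with CONFIG_F2FS_CHECK_FS */
	return (char *)sit_i->bitmap +
		(size_t)segno * maps_per_seg * SIT_VBLOCK_MAP_SIZE;
}
#endif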
4342
4343static int build_free_segmap(struct f2fs_sb_info *sbi)
4344{
4345 struct free_segmap_info *free_i;
4346 unsigned int bitmap_size, sec_bitmap_size;
4347
4348 /* allocate memory for free segmap information */
4349 free_i = f2fs_kzalloc(sbi, sizeof(struct free_segmap_info), GFP_KERNEL);
4350 if (!free_i)
4351 return -ENOMEM;
4352
4353 SM_I(sbi)->free_info = free_i;
4354
4355 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4356 free_i->free_segmap = f2fs_kvmalloc(sbi, bitmap_size, GFP_KERNEL);
4357 if (!free_i->free_segmap)
4358 return -ENOMEM;
4359
4360 sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4361 free_i->free_secmap = f2fs_kvmalloc(sbi, sec_bitmap_size, GFP_KERNEL);
4362 if (!free_i->free_secmap)
4363 return -ENOMEM;
4364
4365 /* set all segments as dirty temporarily */
4366 memset(free_i->free_segmap, 0xff, bitmap_size);
4367 memset(free_i->free_secmap, 0xff, sec_bitmap_size);
4368
4369 /* init free segmap information */
4370 free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
4371 free_i->free_segments = 0;
4372 free_i->free_sections = 0;
4373 spin_lock_init(&free_i->segmap_lock);
4374 return 0;
4375}
4376
4377static int build_curseg(struct f2fs_sb_info *sbi)
4378{
4379 struct curseg_info *array;
4380 int i;
4381
4382	array = f2fs_kzalloc(sbi, array_size(NR_CURSEG_TYPE,
4383					sizeof(*array)), GFP_KERNEL);
4384	if (!array)
4385 return -ENOMEM;
4386
4387 SM_I(sbi)->curseg_array = array;
4388
4389	for (i = 0; i < NO_CHECK_TYPE; i++) {
4390		mutex_init(&array[i].curseg_mutex);
4391 array[i].sum_blk = f2fs_kzalloc(sbi, PAGE_SIZE, GFP_KERNEL);
4392 if (!array[i].sum_blk)
4393 return -ENOMEM;
4394 init_rwsem(&array[i].journal_rwsem);
4395 array[i].journal = f2fs_kzalloc(sbi,
4396 sizeof(struct f2fs_journal), GFP_KERNEL);
4397 if (!array[i].journal)
4398 return -ENOMEM;
4399		if (i < NR_PERSISTENT_LOG)
4400 array[i].seg_type = CURSEG_HOT_DATA + i;
4401 else if (i == CURSEG_COLD_DATA_PINNED)
4402 array[i].seg_type = CURSEG_COLD_DATA;
4403 else if (i == CURSEG_ALL_DATA_ATGC)
4404 array[i].seg_type = CURSEG_COLD_DATA;
4405		array[i].segno = NULL_SEGNO;
4406		array[i].next_blkoff = 0;
4407		array[i].inited = false;
4408	}
4409 return restore_curseg_summaries(sbi);
4410}
4411
4412static int build_sit_entries(struct f2fs_sb_info *sbi)
4413{
4414 struct sit_info *sit_i = SIT_I(sbi);
4415 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4416 struct f2fs_journal *journal = curseg->journal;
4417 struct seg_entry *se;
4418 struct f2fs_sit_entry sit;
4419 int sit_blk_cnt = SIT_BLK_CNT(sbi);
4420 unsigned int i, start, end;
4421 unsigned int readed, start_blk = 0;
4422 int err = 0;
4423 block_t total_node_blocks = 0;
4424
4425 do {
4426 readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
4427 META_SIT, true);
4428
4429 start = start_blk * sit_i->sents_per_block;
4430 end = (start_blk + readed) * sit_i->sents_per_block;
4431
4432 for (; start < end && start < MAIN_SEGS(sbi); start++) {
4433 struct f2fs_sit_block *sit_blk;
4434 struct page *page;
4435
4436 se = &sit_i->sentries[start];
4437 page = get_current_sit_page(sbi, start);
4438			if (IS_ERR(page))
4439				return PTR_ERR(page);
4440			sit_blk = (struct f2fs_sit_block *)page_address(page);
4441 sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
4442 f2fs_put_page(page, 1);
4443
4444 err = check_block_count(sbi, start, &sit);
4445 if (err)
4446 return err;
4447 seg_info_from_raw_sit(se, &sit);
4448 if (IS_NODESEG(se->type))
4449 total_node_blocks += se->valid_blocks;
4450
4451 /* build discard map only one time */
4452			if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
4453 memset(se->discard_map, 0xff,
4454 SIT_VBLOCK_MAP_SIZE);
4455 } else {
4456 memcpy(se->discard_map,
4457 se->cur_valid_map,
4458 SIT_VBLOCK_MAP_SIZE);
4459 sbi->discard_blks +=
4460 sbi->blocks_per_seg -
4461 se->valid_blocks;
4462			}
4463
4464			if (__is_large_section(sbi))
4465				get_sec_entry(sbi, start)->valid_blocks +=
4466 se->valid_blocks;
4467 }
4468 start_blk += readed;
4469 } while (start_blk < sit_blk_cnt);
4470
4471 down_read(&curseg->journal_rwsem);
4472 for (i = 0; i < sits_in_cursum(journal); i++) {
4473 unsigned int old_valid_blocks;
4474
4475 start = le32_to_cpu(segno_in_journal(journal, i));
4476 if (start >= MAIN_SEGS(sbi)) {
4477			f2fs_err(sbi, "Wrong journal entry on segno %u",
4478 start);
4479 err = -EFSCORRUPTED;
4480			break;
4481 }
4482
4483 se = &sit_i->sentries[start];
4484 sit = sit_in_journal(journal, i);
4485
4486 old_valid_blocks = se->valid_blocks;
4487 if (IS_NODESEG(se->type))
4488 total_node_blocks -= old_valid_blocks;
4489
4490 err = check_block_count(sbi, start, &sit);
4491 if (err)
4492 break;
4493 seg_info_from_raw_sit(se, &sit);
4494 if (IS_NODESEG(se->type))
4495 total_node_blocks += se->valid_blocks;
4496
4497		if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
4498 memset(se->discard_map, 0xff, SIT_VBLOCK_MAP_SIZE);
4499 } else {
4500 memcpy(se->discard_map, se->cur_valid_map,
4501 SIT_VBLOCK_MAP_SIZE);
4502 sbi->discard_blks += old_valid_blocks;
4503 sbi->discard_blks -= se->valid_blocks;
4504		}
4505
4506		if (__is_large_section(sbi)) {
4507			get_sec_entry(sbi, start)->valid_blocks +=
4508 se->valid_blocks;
4509 get_sec_entry(sbi, start)->valid_blocks -=
4510 old_valid_blocks;
4511 }
4512 }
4513 up_read(&curseg->journal_rwsem);
4514
4515 if (!err && total_node_blocks != valid_node_count(sbi)) {
4516		f2fs_err(sbi, "SIT is corrupted node# %u vs %u",
4517 total_node_blocks, valid_node_count(sbi));
4518 err = -EFSCORRUPTED;
4519	}
4520
4521 return err;
4522}
4523
4524static void init_free_segmap(struct f2fs_sb_info *sbi)
4525{
4526 unsigned int start;
4527 int type;
4528	struct seg_entry *sentry;
4529
4530 for (start = 0; start < MAIN_SEGS(sbi); start++) {
4531		if (f2fs_usable_blks_in_seg(sbi, start) == 0)
4532 continue;
4533 sentry = get_seg_entry(sbi, start);
4534		if (!sentry->valid_blocks)
4535 __set_free(sbi, start);
4536 else
4537 SIT_I(sbi)->written_valid_blocks +=
4538 sentry->valid_blocks;
4539 }
4540
4541	/* mark the current segments as in use */
4542 for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
4543 struct curseg_info *curseg_t = CURSEG_I(sbi, type);
4544 __set_test_and_inuse(sbi, curseg_t->segno);
4545 }
4546}
4547
4548static void init_dirty_segmap(struct f2fs_sb_info *sbi)
4549{
4550 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4551 struct free_segmap_info *free_i = FREE_I(sbi);
4552	unsigned int segno = 0, offset = 0, secno;
4553 block_t valid_blocks, usable_blks_in_seg;
4554 block_t blks_per_sec = BLKS_PER_SEC(sbi);
4555
4556 while (1) {
4557 /* find dirty segment based on free segmap */
4558 segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
4559 if (segno >= MAIN_SEGS(sbi))
4560 break;
4561 offset = segno + 1;
4562 valid_blocks = get_valid_blocks(sbi, segno, false);
4563		usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
4564		if (valid_blocks == usable_blks_in_seg || !valid_blocks)
4565			continue;
4566		if (valid_blocks > usable_blks_in_seg) {
4567			f2fs_bug_on(sbi, 1);
4568 continue;
4569 }
4570 mutex_lock(&dirty_i->seglist_lock);
4571 __locate_dirty_segment(sbi, segno, DIRTY);
4572 mutex_unlock(&dirty_i->seglist_lock);
4573 }
4574
4575 if (!__is_large_section(sbi))
4576 return;
4577
4578 mutex_lock(&dirty_i->seglist_lock);
4579 for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
4580 valid_blocks = get_valid_blocks(sbi, segno, true);
4581 secno = GET_SEC_FROM_SEG(sbi, segno);
4582
4583 if (!valid_blocks || valid_blocks == blks_per_sec)
4584 continue;
4585 if (IS_CURSEC(sbi, secno))
4586 continue;
4587 set_bit(secno, dirty_i->dirty_secmap);
4588 }
4589 mutex_unlock(&dirty_i->seglist_lock);
4590}
4591
4592static int init_victim_secmap(struct f2fs_sb_info *sbi)
4593{
4594 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4595 unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4596
4597 dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4598 if (!dirty_i->victim_secmap)
4599 return -ENOMEM;
4600 return 0;
4601}
4602
4603static int build_dirty_segmap(struct f2fs_sb_info *sbi)
4604{
4605 struct dirty_seglist_info *dirty_i;
4606 unsigned int bitmap_size, i;
4607
4608 /* allocate memory for dirty segments list information */
4609 dirty_i = f2fs_kzalloc(sbi, sizeof(struct dirty_seglist_info),
4610 GFP_KERNEL);
4611 if (!dirty_i)
4612 return -ENOMEM;
4613
4614 SM_I(sbi)->dirty_info = dirty_i;
4615 mutex_init(&dirty_i->seglist_lock);
4616
4617 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4618
4619 for (i = 0; i < NR_DIRTY_TYPE; i++) {
4620 dirty_i->dirty_segmap[i] = f2fs_kvzalloc(sbi, bitmap_size,
4621 GFP_KERNEL);
4622 if (!dirty_i->dirty_segmap[i])
4623 return -ENOMEM;
4624 }
4625
4626	if (__is_large_section(sbi)) {
4627 bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4628 dirty_i->dirty_secmap = f2fs_kvzalloc(sbi,
4629 bitmap_size, GFP_KERNEL);
4630 if (!dirty_i->dirty_secmap)
4631 return -ENOMEM;
4632 }
4633
4634	init_dirty_segmap(sbi);
4635 return init_victim_secmap(sbi);
4636}
4637
4638static int sanity_check_curseg(struct f2fs_sb_info *sbi)
4639{
4640 int i;
4641
4642 /*
4643 * In LFS/SSR curseg, .next_blkoff should point to an unused blkaddr;
4644 * In LFS curseg, all blkaddr after .next_blkoff should be unused.
4645 */
4646	for (i = 0; i < NR_PERSISTENT_LOG; i++) {
4647		struct curseg_info *curseg = CURSEG_I(sbi, i);
4648 struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
4649 unsigned int blkofs = curseg->next_blkoff;
4650
4651		sanity_check_seg_type(sbi, curseg->seg_type);
4652
4653		if (f2fs_test_bit(blkofs, se->cur_valid_map))
4654 goto out;
4655
4656 if (curseg->alloc_type == SSR)
4657 continue;
4658
4659 for (blkofs += 1; blkofs < sbi->blocks_per_seg; blkofs++) {
4660 if (!f2fs_test_bit(blkofs, se->cur_valid_map))
4661 continue;
4662out:
4663 f2fs_err(sbi,
4664 "Current segment's next free block offset is inconsistent with bitmap, logtype:%u, segno:%u, type:%u, next_blkoff:%u, blkofs:%u",
4665 i, curseg->segno, curseg->alloc_type,
4666 curseg->next_blkoff, blkofs);
4667 return -EFSCORRUPTED;
4668 }
4669 }
4670 return 0;
4671}
4672
4673#ifdef CONFIG_BLK_DEV_ZONED
4674
4675static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
4676 struct f2fs_dev_info *fdev,
4677 struct blk_zone *zone)
4678{
4679 unsigned int wp_segno, wp_blkoff, zone_secno, zone_segno, segno;
4680 block_t zone_block, wp_block, last_valid_block;
4681 unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
4682 int i, s, b, ret;
4683 struct seg_entry *se;
4684
4685 if (zone->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
4686 return 0;
4687
4688 wp_block = fdev->start_blk + (zone->wp >> log_sectors_per_block);
4689 wp_segno = GET_SEGNO(sbi, wp_block);
4690 wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
4691 zone_block = fdev->start_blk + (zone->start >> log_sectors_per_block);
4692 zone_segno = GET_SEGNO(sbi, zone_block);
4693 zone_secno = GET_SEC_FROM_SEG(sbi, zone_segno);
4694
4695 if (zone_segno >= MAIN_SEGS(sbi))
4696 return 0;
4697
4698 /*
4699 * Skip check of zones cursegs point to, since
4700 * fix_curseg_write_pointer() checks them.
4701 */
4702 for (i = 0; i < NO_CHECK_TYPE; i++)
4703 if (zone_secno == GET_SEC_FROM_SEG(sbi,
4704 CURSEG_I(sbi, i)->segno))
4705 return 0;
4706
4707 /*
4708 * Get last valid block of the zone.
4709 */
4710 last_valid_block = zone_block - 1;
4711 for (s = sbi->segs_per_sec - 1; s >= 0; s--) {
4712 segno = zone_segno + s;
4713 se = get_seg_entry(sbi, segno);
4714 for (b = sbi->blocks_per_seg - 1; b >= 0; b--)
4715 if (f2fs_test_bit(b, se->cur_valid_map)) {
4716 last_valid_block = START_BLOCK(sbi, segno) + b;
4717 break;
4718 }
4719 if (last_valid_block >= zone_block)
4720 break;
4721 }
4722
4723 /*
4724 * If last valid block is beyond the write pointer, report the
4725 * inconsistency. This inconsistency does not cause write error
4726 * because the zone will not be selected for write operation until
4727	 * it gets discarded. Just report it.
4728 */
4729 if (last_valid_block >= wp_block) {
4730 f2fs_notice(sbi, "Valid block beyond write pointer: "
4731 "valid block[0x%x,0x%x] wp[0x%x,0x%x]",
4732 GET_SEGNO(sbi, last_valid_block),
4733 GET_BLKOFF_FROM_SEG0(sbi, last_valid_block),
4734 wp_segno, wp_blkoff);
4735 return 0;
4736 }
4737
4738 /*
4739 * If there is no valid block in the zone and if write pointer is
4740 * not at zone start, reset the write pointer.
4741 */
4742 if (last_valid_block + 1 == zone_block && zone->wp != zone->start) {
4743 f2fs_notice(sbi,
4744 "Zone without valid block has non-zero write "
4745 "pointer. Reset the write pointer: wp[0x%x,0x%x]",
4746 wp_segno, wp_blkoff);
4747 ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block,
4748 zone->len >> log_sectors_per_block);
4749 if (ret) {
4750 f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
4751 fdev->path, ret);
4752 return ret;
4753 }
4754 }
4755
4756 return 0;
4757}
4758
4759static struct f2fs_dev_info *get_target_zoned_dev(struct f2fs_sb_info *sbi,
4760 block_t zone_blkaddr)
4761{
4762 int i;
4763
4764 for (i = 0; i < sbi->s_ndevs; i++) {
4765 if (!bdev_is_zoned(FDEV(i).bdev))
4766 continue;
4767 if (sbi->s_ndevs == 1 || (FDEV(i).start_blk <= zone_blkaddr &&
4768 zone_blkaddr <= FDEV(i).end_blk))
4769 return &FDEV(i);
4770 }
4771
4772 return NULL;
4773}
4774
4775static int report_one_zone_cb(struct blk_zone *zone, unsigned int idx,
4776 void *data) {
4777 memcpy(data, zone, sizeof(struct blk_zone));
4778 return 0;
4779}
4780
4781static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
4782{
4783 struct curseg_info *cs = CURSEG_I(sbi, type);
4784 struct f2fs_dev_info *zbd;
4785 struct blk_zone zone;
4786 unsigned int cs_section, wp_segno, wp_blkoff, wp_sector_off;
4787 block_t cs_zone_block, wp_block;
4788 unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
4789 sector_t zone_sector;
4790 int err;
4791
4792 cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
4793 cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
4794
4795 zbd = get_target_zoned_dev(sbi, cs_zone_block);
4796 if (!zbd)
4797 return 0;
4798
4799 /* report zone for the sector the curseg points to */
4800 zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
4801 << log_sectors_per_block;
4802 err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
4803 report_one_zone_cb, &zone);
4804 if (err != 1) {
4805 f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
4806 zbd->path, err);
4807 return err;
4808 }
4809
4810 if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
4811 return 0;
4812
4813 wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block);
4814 wp_segno = GET_SEGNO(sbi, wp_block);
4815 wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
4816 wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0);
4817
4818 if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff &&
4819 wp_sector_off == 0)
4820 return 0;
4821
4822 f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: "
4823 "curseg[0x%x,0x%x] wp[0x%x,0x%x]",
4824 type, cs->segno, cs->next_blkoff, wp_segno, wp_blkoff);
4825
4826 f2fs_notice(sbi, "Assign new section to curseg[%d]: "
4827 "curseg[0x%x,0x%x]", type, cs->segno, cs->next_blkoff);
4828 allocate_segment_by_default(sbi, type, true);
4829
4830 /* check consistency of the zone curseg pointed to */
4831 if (check_zone_write_pointer(sbi, zbd, &zone))
4832 return -EIO;
4833
4834 /* check newly assigned zone */
4835 cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
4836 cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
4837
4838 zbd = get_target_zoned_dev(sbi, cs_zone_block);
4839 if (!zbd)
4840 return 0;
4841
4842 zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
4843 << log_sectors_per_block;
4844 err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
4845 report_one_zone_cb, &zone);
4846 if (err != 1) {
4847 f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
4848 zbd->path, err);
4849 return err;
4850 }
4851
4852 if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
4853 return 0;
4854
4855 if (zone.wp != zone.start) {
4856 f2fs_notice(sbi,
4857 "New zone for curseg[%d] is not yet discarded. "
4858 "Reset the zone: curseg[0x%x,0x%x]",
4859 type, cs->segno, cs->next_blkoff);
4860 err = __f2fs_issue_discard_zone(sbi, zbd->bdev,
4861 zone_sector >> log_sectors_per_block,
4862 zone.len >> log_sectors_per_block);
4863 if (err) {
4864 f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
4865 zbd->path, err);
4866 return err;
4867 }
4868 }
4869
4870 return 0;
4871}
4872
4873int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
4874{
4875 int i, ret;
4876
4877 for (i = 0; i < NR_PERSISTENT_LOG; i++) {
4878 ret = fix_curseg_write_pointer(sbi, i);
4879 if (ret)
4880 return ret;
4881 }
4882
4883 return 0;
4884}
4885
4886struct check_zone_write_pointer_args {
4887 struct f2fs_sb_info *sbi;
4888 struct f2fs_dev_info *fdev;
4889};
4890
4891static int check_zone_write_pointer_cb(struct blk_zone *zone, unsigned int idx,
4892 void *data) {
4893 struct check_zone_write_pointer_args *args;
4894 args = (struct check_zone_write_pointer_args *)data;
4895
4896 return check_zone_write_pointer(args->sbi, args->fdev, zone);
4897}
4898
4899int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
4900{
4901 int i, ret;
4902 struct check_zone_write_pointer_args args;
4903
4904 for (i = 0; i < sbi->s_ndevs; i++) {
4905 if (!bdev_is_zoned(FDEV(i).bdev))
4906 continue;
4907
4908 args.sbi = sbi;
4909 args.fdev = &FDEV(i);
4910 ret = blkdev_report_zones(FDEV(i).bdev, 0, BLK_ALL_ZONES,
4911 check_zone_write_pointer_cb, &args);
4912 if (ret < 0)
4913 return ret;
4914 }
4915
4916 return 0;
4917}
4918
4919static bool is_conv_zone(struct f2fs_sb_info *sbi, unsigned int zone_idx,
4920 unsigned int dev_idx)
4921{
4922 if (!bdev_is_zoned(FDEV(dev_idx).bdev))
4923 return true;
4924 return !test_bit(zone_idx, FDEV(dev_idx).blkz_seq);
4925}
4926
4927/* Return the zone index in the given device */
4928static unsigned int get_zone_idx(struct f2fs_sb_info *sbi, unsigned int secno,
4929 int dev_idx)
4930{
4931 block_t sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
4932
4933 return (sec_start_blkaddr - FDEV(dev_idx).start_blk) >>
4934 sbi->log_blocks_per_blkz;
4935}
4936
4937/*
4938 * Return the usable segments in a section based on the zone's
4939	 * corresponding zone capacity. One zone is equal to one section.
4940 */
4941static inline unsigned int f2fs_usable_zone_segs_in_sec(
4942 struct f2fs_sb_info *sbi, unsigned int segno)
4943{
4944 unsigned int dev_idx, zone_idx, unusable_segs_in_sec;
4945
4946 dev_idx = f2fs_target_device_index(sbi, START_BLOCK(sbi, segno));
4947 zone_idx = get_zone_idx(sbi, GET_SEC_FROM_SEG(sbi, segno), dev_idx);
4948
4949 /* Conventional zone's capacity is always equal to zone size */
4950 if (is_conv_zone(sbi, zone_idx, dev_idx))
4951 return sbi->segs_per_sec;
4952
4953 /*
4954 * If the zone_capacity_blocks array is NULL, then zone capacity
4955 * is equal to the zone size for all zones
4956 */
4957 if (!FDEV(dev_idx).zone_capacity_blocks)
4958 return sbi->segs_per_sec;
4959
4960 /* Get the segment count beyond zone capacity block */
4961 unusable_segs_in_sec = (sbi->blocks_per_blkz -
4962 FDEV(dev_idx).zone_capacity_blocks[zone_idx]) >>
4963 sbi->log_blocks_per_seg;
4964 return sbi->segs_per_sec - unusable_segs_in_sec;
4965}
4966
4967/*
4968 * Return the number of usable blocks in a segment. The number of blocks
4969 * returned is always equal to the number of blocks in a segment for
4970 * segments fully contained within a sequential zone capacity or a
4971 * conventional zone. For segments partially contained in a sequential
4972 * zone capacity, the number of usable blocks up to the zone capacity
4973 * is returned. 0 is returned in all other cases.
4974 */
4975static inline unsigned int f2fs_usable_zone_blks_in_seg(
4976 struct f2fs_sb_info *sbi, unsigned int segno)
4977{
4978 block_t seg_start, sec_start_blkaddr, sec_cap_blkaddr;
4979 unsigned int zone_idx, dev_idx, secno;
4980
4981 secno = GET_SEC_FROM_SEG(sbi, segno);
4982 seg_start = START_BLOCK(sbi, segno);
4983 dev_idx = f2fs_target_device_index(sbi, seg_start);
4984 zone_idx = get_zone_idx(sbi, secno, dev_idx);
4985
4986 /*
4987 * Conventional zone's capacity is always equal to zone size,
4988 * so, blocks per segment is unchanged.
4989 */
4990 if (is_conv_zone(sbi, zone_idx, dev_idx))
4991 return sbi->blocks_per_seg;
4992
4993 if (!FDEV(dev_idx).zone_capacity_blocks)
4994 return sbi->blocks_per_seg;
4995
4996 sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
4997 sec_cap_blkaddr = sec_start_blkaddr +
4998 FDEV(dev_idx).zone_capacity_blocks[zone_idx];
4999
5000 /*
5001 * If segment starts before zone capacity and spans beyond
5002 * zone capacity, then usable blocks are from seg start to
5003 * zone capacity. If the segment starts after the zone capacity,
5004 * then there are no usable blocks.
5005 */
5006 if (seg_start >= sec_cap_blkaddr)
5007 return 0;
5008 if (seg_start + sbi->blocks_per_seg > sec_cap_blkaddr)
5009 return sec_cap_blkaddr - seg_start;
5010
5011 return sbi->blocks_per_seg;
5012}
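/*
 * Worked example (editorial, hypothetical geometry): with 512-block
 * (2 MiB) segments and a zone whose capacity ends 100 blocks into a
 * segment, that segment reports 100 usable blocks
 * (sec_cap_blkaddr - seg_start); every later segment in the zone
 * reports 0 and is skipped by init_free_segmap()/init_dirty_segmap().
 */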
5013#else
5014int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
5015{
5016 return 0;
5017}
5018
5019int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
5020{
5021 return 0;
5022}
5023
5024static inline unsigned int f2fs_usable_zone_blks_in_seg(struct f2fs_sb_info *sbi,
5025 unsigned int segno)
5026{
5027 return 0;
5028}
5029
5030static inline unsigned int f2fs_usable_zone_segs_in_sec(struct f2fs_sb_info *sbi,
5031 unsigned int segno)
5032{
5033 return 0;
5034}
5035#endif
unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
					unsigned int segno)
{
	if (f2fs_sb_has_blkzoned(sbi))
		return f2fs_usable_zone_blks_in_seg(sbi, segno);

	return sbi->blocks_per_seg;
}

unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
					unsigned int segno)
{
	if (f2fs_sb_has_blkzoned(sbi))
		return f2fs_usable_zone_segs_in_sec(sbi, segno);

	return sbi->segs_per_sec;
}

/*
 * Update min, max modified time for cost-benefit GC algorithm
 */
static void init_min_max_mtime(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int segno;

	down_write(&sit_i->sentry_lock);

	sit_i->min_mtime = ULLONG_MAX;

	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
		unsigned int i;
		unsigned long long mtime = 0;

		for (i = 0; i < sbi->segs_per_sec; i++)
			mtime += get_seg_entry(sbi, segno + i)->mtime;

		mtime = div_u64(mtime, sbi->segs_per_sec);

		if (sit_i->min_mtime > mtime)
			sit_i->min_mtime = mtime;
	}
	sit_i->max_mtime = get_mtime(sbi, false);
	sit_i->dirty_max_mtime = 0;
	up_write(&sit_i->sentry_lock);
}
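/*
 * These bounds feed the cost-benefit victim selection in gc.c: there a
 * section's age is normalized into [0, 100] between min_mtime and
 * max_mtime, so that older (colder) sections score higher.  See
 * get_cb_cost() for the exact weighting.
 */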

int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_sm_info *sm_info;
	int err;

	sm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_sm_info), GFP_KERNEL);
	if (!sm_info)
		return -ENOMEM;

	/* init sm info */
	sbi->sm_info = sm_info;
	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
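	/*
	 * Reclaim prefree segments once they exceed a small fraction of
	 * the main area (5% by default, per DEF_RECLAIM_PREFREE_SEGMENTS
	 * in segment.h), capped by DEF_MAX_RECLAIM_PREFREE_SEGMENTS.
	 */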
	sm_info->rec_prefree_segments = sm_info->main_segments *
					DEF_RECLAIM_PREFREE_SEGMENTS / 100;
	if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
		sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;

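	/* LFS mode never updates blocks in place, so IPU stays off there */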
	if (!f2fs_lfs_mode(sbi))
		sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
	sm_info->min_seq_blocks = sbi->blocks_per_seg * sbi->segs_per_sec;
	sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
	sm_info->min_ssr_sections = reserved_sections(sbi);

	INIT_LIST_HEAD(&sm_info->sit_entry_set);

	init_rwsem(&sm_info->curseg_lock);

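	/* no flush thread is needed for a read-only mount */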
	if (!f2fs_readonly(sbi->sb)) {
		err = f2fs_create_flush_cmd_control(sbi);
		if (err)
			return err;
	}

	err = create_discard_cmd_control(sbi);
	if (err)
		return err;

	err = build_sit_info(sbi);
	if (err)
		return err;
	err = build_free_segmap(sbi);
	if (err)
		return err;
	err = build_curseg(sbi);
	if (err)
		return err;

	/* reinit free segmap based on SIT */
	err = build_sit_entries(sbi);
	if (err)
		return err;

	init_free_segmap(sbi);
	err = build_dirty_segmap(sbi);
	if (err)
		return err;

	err = sanity_check_curseg(sbi);
	if (err)
		return err;

	init_min_max_mtime(sbi);
	return 0;
}

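/*
 * Teardown helpers.  f2fs_destroy_segment_manager() below releases the
 * structures roughly in reverse order of their construction above.
 */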
static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
					enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	kvfree(dirty_i->dirty_segmap[dirty_type]);
	dirty_i->nr_dirty[dirty_type] = 0;
	mutex_unlock(&dirty_i->seglist_lock);
}

static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	kvfree(dirty_i->victim_secmap);
}

static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	int i;

	if (!dirty_i)
		return;

	/* discard pre-free/dirty segments list */
	for (i = 0; i < NR_DIRTY_TYPE; i++)
		discard_dirty_segmap(sbi, i);

	if (__is_large_section(sbi)) {
		mutex_lock(&dirty_i->seglist_lock);
		kvfree(dirty_i->dirty_secmap);
		mutex_unlock(&dirty_i->seglist_lock);
	}

	destroy_victim_secmap(sbi);
	SM_I(sbi)->dirty_info = NULL;
	kfree(dirty_i);
}

static void destroy_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array = SM_I(sbi)->curseg_array;
	int i;

	if (!array)
		return;
	SM_I(sbi)->curseg_array = NULL;
	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		kfree(array[i].sum_blk);
		kfree(array[i].journal);
	}
	kfree(array);
}

static void destroy_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i = SM_I(sbi)->free_info;

	if (!free_i)
		return;
	SM_I(sbi)->free_info = NULL;
	kvfree(free_i->free_segmap);
	kvfree(free_i->free_secmap);
	kfree(free_i);
}

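/*
 * Note the free pairing here: bitmaps sized with f2fs_kvzalloc() on the
 * build side are released with kvfree(), while plain kmalloc'd objects
 * take kfree().
 */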
static void destroy_sit_info(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);

	if (!sit_i)
		return;

	if (sit_i->sentries)
		kvfree(sit_i->bitmap);
	kfree(sit_i->tmp_map);

	kvfree(sit_i->sentries);
	kvfree(sit_i->sec_entries);
	kvfree(sit_i->dirty_sentries_bitmap);

	SM_I(sbi)->sit_info = NULL;
	kvfree(sit_i->sit_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
	kvfree(sit_i->sit_bitmap_mir);
	kvfree(sit_i->invalid_segmap);
#endif
	kfree(sit_i);
}

void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);

	if (!sm_info)
		return;
	f2fs_destroy_flush_cmd_control(sbi, true);
	destroy_discard_cmd_control(sbi);
	destroy_dirty_segmap(sbi);
	destroy_curseg(sbi);
	destroy_free_segmap(sbi);
	destroy_sit_info(sbi);
	sbi->sm_info = NULL;
	kfree(sm_info);
}

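/*
 * Slab caches for the small, frequently-allocated objects used in this
 * file (discard entries and commands, SIT entry sets, in-memory page
 * entries).  On failure, creation unwinds in reverse via the gotos.
 */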
int __init f2fs_create_segment_manager_caches(void)
{
	discard_entry_slab = f2fs_kmem_cache_create("f2fs_discard_entry",
			sizeof(struct discard_entry));
	if (!discard_entry_slab)
		goto fail;

	discard_cmd_slab = f2fs_kmem_cache_create("f2fs_discard_cmd",
			sizeof(struct discard_cmd));
	if (!discard_cmd_slab)
		goto destroy_discard_entry;

	sit_entry_set_slab = f2fs_kmem_cache_create("f2fs_sit_entry_set",
			sizeof(struct sit_entry_set));
	if (!sit_entry_set_slab)
		goto destroy_discard_cmd;

	inmem_entry_slab = f2fs_kmem_cache_create("f2fs_inmem_page_entry",
			sizeof(struct inmem_pages));
	if (!inmem_entry_slab)
		goto destroy_sit_entry_set;
	return 0;

destroy_sit_entry_set:
	kmem_cache_destroy(sit_entry_set_slab);
destroy_discard_cmd:
	kmem_cache_destroy(discard_cmd_slab);
destroy_discard_entry:
	kmem_cache_destroy(discard_entry_slab);
fail:
	return -ENOMEM;
}

void f2fs_destroy_segment_manager_caches(void)
{
	kmem_cache_destroy(sit_entry_set_slab);
	kmem_cache_destroy(discard_cmd_slab);
	kmem_cache_destroy(discard_entry_slab);
	kmem_cache_destroy(inmem_entry_slab);
}