// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 */

#include <linux/mm.h>
#include <linux/rbtree.h>
#include <trace/events/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"
#include "misc.h"

/* Just an arbitrary number so we can be sure this happened */
#define BACKREF_FOUND_SHARED 6

struct extent_inode_elem {
	u64 inum;
	u64 offset;
	struct extent_inode_elem *next;
};

static int check_extent_in_eb(const struct btrfs_key *key,
			      const struct extent_buffer *eb,
			      const struct btrfs_file_extent_item *fi,
			      u64 extent_item_pos,
			      struct extent_inode_elem **eie,
			      bool ignore_offset)
{
	u64 offset = 0;
	struct extent_inode_elem *e;

	if (!ignore_offset &&
	    !btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;
		u64 data_len;

		data_offset = btrfs_file_extent_offset(eb, fi);
		data_len = btrfs_file_extent_num_bytes(eb, fi);

		if (extent_item_pos < data_offset ||
		    extent_item_pos >= data_offset + data_len)
			return 1;
		offset = extent_item_pos - data_offset;
	}

	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = key->offset + offset;
	*eie = e;

	return 0;
}

static void free_inode_elem_list(struct extent_inode_elem *eie)
{
	struct extent_inode_elem *eie_next;

	for (; eie; eie = eie_next) {
		eie_next = eie->next;
		kfree(eie);
	}
}

static int find_extent_in_eb(const struct extent_buffer *eb,
			     u64 wanted_disk_byte, u64 extent_item_pos,
			     struct extent_inode_elem **eie,
			     bool ignore_offset)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * from the shared data ref, we only have the leaf but we need
	 * the key. thus, we must look into all items and see that we
	 * find one (some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != wanted_disk_byte)
			continue;

		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset);
		if (ret < 0)
			return ret;
	}

	return 0;
}

struct preftree {
	struct rb_root_cached root;
	unsigned int count;
};

#define PREFTREE_INIT	{ .root = RB_ROOT_CACHED, .count = 0 }

struct preftrees {
	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
	struct preftree indirect_missing_keys;
};

/*
 * Checks for a shared extent during backref search.
 *
 * The share_count tracks prelim_refs (direct and indirect) having a
 * ref->count >0:
 * - incremented when a ref->count transitions to >0
 * - decremented when a ref->count transitions to <1
 */
struct share_check {
	u64 root_objectid;
	u64 inum;
	int share_count;
	bool have_delayed_delete_refs;
};

static inline int extent_is_shared(struct share_check *sc)
{
	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
}

static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct prelim_ref),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_prelim_ref_exit(void)
{
	kmem_cache_destroy(btrfs_prelim_ref_cache);
}

static void free_pref(struct prelim_ref *ref)
{
	kmem_cache_free(btrfs_prelim_ref_cache, ref);
}

/*
 * Return 0 when both refs are for the same block (and can be merged).
 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 * indicates a 'higher' block.
 */
static int prelim_ref_compare(struct prelim_ref *ref1,
			      struct prelim_ref *ref2)
{
	if (ref1->level < ref2->level)
		return -1;
	if (ref1->level > ref2->level)
		return 1;
	if (ref1->root_id < ref2->root_id)
		return -1;
	if (ref1->root_id > ref2->root_id)
		return 1;
	if (ref1->key_for_search.type < ref2->key_for_search.type)
		return -1;
	if (ref1->key_for_search.type > ref2->key_for_search.type)
		return 1;
	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
		return -1;
	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
		return 1;
	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
		return -1;
	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;

	return 0;
}

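/*
 * Illustrative example (hypothetical values): with equal level, root_id,
 * key type and objectid, a ref whose key_for_search.offset is 0 sorts
 * before one whose offset is 4096, because the comparison above falls
 * through to the offset check. Refs comparing equal on every field
 * return 0 and are merged by prelim_ref_insert() below.
 */
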
static void update_share_count(struct share_check *sc, int oldcount,
			       int newcount)
{
	if ((!sc) || (oldcount == 0 && newcount < 1))
		return;

	if (oldcount > 0 && newcount < 1)
		sc->share_count--;
	else if (oldcount < 1 && newcount > 0)
		sc->share_count++;
}

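/*
 * Worked example (illustrative, not executed): merging a delayed
 * BTRFS_ADD_DELAYED_REF (+1) into a ref that already has count == 1
 * calls update_share_count(sc, 1, 2); both counts are > 0, so
 * sc->share_count is unchanged. Merging a BTRFS_DROP_DELAYED_REF (-1)
 * into a ref with count == 1 calls update_share_count(sc, 1, 0) and
 * decrements sc->share_count, since that ref no longer contributes to
 * sharing.
 */
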
/*
 * Add @newref to the @root rbtree, merging identical refs.
 *
 * Callers should assume that newref has been freed after calling.
 */
static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
			      struct preftree *preftree,
			      struct prelim_ref *newref,
			      struct share_check *sc)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref;
	int result;
	bool leftmost = true;

	root = &preftree->root;
	p = &root->rb_root.rb_node;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, newref);
		if (result < 0) {
			p = &(*p)->rb_left;
		} else if (result > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			/* Identical refs, merge them and free @newref */
			struct extent_inode_elem *eie = ref->inode_list;

			while (eie && eie->next)
				eie = eie->next;

			if (!eie)
				ref->inode_list = newref->inode_list;
			else
				eie->next = newref->inode_list;
			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
						     preftree->count);
			/*
			 * A delayed ref can have newref->count < 0.
			 * The ref->count is updated to follow any
			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
			 */
			update_share_count(sc, ref->count,
					   ref->count + newref->count);
			ref->count += newref->count;
			free_pref(newref);
			return;
		}
	}

	update_share_count(sc, 0, newref->count);
	preftree->count++;
	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
	rb_link_node(&newref->rbnode, parent, p);
	rb_insert_color_cached(&newref->rbnode, root, leftmost);
}

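/*
 * Example of the merge path above (hypothetical sequence): inserting a
 * newref with count == -1 (from a BTRFS_DROP_DELAYED_REF) that compares
 * equal to an existing ref with count == 1 leaves a single node with
 * count == 0; update_share_count(sc, 1, 0) then drops it from the
 * shared-extent accounting. Any inode_list on the newref is appended to
 * the surviving ref before the newref is freed.
 */
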
/*
 * Release the entire tree. We don't care about internal consistency so
 * just free everything and then reset the tree root.
 */
static void prelim_release(struct preftree *preftree)
{
	struct prelim_ref *ref, *next_ref;

	rbtree_postorder_for_each_entry_safe(ref, next_ref,
					     &preftree->root.rb_root, rbnode) {
		free_inode_elem_list(ref->inode_list);
		free_pref(ref);
	}

	preftree->root = RB_ROOT_CACHED;
	preftree->count = 0;
}

/*
 * the rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we've the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we've the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */
static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
			  struct preftree *preftree, u64 root_id,
			  const struct btrfs_key *key, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	struct prelim_ref *ref;

	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return 0;

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key_for_search = *key;
	else
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	prelim_ref_insert(fs_info, preftree, ref, sc);
	return extent_is_shared(sc);
}

/* direct refs use root == 0, key == NULL */
static int add_direct_ref(const struct btrfs_fs_info *fs_info,
			  struct preftrees *preftrees, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
			      parent, wanted_disk_byte, count, sc, gfp_mask);
}

/* indirect refs use parent == 0 */
static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, u64 root_id,
			    const struct btrfs_key *key, int level,
			    u64 wanted_disk_byte, int count,
			    struct share_check *sc, gfp_t gfp_mask)
{
	struct preftree *tree = &preftrees->indirect;

	if (!key)
		tree = &preftrees->indirect_missing_keys;
	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
			      wanted_disk_byte, count, sc, gfp_mask);
}

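/*
 * Call shapes, for orientation (sketch only; the bytenr, parent_bytenr
 * and root_objectid values are hypothetical). A shared block ref knows
 * its parent block directly:
 *
 *	add_direct_ref(fs_info, preftrees, level + 1, parent_bytenr,
 *		       bytenr, 1, NULL, GFP_NOFS);
 *
 * A tree block ref only names the owning root; the parent must be
 * resolved later via resolve_indirect_refs():
 *
 *	add_indirect_ref(fs_info, preftrees, root_objectid, NULL,
 *			 level + 1, bytenr, 1, NULL, GFP_NOFS);
 */
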
static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
{
	struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref = NULL;
	struct prelim_ref target = {};
	int result;

	target.parent = bytenr;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, &target);

		if (result < 0)
			p = &(*p)->rb_left;
		else if (result > 0)
			p = &(*p)->rb_right;
		else
			return 1;
	}
	return 0;
}

static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents,
			   struct preftrees *preftrees, struct prelim_ref *ref,
			   int level, u64 time_seq, const u64 *extent_item_pos,
			   bool ignore_offset)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;
	u64 wanted_disk_byte = ref->wanted_disk_byte;
	u64 count = 0;
	u64 data_offset;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * 1. We normally enter this function with the path already pointing to
	 *    the first item to check. But sometimes, we may enter it with
	 *    slot == nritems.
	 * 2. We are searching for normal backref but bytenr of this leaf
	 *    matches shared data backref
	 * 3. The leaf owner is not equal to the root we are searching
	 *
	 * For these cases, go to the next leaf before we continue.
	 */
	eb = path->nodes[0];
	if (path->slots[0] >= btrfs_header_nritems(eb) ||
	    is_shared_data_backref(preftrees, eb->start) ||
	    ref->root_id != btrfs_header_owner(eb)) {
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_leaf(root, path);
		else
			ret = btrfs_next_old_leaf(root, path, time_seq);
	}

	while (!ret && count < ref->count) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		/*
		 * We are searching for normal backref but bytenr of this leaf
		 * matches shared data backref, OR
		 * the leaf owner is not equal to the root we are searching for
		 */
		if (slot == 0 &&
		    (is_shared_data_backref(preftrees, eb->start) ||
		     ref->root_id != btrfs_header_owner(eb))) {
			if (time_seq == SEQ_LAST)
				ret = btrfs_next_leaf(root, path);
			else
				ret = btrfs_next_old_leaf(root, path, time_seq);
			continue;
		}
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		data_offset = btrfs_file_extent_offset(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			if (ref->key_for_search.offset == key.offset - data_offset)
				count++;
			else
				goto next;
			if (extent_item_pos) {
				ret = check_extent_in_eb(&key, eb, fi,
						*extent_item_pos,
						&eie, ignore_offset);
				if (ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge_ptr(parents, eb->start,
						  eie, (void **)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && extent_item_pos) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
			eie = NULL;
		}
next:
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_item(root, path);
		else
			ret = btrfs_next_old_item(root, path, time_seq);
	}

	if (ret > 0)
		ret = 0;
	else if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
				struct btrfs_path *path, u64 time_seq,
				struct preftrees *preftrees,
				struct prelim_ref *ref, struct ulist *parents,
				const u64 *extent_item_pos, bool ignore_offset)
{
	struct btrfs_root *root;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;
	struct btrfs_key search_key = ref->key_for_search;

	/*
	 * If we're search_commit_root we could possibly be holding locks on
	 * other tree nodes. This happens when qgroups does backref walks when
	 * adding new delayed refs. To deal with this we need to look in cache
	 * for the root, and if we don't find it then we need to search the
	 * tree_root's commit root, thus the btrfs_get_fs_root_commit_root usage
	 * here.
	 */
	if (path->search_commit_root)
		root = btrfs_get_fs_root_commit_root(fs_info, path, ref->root_id);
	else
		root = btrfs_get_fs_root(fs_info, ref->root_id, false);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out_free;
	}

	if (!path->search_commit_root &&
	    test_bit(BTRFS_ROOT_DELETING, &root->state)) {
		ret = -ENOENT;
		goto out;
	}

	if (btrfs_is_testing(fs_info)) {
		ret = -ENOENT;
		goto out;
	}

	if (path->search_commit_root)
		root_level = btrfs_header_level(root->commit_root);
	else if (time_seq == SEQ_LAST)
		root_level = btrfs_header_level(root->node);
	else
		root_level = btrfs_old_root_level(root, time_seq);

	if (root_level + 1 == level)
		goto out;

	/*
	 * We can often find data backrefs with an offset that is too large
	 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
	 * subtracting the data offset of the corresponding extent data item
	 * from a file's offset. This can happen for example in the clone
	 * ioctl.
	 *
	 * So if we detect such a case we set the search key's offset to zero
	 * to make sure we will find the matching file extent item at
	 * add_all_parents(), otherwise we will miss it because the offset
	 * taken from the backref is much larger than the offset of the file
	 * extent item. This can make us scan a very large number of file
	 * extent items, but at least it will not make us miss any.
	 *
	 * This is an ugly workaround for a behaviour that should have never
	 * existed, but it does and a fix for the clone ioctl would touch a lot
	 * of places, cause backwards incompatibility and would not fix the
	 * problem for extents cloned with older kernels.
	 */
	if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
	    search_key.offset >= LLONG_MAX)
		search_key.offset = 0;
	path->lowest_level = level;
	if (time_seq == SEQ_LAST)
		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	else
		ret = btrfs_search_old_slot(root, &search_key, path, time_seq);

	btrfs_debug(fs_info,
		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
		 ref->root_id, level, ref->count, ret,
		 ref->key_for_search.objectid, ref->key_for_search.type,
		 ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (WARN_ON(!level)) {
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(root, path, parents, preftrees, ref, level,
			      time_seq, extent_item_pos, ignore_offset);
out:
	btrfs_put_root(root);
out_free:
	path->lowest_level = 0;
	btrfs_release_path(path);
	return ret;
}

static struct extent_inode_elem *
unode_aux_to_inode_list(struct ulist_node *node)
{
	if (!node)
		return NULL;
	return (struct extent_inode_elem *)(uintptr_t)node->aux;
}

static void free_leaf_list(struct ulist *ulist)
{
	struct ulist_node *node;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(ulist, &uiter)))
		free_inode_elem_list(unode_aux_to_inode_list(node));

	ulist_free(ulist);
}

/*
 * We maintain three separate rbtrees: one for direct refs, one for
 * indirect refs which have a key, and one for indirect refs which do not
 * have a key. Each tree does merge on insertion.
 *
 * Once all of the references are located, we iterate over the tree of
 * indirect refs with missing keys. An appropriate key is located and
 * the ref is moved onto the tree for indirect refs. After all missing
 * keys are thus located, we iterate over the indirect ref tree, resolve
 * each reference, and then insert the resolved reference onto the
 * direct tree (merging there too).
 *
 * New backrefs (i.e., for parent nodes) are added to the appropriate
 * rbtree as they are encountered. The new backrefs are subsequently
 * resolved as above.
 */
static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path, u64 time_seq,
				 struct preftrees *preftrees,
				 const u64 *extent_item_pos,
				 struct share_check *sc, bool ignore_offset)
{
	int err;
	int ret = 0;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;
	struct rb_node *rnode;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * We could trade memory usage for performance here by iterating
	 * the tree, allocating new refs for each insertion, and then
	 * freeing the entire indirect tree when we're done. In some test
	 * cases, the tree can grow quite large (~200k objects).
	 */
	while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
		struct prelim_ref *ref;

		ref = rb_entry(rnode, struct prelim_ref, rbnode);
		if (WARN(ref->parent,
			 "BUG: direct ref found in indirect tree")) {
			ret = -EINVAL;
			goto out;
		}

		rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
		preftrees->indirect.count--;

		if (ref->count == 0) {
			free_pref(ref);
			continue;
		}

		if (sc && sc->root_objectid &&
		    ref->root_id != sc->root_objectid) {
			free_pref(ref);
			ret = BACKREF_FOUND_SHARED;
			goto out;
		}
		err = resolve_indirect_ref(fs_info, path, time_seq, preftrees,
					   ref, parents, extent_item_pos,
					   ignore_offset);
		/*
		 * We can only tolerate -ENOENT; otherwise, we should catch
		 * the error and return directly.
		 */
		if (err == -ENOENT) {
			prelim_ref_insert(fs_info, &preftrees->direct, ref,
					  NULL);
			continue;
		} else if (err) {
			free_pref(ref);
			ret = err;
			goto out;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = unode_aux_to_inode_list(node);

		/* Add a prelim_ref(s) for any other parent(s). */
		while ((node = ulist_next(parents, &uiter))) {
			struct prelim_ref *new_ref;

			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
						   GFP_NOFS);
			if (!new_ref) {
				free_pref(ref);
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = unode_aux_to_inode_list(node);
			prelim_ref_insert(fs_info, &preftrees->direct,
					  new_ref, NULL);
		}

		/*
		 * Now it's a direct ref, put it in the direct tree. We must
		 * do this last because the ref could be merged/freed here.
		 */
		prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);

		ulist_reinit(parents);
		cond_resched();
	}
out:
	/*
	 * We may have inode lists attached to refs in the parents ulist, so we
	 * must free them before freeing the ulist and its refs.
	 */
	free_leaf_list(parents);
	return ret;
}

/*
 * read tree blocks and add keys where required.
 */
static int add_missing_keys(struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, bool lock)
{
	struct prelim_ref *ref;
	struct extent_buffer *eb;
	struct preftree *tree = &preftrees->indirect_missing_keys;
	struct rb_node *node;

	while ((node = rb_first_cached(&tree->root))) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		rb_erase_cached(node, &tree->root);

		BUG_ON(ref->parent);	/* should not be a direct ref */
		BUG_ON(ref->key_for_search.type);
		BUG_ON(!ref->wanted_disk_byte);

		eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0,
				     ref->level - 1, NULL);
		if (IS_ERR(eb)) {
			free_pref(ref);
			return PTR_ERR(eb);
		} else if (!extent_buffer_uptodate(eb)) {
			free_pref(ref);
			free_extent_buffer(eb);
			return -EIO;
		}
		if (lock)
			btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		if (lock)
			btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
		cond_resched();
	}
	return 0;
}

/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to @seq to the list
 */
static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_head *head, u64 seq,
			    struct preftrees *preftrees, struct share_check *sc)
{
	struct btrfs_delayed_ref_node *node;
	struct btrfs_key key;
	struct rb_node *n;
	int count;
	int ret = 0;

	spin_lock(&head->lock);
	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				ref_node);
		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			count = node->ref_mod;
			break;
		case BTRFS_DROP_DELAYED_REF:
			count = node->ref_mod * -1;
			break;
		default:
			BUG();
		}
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			/* NORMAL INDIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;
			struct btrfs_key *key_ptr = NULL;

			if (head->extent_op && head->extent_op->update_key) {
				btrfs_disk_key_to_cpu(&key, &head->extent_op->key);
				key_ptr = &key;
			}

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       key_ptr, ref->level + 1,
					       node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			/* SHARED DIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);

			ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
					     ref->parent, node->bytenr, count,
					     sc, GFP_ATOMIC);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_delayed_data_ref *ref;
			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;

			/*
			 * If we have a share check context and a reference for
			 * another inode, we can't exit immediately. This is
			 * because even if this is a BTRFS_ADD_DELAYED_REF
			 * reference we may find next a BTRFS_DROP_DELAYED_REF
			 * which cancels out this ADD reference.
			 *
			 * If this is a DROP reference and there was no previous
			 * ADD reference, then we need to signal that when we
			 * process references from the extent tree (through
			 * add_inline_refs() and add_keyed_refs()), we should
			 * not exit early if we find a reference for another
			 * inode, because one of the delayed DROP references
			 * may cancel that reference in the extent tree.
			 */
			if (sc && count < 0)
				sc->have_delayed_delete_refs = true;

			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &key, 0, node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
					     node->bytenr, count, sc,
					     GFP_ATOMIC);
			break;
		}
		default:
			WARN_ON(1);
		}
		/*
		 * We must ignore BACKREF_FOUND_SHARED until all delayed
		 * refs have been checked.
		 */
		if (ret && (ret != BACKREF_FOUND_SHARED))
			break;
	}
	if (!ret)
		ret = extent_is_shared(sc);

	spin_unlock(&head->lock);
	return ret;
}

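/*
 * Example of why have_delayed_delete_refs matters (hypothetical
 * sequence): while checking sharedness for inode 257, a queued
 * BTRFS_DROP_DELAYED_REF for inode 258 sets the flag above. If the
 * extent tree still holds the soon-to-be-deleted on-disk ref for inode
 * 258, add_inline_refs() and add_keyed_refs() must not return
 * BACKREF_FOUND_SHARED for it, because merging the delayed -1 count
 * cancels that reference out.
 */
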
/*
 * add all inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_inline_refs(const struct btrfs_fs_info *fs_info,
			   struct btrfs_path *path, u64 bytenr,
			   int *info_level, struct preftrees *preftrees,
			   struct share_check *sc)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size_nr(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		*info_level = found_key.offset;
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref,
							BTRFS_REF_TYPE_ANY);
		if (type == BTRFS_REF_TYPE_INVALID)
			return -EUCLEAN;

		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_direct_ref(fs_info, preftrees,
					     *info_level + 1, offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);

			ret = add_direct_ref(fs_info, preftrees, 0, offset,
					     bytenr, count, sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_indirect_ref(fs_info, preftrees, offset,
					       NULL, *info_level + 1,
					       bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum &&
			    !sc->have_delayed_delete_refs) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);

			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);

			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}

/*
 * add all non-inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_keyed_refs(struct btrfs_fs_info *fs_info,
			  struct btrfs_path *path, u64 bytenr,
			  int info_level, struct preftrees *preftrees,
			  struct share_check *sc)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			/* SHARED DIRECT METADATA backref */
			ret = add_direct_ref(fs_info, preftrees,
					     info_level + 1, key.offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					       struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = add_direct_ref(fs_info, preftrees, 0,
					     key.offset, bytenr, count,
					     sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			/* NORMAL INDIRECT METADATA backref */
			ret = add_indirect_ref(fs_info, preftrees, key.offset,
					       NULL, info_level + 1, bytenr,
					       1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum &&
			    !sc->have_delayed_delete_refs) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;

	}

	return ret;
}

/*
 * this adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * If time_seq is set to SEQ_LAST, it will not search delayed_refs, and behaves
 * much like the trans == NULL case; the only difference is that it will not
 * search the commit root.
 * The special case is for qgroup to search roots in commit_transaction().
 *
 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
 * shared extent is detected.
 *
 * Otherwise this returns 0 for success and <0 for an error.
 *
 * If ignore_offset is set to false, only extent refs whose offsets match
 * extent_item_pos are returned.  If true, every extent ref is returned
 * and extent_item_pos is ignored.
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos,
			     struct share_check *sc, bool ignore_offset)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct prelim_ref *ref;
	struct rb_node *node;
	struct extent_inode_elem *eie = NULL;
	struct preftrees preftrees = {
		.direct = PREFTREE_INIT,
		.indirect = PREFTREE_INIT,
		.indirect_missing_keys = PREFTREE_INIT
	};

	key.objectid = bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!trans) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	if (time_seq == SEQ_LAST)
		path->skip_locking = 1;

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret == 0) {
		/* This shouldn't happen, indicates a bug or fs corruption. */
		ASSERT(ret != 0);
		ret = -EUCLEAN;
		goto out;
	}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (trans && likely(trans->type != __TRANS_DUMMY) &&
	    time_seq != SEQ_LAST) {
#else
	if (trans && time_seq != SEQ_LAST) {
#endif
		/*
		 * look if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				refcount_inc(&head->refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref_head(head);
				goto again;
			}
			spin_unlock(&delayed_refs->lock);
			ret = add_delayed_refs(fs_info, head, time_seq,
					       &preftrees, sc);
			mutex_unlock(&head->mutex);
			if (ret)
				goto out;
		} else {
			spin_unlock(&delayed_refs->lock);
		}
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = add_inline_refs(fs_info, path, bytenr,
					      &info_level, &preftrees, sc);
			if (ret)
				goto out;
			ret = add_keyed_refs(fs_info, path, bytenr, info_level,
					     &preftrees, sc);
			if (ret)
				goto out;
		}
	}

	btrfs_release_path(path);

	ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));

	ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
				    extent_item_pos, sc, ignore_offset);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));

	/*
	 * This walks the tree of merged and resolved refs. Tree blocks are
	 * read in as needed. Unique entries are added to the ulist, and
	 * the list of found roots is updated.
	 *
	 * We release the entire tree in one go before returning.
	 */
	node = rb_first_cached(&preftrees.direct.root);
	while (node) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		node = rb_next(&ref->rbnode);
		/*
		 * ref->count < 0 can happen here if there are delayed
		 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
		 * prelim_ref_insert() relies on this when merging
		 * identical refs to keep the overall count correct.
		 * prelim_ref_insert() will merge only those refs
		 * which compare identically.  Any refs having
		 * e.g. different offsets would not be merged,
		 * and would retain their original ref->count < 0.
		 */
		if (roots && ref->count && ref->root_id && ref->parent == 0) {
			if (sc && sc->root_objectid &&
			    ref->root_id != sc->root_objectid) {
				ret = BACKREF_FOUND_SHARED;
				goto out;
			}

			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			if (extent_item_pos && !ref->inode_list &&
			    ref->level == 0) {
				struct extent_buffer *eb;

				eb = read_tree_block(fs_info, ref->parent, 0,
						     ref->level, NULL);
				if (IS_ERR(eb)) {
					ret = PTR_ERR(eb);
					goto out;
				} else if (!extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}

				if (!path->skip_locking) {
					btrfs_tree_read_lock(eb);
					btrfs_set_lock_blocking_read(eb);
				}
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie, ignore_offset);
				if (!path->skip_locking)
					btrfs_tree_read_unlock_blocking(eb);
				free_extent_buffer(eb);
				if (ret < 0)
					goto out;
				ref->inode_list = eie;
				/*
				 * We transferred the list ownership to the ref,
				 * so set to NULL to avoid a double free in case
				 * an error happens after this.
				 */
				eie = NULL;
			}
			ret = ulist_add_merge_ptr(refs, ref->parent,
						  ref->inode_list,
						  (void **)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && extent_item_pos) {
				/*
				 * We've recorded that parent, so we must extend
				 * its inode list here.
				 *
				 * However if there was corruption we may not
				 * have found an eie, return an error in this
				 * case.
				 */
				ASSERT(eie);
				if (!eie) {
					ret = -EUCLEAN;
					goto out;
				}
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			eie = NULL;
			/*
			 * We have transferred the inode list ownership from
			 * this ref to the ref we added to the 'refs' ulist.
			 * So set this ref's inode list to NULL to avoid
			 * use-after-free when our caller uses it or double
			 * frees in case an error happens before we return.
			 */
			ref->inode_list = NULL;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);

	prelim_release(&preftrees.direct);
	prelim_release(&preftrees.indirect);
	prelim_release(&preftrees.indirect_missing_keys);

	if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

/*
 * Finds all leaves with a reference to the specified combination of bytenr and
 * offset. key_list_head will point to a list of corresponding keys (caller must
 * free each list element). The leaves will be stored in the leafs ulist, which
 * must be freed with ulist_free.
 *
 * returns 0 on success, <0 on error
 */
int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **leafs,
			 const u64 *extent_item_pos, bool ignore_offset)
{
	int ret;

	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs)
		return -ENOMEM;

	ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
				*leafs, NULL, extent_item_pos, NULL, ignore_offset);
	if (ret < 0 && ret != -ENOENT) {
		free_leaf_list(*leafs);
		return ret;
	}

	return 0;
}

/*
 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
 */
static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info, u64 bytenr,
				     u64 time_seq, struct ulist **roots,
				     bool ignore_offset)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	struct ulist_iterator uiter;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
					tmp, *roots, NULL, NULL, ignore_offset);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			*roots = NULL;
			return ret;
		}
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		cond_resched();
	}

	ulist_free(tmp);
	return 0;
}

int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **roots,
			 bool ignore_offset)
{
	int ret;

	if (!trans)
		down_read(&fs_info->commit_root_sem);
	ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
					time_seq, roots, ignore_offset);
	if (!trans)
		up_read(&fs_info->commit_root_sem);
	return ret;
}

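/*
 * Minimal usage sketch (illustrative only; the surrounding locals and
 * error handling are assumed from the caller's context):
 *
 *	struct ulist *roots = NULL;
 *	int ret;
 *
 *	ret = btrfs_find_all_roots(trans, fs_info, bytenr, time_seq,
 *				   &roots, false);
 *	if (!ret) {
 *		... iterate with ULIST_ITER_INIT()/ulist_next() ...
 *		ulist_free(roots);
 *	}
 */
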
/**
 * btrfs_check_shared - tell us whether an extent is shared
 *
 * btrfs_check_shared uses the backref walking code but will short
 * circuit as soon as it finds a root or inode that doesn't match the
 * one passed in. This provides a significant performance benefit for
 * callers (such as fiemap) which want to know whether the extent is
 * shared but do not need a ref count.
 *
 * This attempts to attach to the running transaction in order to account for
 * delayed refs, but continues on even when no running transaction exists.
 *
 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
 */
int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
		       struct ulist *roots, struct ulist *tmp)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct ulist_iterator uiter;
	struct ulist_node *node;
	struct seq_list elem = SEQ_LIST_INIT(elem);
	int ret = 0;
	struct share_check shared = {
		.root_objectid = root->root_key.objectid,
		.inum = inum,
		.share_count = 0,
		.have_delayed_delete_refs = false,
	};

	ulist_init(roots);
	ulist_init(tmp);

	trans = btrfs_join_transaction_nostart(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
			ret = PTR_ERR(trans);
			goto out;
		}
		trans = NULL;
		down_read(&fs_info->commit_root_sem);
	} else {
		btrfs_get_tree_mod_seq(fs_info, &elem);
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
					roots, NULL, &shared, false);
		if (ret == BACKREF_FOUND_SHARED) {
			/* this is the only condition under which we return 1 */
			ret = 1;
			break;
		}
		if (ret < 0 && ret != -ENOENT)
			break;
		ret = 0;
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		shared.share_count = 0;
		shared.have_delayed_delete_refs = false;
		cond_resched();
	}

	if (trans) {
		btrfs_put_tree_mod_seq(fs_info, &elem);
		btrfs_end_transaction(trans);
	} else {
		up_read(&fs_info->commit_root_sem);
	}
out:
	ulist_release(roots);
	ulist_release(tmp);
	return ret;
}

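/*
 * Usage sketch (mirrors a fiemap-style caller; the locals and flag
 * handling here are hypothetical):
 *
 *	struct ulist *roots = ulist_alloc(GFP_KERNEL);
 *	struct ulist *tmp = ulist_alloc(GFP_KERNEL);
 *	int shared;
 *
 *	shared = btrfs_check_shared(root, btrfs_ino(BTRFS_I(inode)),
 *				    bytenr, roots, tmp);
 *	if (shared > 0)
 *		flags |= FIEMAP_EXTENT_SHARED;
 *	ulist_free(roots);
 *	ulist_free(tmp);
 */
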
int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
			  u64 start_off, struct btrfs_path *path,
			  struct btrfs_inode_extref **ret_extref,
			  u64 *found_off)
{
	int ret, slot;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_inode_extref *extref;
	const struct extent_buffer *leaf;
	unsigned long ptr;

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = start_off;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			/*
			 * If the item at offset is not found,
			 * btrfs_search_slot will point us to the slot
			 * where it should be inserted. In our case
			 * that will be the slot directly before the
			 * next INODE_REF_KEY_V2 item. In the case
			 * that we're pointing to the last slot in a
			 * leaf, we must move one leaf over.
			 */
			ret = btrfs_next_leaf(root, path);
			if (ret) {
				if (ret >= 1)
					ret = -ENOENT;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/*
		 * Check that we're still looking at an extended ref key for
		 * this particular objectid. If we have different
		 * objectid or type then there are no more to be found
		 * in the tree and we can exit.
		 */
		ret = -ENOENT;
		if (found_key.objectid != inode_objectid)
			break;
		if (found_key.type != BTRFS_INODE_EXTREF_KEY)
			break;

		ret = 0;
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		extref = (struct btrfs_inode_extref *)ptr;
		*ret_extref = extref;
		if (found_off)
			*found_off = found_key.offset;
		break;
	}

	return ret;
}

/*
 * This iterates to turn a name (from iref/extref) into a full filesystem
 * path. Elements of the path are separated by '/' and the path is guaranteed
 * to be 0-terminated. The path is only given within the current filesystem,
 * so it never starts with a '/'. The caller is responsible for providing
 * "size" bytes in "dest". The dest buffer is filled backwards; finally, the
 * start of the resulting string is returned, which normally points into dest.
 * In case the path buffer would overflow, the pointer is decremented further
 * as if output had been written to the buffer, though no more output is
 * actually generated. That way, the caller can determine how much space
 * would be required for the path to fit into the buffer. In that case, the
 * returned value will be smaller than dest. Callers must check for this!
 */
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			u32 name_len, unsigned long name_off,
			struct extent_buffer *eb_in, u64 parent,
			char *dest, u32 size)
{
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = ((s64)size) - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;
	int leave_spinning = path->leave_spinning;
	struct btrfs_inode_ref *iref;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	path->leave_spinning = 1;
	while (1) {
		bytes_left -= name_len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
					   name_off, name_len);
		if (eb != eb_in) {
			if (!path->skip_locking)
				btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
		}
		ret = btrfs_find_item(fs_root, path, parent, 0,
				BTRFS_INODE_REF_KEY, &found_key);
		if (ret > 0)
			ret = -ENOENT;
		if (ret)
			break;

		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in) {
			if (!path->skip_locking)
				btrfs_set_lock_blocking_read(eb);
			path->nodes[0] = NULL;
			path->locks[0] = 0;
		}
		btrfs_release_path(path);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		name_len = btrfs_inode_ref_name_len(eb, iref);
		name_off = (unsigned long)(iref + 1);

		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);
	path->leave_spinning = leave_spinning;

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}

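/*
 * Illustrative only (not part of the original file): a hedged sketch of the
 * overflow contract described above. Because btrfs_ref_to_path() fills the
 * buffer backwards, a return value below "dest" means the path did not fit,
 * and "dest - returned" is the number of missing bytes. The helper name is
 * hypothetical.
 */
static int __maybe_unused example_check_ref_path(struct btrfs_root *fs_root,
						 struct btrfs_path *path,
						 u32 name_len,
						 unsigned long name_off,
						 struct extent_buffer *eb,
						 u64 parent,
						 char *buf, u32 bufsize)
{
	char *start;

	start = btrfs_ref_to_path(fs_root, path, name_len, name_off, eb,
				  parent, buf, bufsize);
	if (IS_ERR(start))
		return PTR_ERR(start);
	if (start < buf) {
		/* truncated: (buf - start) more bytes would have been needed */
		return -ENAMETOOLONG;
	}
	/* "start" now points at the 0-terminated path inside buf */
	return 0;
}
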
/*
 * This makes the path point to the extent item (logical, EXTENT_ITEM, *)
 * covering @logical. On success it returns 0 and stores either
 * BTRFS_EXTENT_FLAG_DATA (for data) or BTRFS_EXTENT_FLAG_TREE_BLOCK (for
 * tree blocks) in *flags_ret; on error it returns <0.
 */
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key,
			u64 *flags_ret)
{
	int ret;
	u64 flags;
	u64 size = 0;
	u32 item_size;
	const struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		return ret;
	}
	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
		size = fs_info->nodesize;
	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
		size = found_key->offset;

	if (found_key->objectid > logical ||
	    found_key->objectid + size <= logical) {
		btrfs_debug(fs_info,
			"logical %llu is not within any extent", logical);
		return -ENOENT;
	}

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	btrfs_debug(fs_info,
		"logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
		logical, logical - found_key->objectid, found_key->objectid,
		found_key->offset, flags, item_size);

	WARN_ON(!flags_ret);
	if (flags_ret) {
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
		else if (flags & BTRFS_EXTENT_FLAG_DATA)
			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
		else
			BUG();
		return 0;
	}

	return -EIO;
}

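/*
 * Illustrative only (not part of the original file): a minimal sketch of the
 * extent_from_logical() calling convention, mirroring the pattern used by
 * iterate_inodes_from_logical() further below. The helper name is
 * hypothetical.
 */
static int __maybe_unused example_resolve_logical(struct btrfs_fs_info *fs_info,
						  u64 logical)
{
	struct btrfs_path *path;
	struct btrfs_key found_key;
	u64 flags = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
	btrfs_free_path(path);
	if (ret < 0)
		return ret;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		/* metadata: found_key describes the tree block extent */
		return 0;
	}
	/* data: the byte offset of @logical within the extent */
	btrfs_debug(fs_info, "offset within extent: %llu",
		    logical - found_key.objectid);
	return 0;
}
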
/*
 * Helper function to iterate extent inline refs. *ptr must be 0 on the first
 * call and may be modified; it is used to track iteration state. If more
 * refs exist, 0 is returned and the next call to get_extent_inline_ref must
 * pass the modified ptr parameter to get the next ref. After the last ref
 * has been processed, 1 is returned; <0 is returned on error.
 */
static int get_extent_inline_ref(unsigned long *ptr,
				 const struct extent_buffer *eb,
				 const struct btrfs_key *key,
				 const struct btrfs_extent_item *ei,
				 u32 item_size,
				 struct btrfs_extent_inline_ref **out_eiref,
				 int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			if (key->type == BTRFS_METADATA_ITEM_KEY) {
				/* a skinny metadata extent */
				*out_eiref =
				     (struct btrfs_extent_inline_ref *)(ei + 1);
			} else {
				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
				info = (struct btrfs_tree_block_info *)(ei + 1);
				*out_eiref =
				   (struct btrfs_extent_inline_ref *)(info + 1);
			}
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
	*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
						     BTRFS_REF_TYPE_ANY);
	if (*out_type == BTRFS_REF_TYPE_INVALID)
		return -EUCLEAN;

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}

/*
 * Reads the tree block backref for an extent. The tree level and root are
 * returned through out_level and out_root. *ptr must be 0 for the first call
 * and may be modified (see the get_extent_inline_ref comment).
 * Returns 0 if data was provided, 1 if there was no more data to provide, or
 * <0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_key *key, struct btrfs_extent_item *ei,
			    u32 item_size, u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	while (1) {
		ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
					    &eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);

	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)(ei + 1);
		*out_level = btrfs_tree_block_level(eb, info);
	} else {
		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
		*out_level = (u8)key->offset;
	}

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}

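/*
 * Illustrative only (not part of the original file): a hedged sketch of the
 * tree_backref_for_extent() iteration protocol described above. "ptr" starts
 * at 0 and is advanced by the callee; iteration ends when 1 is returned. The
 * eb/key/ei/item_size arguments are assumed to come from a prior
 * extent_from_logical() lookup on a tree block. The helper name is
 * hypothetical.
 */
static int __maybe_unused example_walk_tree_backrefs(
		struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
		struct btrfs_key *key, struct btrfs_extent_item *ei,
		u32 item_size)
{
	unsigned long ptr = 0;
	u64 root;
	u8 level;
	int ret;

	while (1) {
		ret = tree_backref_for_extent(&ptr, eb, key, ei, item_size,
					      &root, &level);
		if (ret < 0)
			return ret;
		if (ret == 1)	/* all backrefs consumed */
			break;
		btrfs_debug(fs_info, "ref from root %llu at level %u",
			    root, level);
	}
	return 0;
}
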
static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
			     struct extent_inode_elem *inode_list,
			     u64 root, u64 extent_item_objectid,
			     iterate_extent_inodes_t *iterate, void *ctx)
{
	struct extent_inode_elem *eie;
	int ret = 0;

	for (eie = inode_list; eie; eie = eie->next) {
		btrfs_debug(fs_info,
			    "ref for %llu resolved, key (%llu EXTENT_DATA %llu), root %llu",
			    extent_item_objectid, eie->inum,
			    eie->offset, root);
		ret = iterate(eie->inum, eie->offset, root, ctx);
		if (ret) {
			btrfs_debug(fs_info,
				    "stopping iteration for %llu due to ret=%d",
				    extent_item_objectid, ret);
			break;
		}
	}

	return ret;
}

/*
 * Calls iterate() for every inode that references the extent identified by
 * the given parameters. When the iterator function returns a non-zero value,
 * iteration stops.
 */
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
			  u64 extent_item_objectid, u64 extent_item_pos,
			  int search_commit_root,
			  iterate_extent_inodes_t *iterate, void *ctx,
			  bool ignore_offset)
{
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *refs = NULL;
	struct ulist *roots = NULL;
	struct ulist_node *ref_node = NULL;
	struct ulist_node *root_node = NULL;
	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
	struct ulist_iterator ref_uiter;
	struct ulist_iterator root_uiter;

	btrfs_debug(fs_info, "resolving all inodes for extent %llu",
			extent_item_objectid);

	if (!search_commit_root) {
		trans = btrfs_attach_transaction(fs_info->extent_root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT &&
			    PTR_ERR(trans) != -EROFS)
				return PTR_ERR(trans);
			trans = NULL;
		}
	}

	if (trans)
		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	else
		down_read(&fs_info->commit_root_sem);

	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
				   tree_mod_seq_elem.seq, &refs,
				   &extent_item_pos, ignore_offset);
	if (ret)
		goto out;

	ULIST_ITER_INIT(&ref_uiter);
	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
		ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
						tree_mod_seq_elem.seq, &roots,
						ignore_offset);
		if (ret)
			break;
		ULIST_ITER_INIT(&root_uiter);
		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
			btrfs_debug(fs_info,
				    "root %llu references leaf %llu, data list %#llx",
				    root_node->val, ref_node->val,
				    ref_node->aux);
			ret = iterate_leaf_refs(fs_info,
						(struct extent_inode_elem *)
						(uintptr_t)ref_node->aux,
						root_node->val,
						extent_item_objectid,
						iterate, ctx);
		}
		ulist_free(roots);
	}

	free_leaf_list(refs);
out:
	if (trans) {
		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
		btrfs_end_transaction(trans);
	} else {
		up_read(&fs_info->commit_root_sem);
	}

	return ret;
}

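/*
 * Illustrative only (not part of the original file): a hedged sketch of an
 * iterate_extent_inodes_t callback. Each invocation reports one (inode,
 * file offset, root) triple referencing the extent; returning non-zero stops
 * the iteration early, as described above. The callback name is hypothetical.
 */
static int __maybe_unused example_build_ino_list(u64 inum, u64 offset,
						 u64 root, void *ctx)
{
	/* ctx is whatever pointer was handed to iterate_extent_inodes() */
	struct ulist *inodes = ctx;
	int ret;

	/* ulist_add() returns 1 if newly added, 0 if present, <0 on error */
	ret = ulist_add(inodes, inum, 0, GFP_NOFS);
	/* only a real error should stop the iteration */
	return ret < 0 ? ret : 0;
}
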
int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx,
				bool ignore_offset)
{
	int ret;
	u64 extent_item_pos;
	u64 flags = 0;
	struct btrfs_key found_key;
	int search_commit_root = path->search_commit_root;

	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
	btrfs_release_path(path);
	if (ret < 0)
		return ret;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return -EINVAL;

	extent_item_pos = logical - found_key.objectid;
	ret = iterate_extent_inodes(fs_info, found_key.objectid,
				    extent_item_pos, search_commit_root,
				    iterate, ctx, ignore_offset);

	return ret;
}

typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
			      struct extent_buffer *eb, void *ctx);

static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
			      struct btrfs_path *path,
			      iterate_irefs_t *iterate, void *ctx)
{
	int ret = 0;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (!ret) {
		ret = btrfs_find_item(fs_root, path, inum,
				parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
				&found_key);

		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		btrfs_release_path(path);

		item = btrfs_item_nr(slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			btrfs_debug(fs_root->fs_info,
				"following ref at offset %u for inode %llu in tree %llu",
				cur, found_key.objectid,
				fs_root->root_key.objectid);
			ret = iterate(parent, name_len,
				      (unsigned long)(iref + 1), eb, ctx);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		free_extent_buffer(eb);
	}

	btrfs_release_path(path);

	return ret;
}

static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
				 struct btrfs_path *path,
				 iterate_irefs_t *iterate, void *ctx)
{
	int ret;
	int slot;
	u64 offset = 0;
	u64 parent;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_inode_extref *extref;
	u32 item_size;
	u32 cur_offset;
	unsigned long ptr;

	while (1) {
		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
					    &offset);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		btrfs_release_path(path);

		item_size = btrfs_item_size_nr(eb, slot);
		ptr = btrfs_item_ptr_offset(eb, slot);
		cur_offset = 0;

		while (cur_offset < item_size) {
			u32 name_len;

			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
			parent = btrfs_inode_extref_parent(eb, extref);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			ret = iterate(parent, name_len,
				      (unsigned long)&extref->name, eb, ctx);
			if (ret)
				break;

			cur_offset += btrfs_inode_extref_name_len(eb, extref);
			cur_offset += sizeof(*extref);
		}
		free_extent_buffer(eb);

		offset++;
	}

	btrfs_release_path(path);

	return ret;
}

static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
			 struct btrfs_path *path, iterate_irefs_t *iterate,
			 void *ctx)
{
	int ret;
	int found_refs = 0;

	ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
	if (!ret)
		++found_refs;
	else if (ret != -ENOENT)
		return ret;

	ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
	if (ret == -ENOENT && found_refs)
		return 0;

	return ret;
}

/*
 * Returns 0 if the path could be dumped (possibly truncated).
 * Returns <0 in case of an error.
 */
static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
			 struct extent_buffer *eb, void *ctx)
{
	struct inode_fs_paths *ipath = ctx;
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
					ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
				   name_off, eb, inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}

/*
 * This dumps all filesystem paths to the inode into the ipath struct,
 * provided it has been created large enough. Each path is zero-terminated
 * and accessed from ipath->fspath->val[i].
 * When it returns, there are ipath->fspath->elem_cnt paths available in
 * ipath->fspath->val[]. When the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed;
 * otherwise, it's zero. ipath->fspath->bytes_missing holds the number of
 * bytes that would have been needed to return all paths.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
			     inode_to_path, ipath);
}

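/*
 * Illustrative only (not part of the original file): a hedged sketch of the
 * ipath lifecycle around paths_from_inode(), loosely mirroring what the
 * BTRFS_IOC_INO_PATHS ioctl handler does. The 4096-byte container size and
 * the helper name are arbitrary choices for the example.
 */
static int __maybe_unused example_print_paths(struct btrfs_root *fs_root,
					      u64 inum)
{
	struct btrfs_path *path;
	struct inode_fs_paths *ipath;
	u32 i;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ipath = init_ipath(4096, fs_root, path);
	if (IS_ERR(ipath)) {
		btrfs_free_path(path);
		return PTR_ERR(ipath);
	}

	ret = paths_from_inode(inum, ipath);
	if (!ret) {
		/* each val[i] points at a zero-terminated path */
		for (i = 0; i < ipath->fspath->elem_cnt; i++)
			btrfs_debug(fs_root->fs_info, "path: %s",
				    (char *)(unsigned long)ipath->fspath->val[i]);
	}

	free_ipath(ipath);
	btrfs_free_path(path);
	return ret;
}
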
struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = kvmalloc(alloc_bytes, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}

/*
 * Allocates space to return multiple filesystem paths for an inode.
 * total_bytes to allocate are passed; note that the space usable for actual
 * path information will be total_bytes - sizeof(struct btrfs_data_container).
 * The returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
				  struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return ERR_CAST(fspath);

	ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
	if (!ifp) {
		kvfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}

void free_ipath(struct inode_fs_paths *ipath)
{
	if (!ipath)
		return;
	kvfree(ipath->fspath);
	kfree(ipath);
}

struct btrfs_backref_iter *btrfs_backref_iter_alloc(
		struct btrfs_fs_info *fs_info, gfp_t gfp_flag)
{
	struct btrfs_backref_iter *ret;

	ret = kzalloc(sizeof(*ret), gfp_flag);
	if (!ret)
		return NULL;

	ret->path = btrfs_alloc_path();
	if (!ret->path) {
		kfree(ret);
		return NULL;
	}

	/* Current backref iterator only supports iteration in commit root */
	ret->path->search_commit_root = 1;
	ret->path->skip_locking = 1;
	ret->fs_info = fs_info;

	return ret;
}

int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
{
	struct btrfs_fs_info *fs_info = iter->fs_info;
	struct btrfs_path *path = iter->path;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	key.type = BTRFS_METADATA_ITEM_KEY;
	key.offset = (u64)-1;
	iter->bytenr = bytenr;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	if (ret == 0) {
		ret = -EUCLEAN;
		goto release;
	}
	if (path->slots[0] == 0) {
		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
		ret = -EUCLEAN;
		goto release;
	}
	path->slots[0]--;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
	     key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
		ret = -ENOENT;
		goto release;
	}
	memcpy(&iter->cur_key, &key, sizeof(key));
	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
	iter->end_ptr = (u32)(iter->item_ptr +
			btrfs_item_size_nr(path->nodes[0], path->slots[0]));
	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_extent_item);

	/*
	 * Only iteration over tree backrefs is supported yet.
	 *
	 * This is an extra precaution for non skinny-metadata filesystems,
	 * where EXTENT_ITEM is also used for tree blocks; there we can only
	 * use the extent flags to determine if it's a tree block.
	 */
	if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
		ret = -ENOTSUPP;
		goto release;
	}
	iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));

	/* If there is no inline backref, go search for keyed backref */
	if (iter->cur_ptr >= iter->end_ptr) {
		ret = btrfs_next_item(fs_info->extent_root, path);

		/* No inline nor keyed ref */
		if (ret > 0) {
			ret = -ENOENT;
			goto release;
		}
		if (ret < 0)
			goto release;

		btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
				      path->slots[0]);
		if (iter->cur_key.objectid != bytenr ||
		    (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
		     iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
			ret = -ENOENT;
			goto release;
		}
		iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
							   path->slots[0]);
		iter->item_ptr = iter->cur_ptr;
		iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size_nr(
				      path->nodes[0], path->slots[0]));
	}

	return 0;
release:
	btrfs_backref_iter_release(iter);
	return ret;
}

/*
 * Go to the next backref item of the current bytenr, which can be either
 * inlined or keyed.
 *
 * Caller needs to check whether it's an inline ref or not via iter->cur_key.
 *
 * Return 0 if we get the next backref without problem.
 * Return >0 if there is no extra backref for this bytenr.
 * Return <0 if something went wrong.
 */
int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
{
	struct extent_buffer *eb = btrfs_backref_get_eb(iter);
	struct btrfs_path *path = iter->path;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	u32 size;

	if (btrfs_backref_iter_is_inline_ref(iter)) {
		/* We're still inside the inline refs */
		ASSERT(iter->cur_ptr < iter->end_ptr);

		if (btrfs_backref_has_tree_block_info(iter)) {
			/* First tree block info */
			size = sizeof(struct btrfs_tree_block_info);
		} else {
			/* Use inline ref type to determine the size */
			int type;

			iref = (struct btrfs_extent_inline_ref *)
				((unsigned long)iter->cur_ptr);
			type = btrfs_extent_inline_ref_type(eb, iref);

			size = btrfs_extent_inline_ref_size(type);
		}
		iter->cur_ptr += size;
		if (iter->cur_ptr < iter->end_ptr)
			return 0;

		/* All inline items iterated, fall through */
	}

	/* We're at keyed items, there is no inline item, go to the next one */
	ret = btrfs_next_item(iter->fs_info->extent_root, iter->path);
	if (ret)
		return ret;

	btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
	if (iter->cur_key.objectid != iter->bytenr ||
	    (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
	     iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
		return 1;
	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
	iter->cur_ptr = iter->item_ptr;
	iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size_nr(path->nodes[0],
								 path->slots[0]);
	return 0;
}

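/*
 * Illustrative only (not part of the original file): a hedged sketch of the
 * backref iterator protocol described above, assuming the
 * btrfs_backref_iter_free() inline helper declared in backref.h to undo
 * btrfs_backref_iter_alloc(). The helper name is hypothetical.
 */
static int __maybe_unused example_iterate_backrefs(struct btrfs_fs_info *fs_info,
						   u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	int ret;

	iter = btrfs_backref_iter_alloc(fs_info, GFP_NOFS);
	if (!iter)
		return -ENOMEM;

	ret = btrfs_backref_iter_start(iter, bytenr);
	if (ret < 0)
		goto out;
	while (1) {
		/* iter->cur_key / iter->cur_ptr describe the current backref */
		ret = btrfs_backref_iter_next(iter);
		if (ret < 0)
			goto out;
		if (ret > 0)	/* no more backrefs for this bytenr */
			break;
		cond_resched();
	}
	ret = 0;
out:
	btrfs_backref_iter_free(iter);
	return ret;
}
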
void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
			      struct btrfs_backref_cache *cache, int is_reloc)
{
	int i;

	cache->rb_root = RB_ROOT;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&cache->pending[i]);
	INIT_LIST_HEAD(&cache->changed);
	INIT_LIST_HEAD(&cache->detached);
	INIT_LIST_HEAD(&cache->leaves);
	INIT_LIST_HEAD(&cache->pending_edge);
	INIT_LIST_HEAD(&cache->useless_node);
	cache->fs_info = fs_info;
	cache->is_reloc = is_reloc;
}

struct btrfs_backref_node *btrfs_backref_alloc_node(
		struct btrfs_backref_cache *cache, u64 bytenr, int level)
{
	struct btrfs_backref_node *node;

	ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
	node = kzalloc(sizeof(*node), GFP_NOFS);
	if (!node)
		return node;

	INIT_LIST_HEAD(&node->list);
	INIT_LIST_HEAD(&node->upper);
	INIT_LIST_HEAD(&node->lower);
	RB_CLEAR_NODE(&node->rb_node);
	cache->nr_nodes++;
	node->level = level;
	node->bytenr = bytenr;

	return node;
}

struct btrfs_backref_edge *btrfs_backref_alloc_edge(
		struct btrfs_backref_cache *cache)
{
	struct btrfs_backref_edge *edge;

	edge = kzalloc(sizeof(*edge), GFP_NOFS);
	if (edge)
		cache->nr_edges++;
	return edge;
}

/*
 * Drop the backref node from cache, also cleaning up all its
 * upper edges and any uncached nodes in the path.
 *
 * This cleanup happens bottom up, thus the node should either
 * be the lowest node in the cache or a detached node.
 */
void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
				struct btrfs_backref_node *node)
{
	struct btrfs_backref_node *upper;
	struct btrfs_backref_edge *edge;

	if (!node)
		return;

	BUG_ON(!node->lowest && !node->detached);
	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next, struct btrfs_backref_edge,
				  list[LOWER]);
		upper = edge->node[UPPER];
		list_del(&edge->list[LOWER]);
		list_del(&edge->list[UPPER]);
		btrfs_backref_free_edge(cache, edge);

		/*
		 * Add the node to the leaf node list if no other child block
		 * is cached.
		 */
		if (list_empty(&upper->lower)) {
			list_add_tail(&upper->lower, &cache->leaves);
			upper->lowest = 1;
		}
	}

	btrfs_backref_drop_node(cache, node);
}

/*
 * Release all nodes/edges from current cache
 */
void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
{
	struct btrfs_backref_node *node;
	int i;

	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct btrfs_backref_node, list);
		btrfs_backref_cleanup_node(cache, node);
	}

	while (!list_empty(&cache->leaves)) {
		node = list_entry(cache->leaves.next,
				  struct btrfs_backref_node, lower);
		btrfs_backref_cleanup_node(cache, node);
	}

	cache->last_trans = 0;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		ASSERT(list_empty(&cache->pending[i]));
	ASSERT(list_empty(&cache->pending_edge));
	ASSERT(list_empty(&cache->useless_node));
	ASSERT(list_empty(&cache->changed));
	ASSERT(list_empty(&cache->detached));
	ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
	ASSERT(!cache->nr_nodes);
	ASSERT(!cache->nr_edges);
}

/*
 * Handle direct tree backref
 *
 * Direct tree backref means the backref item shows its parent bytenr
 * directly. This is for SHARED_BLOCK_REF backref (keyed or inlined).
 *
 * @ref_key:	The converted backref key.
 *		For keyed backref, it's the item key.
 *		For inlined backref, objectid is the bytenr,
 *		type is btrfs_inline_ref_type, offset is
 *		btrfs_inline_ref_offset.
 */
static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
				      struct btrfs_key *ref_key,
				      struct btrfs_backref_node *cur)
{
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_node *upper;
	struct rb_node *rb_node;

	ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);

	/* Only reloc root uses backref pointing to itself */
	if (ref_key->objectid == ref_key->offset) {
		struct btrfs_root *root;

		cur->is_reloc_root = 1;
		/* Only reloc backref cache cares about a specific root */
		if (cache->is_reloc) {
			root = find_reloc_root(cache->fs_info, cur->bytenr);
			if (!root)
				return -ENOENT;
			cur->root = root;
		} else {
			/*
			 * For generic purpose backref cache, reloc root node
			 * is useless.
			 */
			list_add(&cur->list, &cache->useless_node);
		}
		return 0;
	}

	edge = btrfs_backref_alloc_edge(cache);
	if (!edge)
		return -ENOMEM;

	rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
	if (!rb_node) {
		/* Parent node not yet cached */
		upper = btrfs_backref_alloc_node(cache, ref_key->offset,
						 cur->level + 1);
		if (!upper) {
			btrfs_backref_free_edge(cache, edge);
			return -ENOMEM;
		}

		/*
		 * Backrefs for the upper level block aren't cached, add the
		 * block to the pending list
		 */
		list_add_tail(&edge->list[UPPER], &cache->pending_edge);
	} else {
		/* Parent node already cached */
		upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
		ASSERT(upper->checked);
		INIT_LIST_HEAD(&edge->list[UPPER]);
	}
	btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
	return 0;
}

/*
 * Handle indirect tree backref
 *
 * Indirect tree backref means we only know which tree the node belongs to.
 * We still need to do a tree search to find out the parents. This is for
 * TREE_BLOCK_REF backref (keyed or inlined).
 *
 * @ref_key:	The same as @ref_key in handle_direct_tree_backref()
 * @tree_key:	The first key of this tree block.
 * @path:	A clean (released) path, to avoid allocating a path every
 *		time the function gets called.
 */
static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
					struct btrfs_path *path,
					struct btrfs_key *ref_key,
					struct btrfs_key *tree_key,
					struct btrfs_backref_node *cur)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_backref_node *upper;
	struct btrfs_backref_node *lower;
	struct btrfs_backref_edge *edge;
	struct extent_buffer *eb;
	struct btrfs_root *root;
	struct rb_node *rb_node;
	int level;
	bool need_check = true;
	int ret;

	root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
	if (IS_ERR(root))
		return PTR_ERR(root);
	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		cur->cowonly = 1;

	if (btrfs_root_level(&root->root_item) == cur->level) {
		/* Tree root */
		ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
		/*
		 * For reloc backref cache, we may ignore reloc root. But for
		 * general purpose backref cache, we can't rely on
		 * btrfs_should_ignore_reloc_root() as it may conflict with
		 * the currently running relocation and lead to missing root.
		 *
		 * For general purpose backref cache, reloc root detection
		 * relies completely on direct backrefs (key->offset is the
		 * parent bytenr), thus only do such a check for reloc cache.
		 */
		if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
			btrfs_put_root(root);
			list_add(&cur->list, &cache->useless_node);
		} else {
			cur->root = root;
		}
		return 0;
	}

	level = cur->level + 1;

	/* Search the tree to find parent blocks referring to the block */
	path->search_commit_root = 1;
	path->skip_locking = 1;
	path->lowest_level = level;
	ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
	path->lowest_level = 0;
	if (ret < 0) {
		btrfs_put_root(root);
		return ret;
	}
	if (ret > 0 && path->slots[level] > 0)
		path->slots[level]--;

	eb = path->nodes[level];
	if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
		btrfs_err(fs_info,
"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
			  cur->bytenr, level - 1, root->root_key.objectid,
			  tree_key->objectid, tree_key->type, tree_key->offset);
		btrfs_put_root(root);
		ret = -ENOENT;
		goto out;
	}
	lower = cur;

	/* Add all nodes and edges in the path */
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level]) {
			ASSERT(btrfs_root_bytenr(&root->root_item) ==
			       lower->bytenr);
			/* Same as previous should_ignore_reloc_root() call */
			if (btrfs_should_ignore_reloc_root(root) &&
			    cache->is_reloc) {
				btrfs_put_root(root);
				list_add(&lower->list, &cache->useless_node);
			} else {
				lower->root = root;
			}
			break;
		}

		edge = btrfs_backref_alloc_edge(cache);
		if (!edge) {
			btrfs_put_root(root);
			ret = -ENOMEM;
			goto out;
		}

		eb = path->nodes[level];
		rb_node = rb_simple_search(&cache->rb_root, eb->start);
		if (!rb_node) {
			upper = btrfs_backref_alloc_node(cache, eb->start,
							 lower->level + 1);
			if (!upper) {
				btrfs_put_root(root);
				btrfs_backref_free_edge(cache, edge);
				ret = -ENOMEM;
				goto out;
			}
			upper->owner = btrfs_header_owner(eb);
			if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
				upper->cowonly = 1;

			/*
			 * If we know the block isn't shared we can avoid
			 * checking its backrefs.
			 */
			if (btrfs_block_can_be_shared(root, eb))
				upper->checked = 0;
			else
				upper->checked = 1;

			/*
			 * Add the block to the pending list if we need to
			 * check its backrefs; we only do this once while
			 * walking up a tree as we will catch anything else
			 * later on.
			 */
			if (!upper->checked && need_check) {
				need_check = false;
				list_add_tail(&edge->list[UPPER],
					      &cache->pending_edge);
			} else {
				if (upper->checked)
					need_check = true;
				INIT_LIST_HEAD(&edge->list[UPPER]);
			}
		} else {
			upper = rb_entry(rb_node, struct btrfs_backref_node,
					 rb_node);
			ASSERT(upper->checked);
			INIT_LIST_HEAD(&edge->list[UPPER]);
			if (!upper->owner)
				upper->owner = btrfs_header_owner(eb);
		}
		btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);

		if (rb_node) {
			btrfs_put_root(root);
			break;
		}
		lower = upper;
		upper = NULL;
	}
out:
	btrfs_release_path(path);
	return ret;
}

/*
 * Add backref node @cur into @cache.
 *
 * NOTE: Even if the function returns 0, @cur is not yet cached, as its upper
 *	 links aren't yet bi-directional. Use
 *	 btrfs_backref_finish_upper_links() to finish such linkage.
 *
 * @path:	Released path for indirect tree backref lookup
 * @iter:	Released backref iter for extent tree search
 * @node_key:	The first key of the tree block
 */
int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
				struct btrfs_path *path,
				struct btrfs_backref_iter *iter,
				struct btrfs_key *node_key,
				struct btrfs_backref_node *cur)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_node *exist;
	int ret;

	ret = btrfs_backref_iter_start(iter, cur->bytenr);
	if (ret < 0)
		return ret;
	/*
	 * We skip the first btrfs_tree_block_info, as we don't use the key
	 * stored in it, but fetch it from the tree block
	 */
	if (btrfs_backref_has_tree_block_info(iter)) {
		ret = btrfs_backref_iter_next(iter);
		if (ret < 0)
			goto out;
		/* No extra backref? This means the tree block is corrupted */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	WARN_ON(cur->checked);
	if (!list_empty(&cur->upper)) {
		/*
		 * The backref was added previously when processing backref of
		 * type BTRFS_TREE_BLOCK_REF_KEY
		 */
		ASSERT(list_is_singular(&cur->upper));
		edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
				  list[LOWER]);
		ASSERT(list_empty(&edge->list[UPPER]));
		exist = edge->node[UPPER];
		/*
		 * Add the upper level block to the pending list if we need
		 * to check its backrefs
		 */
		if (!exist->checked)
			list_add_tail(&edge->list[UPPER], &cache->pending_edge);
	} else {
		exist = NULL;
	}

	for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
		struct extent_buffer *eb;
		struct btrfs_key key;
		int type;

		cond_resched();
		eb = btrfs_backref_get_eb(iter);

		key.objectid = iter->bytenr;
		if (btrfs_backref_iter_is_inline_ref(iter)) {
			struct btrfs_extent_inline_ref *iref;

			/* Update key for inline backref */
			iref = (struct btrfs_extent_inline_ref *)
				((unsigned long)iter->cur_ptr);
			type = btrfs_get_extent_inline_ref_type(eb, iref,
							BTRFS_REF_TYPE_BLOCK);
			if (type == BTRFS_REF_TYPE_INVALID) {
				ret = -EUCLEAN;
				goto out;
			}
			key.type = type;
			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
		} else {
			key.type = iter->cur_key.type;
			key.offset = iter->cur_key.offset;
		}

		/*
		 * Parent node found and matches current inline ref, no need to
		 * rebuild this node for this inline ref
		 */
		if (exist &&
		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
		      exist->owner == key.offset) ||
		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
		      exist->bytenr == key.offset))) {
			exist = NULL;
			continue;
		}

		/* SHARED_BLOCK_REF means key.offset is the parent bytenr */
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
			ret = handle_direct_tree_backref(cache, &key, cur);
			if (ret < 0)
				goto out;
			continue;
		} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
			ret = -EINVAL;
			btrfs_print_v0_err(fs_info);
			btrfs_handle_fs_error(fs_info, ret, NULL);
			goto out;
		} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
			continue;
		}

		/*
		 * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset
		 * means the root objectid. We need to search the tree to get
		 * its parent bytenr.
		 */
		ret = handle_indirect_tree_backref(cache, path, &key, node_key,
						   cur);
		if (ret < 0)
			goto out;
	}
	ret = 0;
	cur->checked = 1;
	WARN_ON(exist);
out:
	btrfs_backref_iter_release(iter);
	return ret;
}

/*
 * Finish the upwards linkage created by btrfs_backref_add_tree_node()
 */
int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
				     struct btrfs_backref_node *start)
{
	struct list_head *useless_node = &cache->useless_node;
	struct btrfs_backref_edge *edge;
	struct rb_node *rb_node;
	LIST_HEAD(pending_edge);

	ASSERT(start->checked);

	/* Insert this node to cache if it's not COW-only */
	if (!start->cowonly) {
		rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
					   &start->rb_node);
		if (rb_node)
			btrfs_backref_panic(cache->fs_info, start->bytenr,
					    -EEXIST);
		list_add_tail(&start->lower, &cache->leaves);
	}

	/*
	 * Use breadth first search to iterate all related edges.
	 *
	 * The starting points are all the edges of this node
	 */
	list_for_each_entry(edge, &start->upper, list[LOWER])
		list_add_tail(&edge->list[UPPER], &pending_edge);

	while (!list_empty(&pending_edge)) {
		struct btrfs_backref_node *upper;
		struct btrfs_backref_node *lower;

		edge = list_first_entry(&pending_edge,
				struct btrfs_backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		upper = edge->node[UPPER];
		lower = edge->node[LOWER];

		/* Parent is detached, no need to keep any edges */
		if (upper->detached) {
			list_del(&edge->list[LOWER]);
			btrfs_backref_free_edge(cache, edge);

			/* Lower node is orphan, queue for cleanup */
			if (list_empty(&lower->upper))
				list_add(&lower->list, useless_node);
			continue;
		}

		/*
		 * All new nodes added in current build_backref_tree() haven't
		 * been linked to the cache rb tree.
		 * So if we have upper->rb_node populated, this means a cache
		 * hit. We only need to link the edge, as @upper and all its
		 * parents have already been linked.
		 */
		if (!RB_EMPTY_NODE(&upper->rb_node)) {
			if (upper->lowest) {
				list_del_init(&upper->lower);
				upper->lowest = 0;
			}

			list_add_tail(&edge->list[UPPER], &upper->lower);
			continue;
		}

		/* Sanity check, we shouldn't have any unchecked nodes */
		if (!upper->checked) {
			ASSERT(0);
			return -EUCLEAN;
		}

		/* Sanity check, COW-only node has non-COW-only parent */
		if (start->cowonly != upper->cowonly) {
			ASSERT(0);
			return -EUCLEAN;
		}

		/* Only cache non-COW-only (subvolume trees) tree blocks */
		if (!upper->cowonly) {
			rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
						   &upper->rb_node);
			if (rb_node) {
				btrfs_backref_panic(cache->fs_info,
						    upper->bytenr, -EEXIST);
				return -EUCLEAN;
			}
		}

		list_add_tail(&edge->list[UPPER], &upper->lower);

		/*
		 * Also queue all the parent edges of this uncached node
		 * to finish the upper linkage
		 */
		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER], &pending_edge);
	}
	return 0;
}

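/*
 * Illustrative only (not part of the original file): a hedged, single-node
 * sketch of the add/finish pairing described above, loosely modeled on how
 * relocation's build_backref_tree() drives this API. A real caller also
 * loops over cache->pending_edge to process the parent blocks queued by
 * btrfs_backref_add_tree_node(); that loop is omitted here, and the helper
 * name is hypothetical.
 */
static struct btrfs_backref_node __maybe_unused *example_build_node(
		struct btrfs_backref_cache *cache,
		struct btrfs_backref_iter *iter, struct btrfs_path *path,
		struct btrfs_key *node_key, int level, u64 bytenr)
{
	struct btrfs_backref_node *node;
	int ret;

	node = btrfs_backref_alloc_node(cache, bytenr, level);
	if (!node)
		return ERR_PTR(-ENOMEM);

	node->lowest = 1;
	/* First pass: collect all backrefs of @node (upward links only) */
	ret = btrfs_backref_add_tree_node(cache, path, iter, node_key, node);
	if (ret < 0)
		goto error;
	/* Second pass: make the upper links bi-directional */
	ret = btrfs_backref_finish_upper_links(cache, node);
	if (ret < 0)
		goto error;
	return node;
error:
	btrfs_backref_error_cleanup(cache, node);
	return ERR_PTR(ret);
}
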
void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
				 struct btrfs_backref_node *node)
{
	struct btrfs_backref_node *lower;
	struct btrfs_backref_node *upper;
	struct btrfs_backref_edge *edge;

	while (!list_empty(&cache->useless_node)) {
		lower = list_first_entry(&cache->useless_node,
					 struct btrfs_backref_node, list);
		list_del_init(&lower->list);
	}
	while (!list_empty(&cache->pending_edge)) {
		edge = list_first_entry(&cache->pending_edge,
				struct btrfs_backref_edge, list[UPPER]);
		list_del(&edge->list[UPPER]);
		list_del(&edge->list[LOWER]);
		lower = edge->node[LOWER];
		upper = edge->node[UPPER];
		btrfs_backref_free_edge(cache, edge);

		/*
		 * Lower is no longer linked to any upper backref nodes and
		 * isn't in the cache, we can free it ourselves.
		 */
		if (list_empty(&lower->upper) &&
		    RB_EMPTY_NODE(&lower->rb_node))
			list_add(&lower->list, &cache->useless_node);

		if (!RB_EMPTY_NODE(&upper->rb_node))
			continue;

		/* Add this guy's upper edges to the list to process */
		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER],
				      &cache->pending_edge);
		if (list_empty(&upper->upper))
			list_add(&upper->list, &cache->useless_node);
	}

	while (!list_empty(&cache->useless_node)) {
		lower = list_first_entry(&cache->useless_node,
					 struct btrfs_backref_node, list);
		list_del_init(&lower->list);
		if (lower == node)
			node = NULL;
		btrfs_backref_drop_node(cache, lower);
	}

	btrfs_backref_cleanup_node(cache, node);
	ASSERT(list_empty(&cache->useless_node) &&
	       list_empty(&cache->pending_edge));
3169}