// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
                      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                      const struct btrfs_key *ins_key, struct btrfs_path *path,
                      int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
                          struct extent_buffer *dst,
                          struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
                              struct extent_buffer *dst_buf,
                              struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
                    int level, int slot);

static const struct btrfs_csums {
        u16 size;
        const char *name;
} btrfs_csums[] = {
        [BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
};

int btrfs_super_csum_size(const struct btrfs_super_block *s)
{
        u16 t = btrfs_super_csum_type(s);

        /* csum type is validated at mount time */
        return btrfs_csums[t].size;
}

const char *btrfs_super_csum_name(u16 csum_type)
{
        /* csum type is validated at mount time */
        return btrfs_csums[csum_type].name;
}

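/*
 * Note: the table above is indexed by the on-disk csum type.  Supporting
 * another algorithm would mean adding a row here, e.g. a hypothetical
 * [BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" }, together
 * with the matching mount-time validation and on-disk format support.
 */
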
struct btrfs_path *btrfs_alloc_path(void)
{
        return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}

/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
        int i;

        for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
                if (!p->nodes[i] || !p->locks[i])
                        continue;
                /*
                 * If we currently have a spinning reader or writer lock this
                 * will bump the count of blocking holders and drop the
                 * spinlock.
                 */
                if (p->locks[i] == BTRFS_READ_LOCK) {
                        btrfs_set_lock_blocking_read(p->nodes[i]);
                        p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
                } else if (p->locks[i] == BTRFS_WRITE_LOCK) {
                        btrfs_set_lock_blocking_write(p->nodes[i]);
                        p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
                }
        }
}

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
        if (!p)
                return;
        btrfs_release_path(p);
        kmem_cache_free(btrfs_path_cachep, p);
}

/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers
 * held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
        int i;

        for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
                p->slots[i] = 0;
                if (!p->nodes[i])
                        continue;
                if (p->locks[i]) {
                        btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
                        p->locks[i] = 0;
                }
                free_extent_buffer(p->nodes[i]);
                p->nodes[i] = NULL;
        }
}

/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
        struct extent_buffer *eb;

        while (1) {
                rcu_read_lock();
                eb = rcu_dereference(root->node);

                /*
                 * RCU really hurts here: we could free up the root node
                 * because it was COWed but we may not get the new root node
                 * yet, so do the inc_not_zero dance.  If it doesn't work,
                 * synchronize_rcu and try again.
                 */
                if (atomic_inc_not_zero(&eb->refs)) {
                        rcu_read_unlock();
                        break;
                }
                rcu_read_unlock();
                synchronize_rcu();
        }
        return eb;
}

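/*
 * Example of the race the loop above guards against: a reader fetches
 * root->node, then a writer COWs the root and drops the last reference
 * to the old node.  atomic_inc_not_zero() fails once the refcount has
 * reached zero, so the reader waits out the grace period and retries,
 * picking up the new root on the next iteration.
 */
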
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
        struct extent_buffer *eb;

        while (1) {
                eb = btrfs_root_node(root);
                btrfs_tree_lock(eb);
                if (eb == root->node)
                        break;
                btrfs_tree_unlock(eb);
                free_extent_buffer(eb);
        }
        return eb;
}

/* loop around taking references on and read locking the root node of
 * the tree until you end up with a read lock on the root.  A locked
 * buffer is returned, with a reference held.
 */
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
        struct extent_buffer *eb;

        while (1) {
                eb = btrfs_root_node(root);
                btrfs_tree_read_lock(eb);
                if (eb == root->node)
                        break;
                btrfs_tree_read_unlock(eb);
                free_extent_buffer(eb);
        }
        return eb;
}

/* cowonly roots (everything not a reference counted cow subvolume) just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
            !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
                return;

        spin_lock(&fs_info->trans_lock);
        if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
                /* Want the extent tree to be the last on the list */
                if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID)
                        list_move_tail(&root->dirty_list,
                                       &fs_info->dirty_cowonly_roots);
                else
                        list_move(&root->dirty_list,
                                  &fs_info->dirty_cowonly_roots);
        }
        spin_unlock(&fs_info->trans_lock);
}

/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
                    struct btrfs_root *root,
                    struct extent_buffer *buf,
                    struct extent_buffer **cow_ret, u64 new_root_objectid)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct extent_buffer *cow;
        int ret = 0;
        int level;
        struct btrfs_disk_key disk_key;

        WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
                trans->transid != fs_info->running_transaction->transid);
        WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
                trans->transid != root->last_trans);

        level = btrfs_header_level(buf);
        if (level == 0)
                btrfs_item_key(buf, &disk_key, 0);
        else
                btrfs_node_key(buf, &disk_key, 0);

        cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
                                     &disk_key, level, buf->start, 0);
        if (IS_ERR(cow))
                return PTR_ERR(cow);

        copy_extent_buffer_full(cow, buf);
        btrfs_set_header_bytenr(cow, cow->start);
        btrfs_set_header_generation(cow, trans->transid);
        btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
        btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
                                     BTRFS_HEADER_FLAG_RELOC);
        if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
                btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
        else
                btrfs_set_header_owner(cow, new_root_objectid);

        write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

        WARN_ON(btrfs_header_generation(buf) > trans->transid);
        if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
                ret = btrfs_inc_ref(trans, root, cow, 1);
        else
                ret = btrfs_inc_ref(trans, root, cow, 0);
        if (ret) {
                btrfs_tree_unlock(cow);
                free_extent_buffer(cow);
                btrfs_abort_transaction(trans, ret);
                return ret;
        }

        btrfs_mark_buffer_dirty(cow);
        *cow_ret = cow;
        return 0;
}

enum mod_log_op {
        MOD_LOG_KEY_REPLACE,
        MOD_LOG_KEY_ADD,
        MOD_LOG_KEY_REMOVE,
        MOD_LOG_KEY_REMOVE_WHILE_FREEING,
        MOD_LOG_KEY_REMOVE_WHILE_MOVING,
        MOD_LOG_MOVE_KEYS,
        MOD_LOG_ROOT_REPLACE,
};

struct tree_mod_root {
        u64 logical;
        u8 level;
};

struct tree_mod_elem {
        struct rb_node node;
        u64 logical;
        u64 seq;
        enum mod_log_op op;

        /* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
        int slot;

        /* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
        u64 generation;

        /* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
        struct btrfs_disk_key key;
        u64 blockptr;

        /* this is used for op == MOD_LOG_MOVE_KEYS */
        struct {
                int dst_slot;
                int nr_items;
        } move;

        /* this is used for op == MOD_LOG_ROOT_REPLACE */
        struct tree_mod_root old_root;
};

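/*
 * Example: moving 3 key pointers from slot 5 to slot 2 within one node
 * is logged as a single MOD_LOG_MOVE_KEYS element with slot == 5,
 * move.dst_slot == 2 and move.nr_items == 3, preceded by one
 * MOD_LOG_KEY_REMOVE_WHILE_MOVING element for each of the overwritten
 * slots 2-4 (see tree_mod_log_insert_move() below).
 */
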
/*
 * Pull a new tree mod seq number for our operation.
 */
static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
{
        return atomic64_inc_return(&fs_info->tree_mod_seq);
}

/*
 * This adds a new blocker to the tree mod log's blocker list if the @elem
 * passed does not already have a sequence number set.  So when a caller
 * expects to record tree modifications, it must set elem->seq to zero
 * before calling btrfs_get_tree_mod_seq.
 * Returns a fresh, unused tree log modification sequence number, even if
 * no new blocker was added.
 */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
                           struct seq_list *elem)
{
        write_lock(&fs_info->tree_mod_log_lock);
        if (!elem->seq) {
                elem->seq = btrfs_inc_tree_mod_seq(fs_info);
                list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
        }
        write_unlock(&fs_info->tree_mod_log_lock);

        return elem->seq;
}

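/*
 * Typical usage, sketched: a tree walker that needs a stable view of the
 * b-trees (backref resolution, for example) brackets its work with
 *
 *      struct seq_list elem = SEQ_LIST_INIT(elem);
 *
 *      btrfs_get_tree_mod_seq(fs_info, &elem);
 *      ...                     (search, rewinding buffers with elem.seq)
 *      btrfs_put_tree_mod_seq(fs_info, &elem);
 *
 * so that no mod log elements newer than elem.seq are pruned while it runs.
 */
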
void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
                            struct seq_list *elem)
{
        struct rb_root *tm_root;
        struct rb_node *node;
        struct rb_node *next;
        struct seq_list *cur_elem;
        struct tree_mod_elem *tm;
        u64 min_seq = (u64)-1;
        u64 seq_putting = elem->seq;

        if (!seq_putting)
                return;

        write_lock(&fs_info->tree_mod_log_lock);
        list_del(&elem->list);
        elem->seq = 0;

        list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
                if (cur_elem->seq < min_seq) {
                        if (seq_putting > cur_elem->seq) {
                                /*
                                 * blocker with lower sequence number exists, we
                                 * cannot remove anything from the log
                                 */
                                write_unlock(&fs_info->tree_mod_log_lock);
                                return;
                        }
                        min_seq = cur_elem->seq;
                }
        }

        /*
         * anything that's lower than the lowest existing (read: blocked)
         * sequence number can be removed from the tree.
         */
        tm_root = &fs_info->tree_mod_log;
        for (node = rb_first(tm_root); node; node = next) {
                next = rb_next(node);
                tm = rb_entry(node, struct tree_mod_elem, node);
                if (tm->seq >= min_seq)
                        continue;
                rb_erase(node, tm_root);
                kfree(tm);
        }
        write_unlock(&fs_info->tree_mod_log_lock);
}

/*
 * key order of the log:
 *       node/leaf start address -> sequence
 *
 * The 'start address' is the logical address of the *new* root node
 * for root replace operations, or the logical address of the affected
 * block for all other operations.
 */
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
{
        struct rb_root *tm_root;
        struct rb_node **new;
        struct rb_node *parent = NULL;
        struct tree_mod_elem *cur;

        lockdep_assert_held_write(&fs_info->tree_mod_log_lock);

        tm->seq = btrfs_inc_tree_mod_seq(fs_info);

        tm_root = &fs_info->tree_mod_log;
        new = &tm_root->rb_node;
        while (*new) {
                cur = rb_entry(*new, struct tree_mod_elem, node);
                parent = *new;
                if (cur->logical < tm->logical)
                        new = &((*new)->rb_left);
                else if (cur->logical > tm->logical)
                        new = &((*new)->rb_right);
                else if (cur->seq < tm->seq)
                        new = &((*new)->rb_left);
                else if (cur->seq > tm->seq)
                        new = &((*new)->rb_right);
                else
                        return -EEXIST;
        }

        rb_link_node(&tm->node, parent, new);
        rb_insert_color(&tm->node, tm_root);
        return 0;
}

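/*
 * The rb-tree is thus keyed by the (logical, seq) pair: all modifications
 * of one block, e.g. a node at logical address 4096 with seqs 7 and 9,
 * sit next to each other and apart from those of a block at 8192.
 * __tree_mod_log_search() below walks the same ordering to find, for a
 * given start address, the element with the highest or lowest seq that
 * is still >= min_seq.
 */
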
/*
 * Determines if logging can be omitted.  Returns 1 if it can.  Otherwise, it
 * returns zero with the tree_mod_log_lock acquired.  The caller must hold
 * this until all tree mod log insertions are recorded in the rb tree and then
 * write unlock fs_info::tree_mod_log_lock.
 */
static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
                                    struct extent_buffer *eb)
{
        smp_mb();
        if (list_empty(&(fs_info)->tree_mod_seq_list))
                return 1;
        if (eb && btrfs_header_level(eb) == 0)
                return 1;

        write_lock(&fs_info->tree_mod_log_lock);
        if (list_empty(&(fs_info)->tree_mod_seq_list)) {
                write_unlock(&fs_info->tree_mod_log_lock);
                return 1;
        }

        return 0;
}

/* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
                                    struct extent_buffer *eb)
{
        smp_mb();
        if (list_empty(&(fs_info)->tree_mod_seq_list))
                return 0;
        if (eb && btrfs_header_level(eb) == 0)
                return 0;

        return 1;
}

static struct tree_mod_elem *
alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
                    enum mod_log_op op, gfp_t flags)
{
        struct tree_mod_elem *tm;

        tm = kzalloc(sizeof(*tm), flags);
        if (!tm)
                return NULL;

        tm->logical = eb->start;
        if (op != MOD_LOG_KEY_ADD) {
                btrfs_node_key(eb, &tm->key, slot);
                tm->blockptr = btrfs_node_blockptr(eb, slot);
        }
        tm->op = op;
        tm->slot = slot;
        tm->generation = btrfs_node_ptr_generation(eb, slot);
        RB_CLEAR_NODE(&tm->node);

        return tm;
}

static noinline int tree_mod_log_insert_key(struct extent_buffer *eb, int slot,
                enum mod_log_op op, gfp_t flags)
{
        struct tree_mod_elem *tm;
        int ret;

        if (!tree_mod_need_log(eb->fs_info, eb))
                return 0;

        tm = alloc_tree_mod_elem(eb, slot, op, flags);
        if (!tm)
                return -ENOMEM;

        if (tree_mod_dont_log(eb->fs_info, eb)) {
                kfree(tm);
                return 0;
        }

        ret = __tree_mod_log_insert(eb->fs_info, tm);
        write_unlock(&eb->fs_info->tree_mod_log_lock);
        if (ret)
                kfree(tm);

        return ret;
}

static noinline int tree_mod_log_insert_move(struct extent_buffer *eb,
                int dst_slot, int src_slot, int nr_items)
{
        struct tree_mod_elem *tm = NULL;
        struct tree_mod_elem **tm_list = NULL;
        int ret = 0;
        int i;
        int locked = 0;

        if (!tree_mod_need_log(eb->fs_info, eb))
                return 0;

        tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), GFP_NOFS);
        if (!tm_list)
                return -ENOMEM;

        tm = kzalloc(sizeof(*tm), GFP_NOFS);
        if (!tm) {
                ret = -ENOMEM;
                goto free_tms;
        }

        tm->logical = eb->start;
        tm->slot = src_slot;
        tm->move.dst_slot = dst_slot;
        tm->move.nr_items = nr_items;
        tm->op = MOD_LOG_MOVE_KEYS;

        for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
                tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
                    MOD_LOG_KEY_REMOVE_WHILE_MOVING, GFP_NOFS);
                if (!tm_list[i]) {
                        ret = -ENOMEM;
                        goto free_tms;
                }
        }

        if (tree_mod_dont_log(eb->fs_info, eb))
                goto free_tms;
        locked = 1;

        /*
         * When we overwrite something during the move, we log these removals.
         * This can only happen when we move towards the beginning of the
         * buffer, i.e. dst_slot < src_slot.
         */
        for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
                ret = __tree_mod_log_insert(eb->fs_info, tm_list[i]);
                if (ret)
                        goto free_tms;
        }

        ret = __tree_mod_log_insert(eb->fs_info, tm);
        if (ret)
                goto free_tms;
        write_unlock(&eb->fs_info->tree_mod_log_lock);
        kfree(tm_list);

        return 0;
free_tms:
        for (i = 0; i < nr_items; i++) {
                if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
                        rb_erase(&tm_list[i]->node, &eb->fs_info->tree_mod_log);
                kfree(tm_list[i]);
        }
        if (locked)
                write_unlock(&eb->fs_info->tree_mod_log_lock);
        kfree(tm_list);
        kfree(tm);

        return ret;
}

static inline int
__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
                       struct tree_mod_elem **tm_list,
                       int nritems)
{
        int i, j;
        int ret;

        for (i = nritems - 1; i >= 0; i--) {
                ret = __tree_mod_log_insert(fs_info, tm_list[i]);
                if (ret) {
                        for (j = nritems - 1; j > i; j--)
                                rb_erase(&tm_list[j]->node,
                                         &fs_info->tree_mod_log);
                        return ret;
                }
        }

        return 0;
}

static noinline int tree_mod_log_insert_root(struct extent_buffer *old_root,
                         struct extent_buffer *new_root, int log_removal)
{
        struct btrfs_fs_info *fs_info = old_root->fs_info;
        struct tree_mod_elem *tm = NULL;
        struct tree_mod_elem **tm_list = NULL;
        int nritems = 0;
        int ret = 0;
        int i;

        if (!tree_mod_need_log(fs_info, NULL))
                return 0;

        if (log_removal && btrfs_header_level(old_root) > 0) {
                nritems = btrfs_header_nritems(old_root);
                tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
                                  GFP_NOFS);
                if (!tm_list) {
                        ret = -ENOMEM;
                        goto free_tms;
                }
                for (i = 0; i < nritems; i++) {
                        tm_list[i] = alloc_tree_mod_elem(old_root, i,
                            MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
                        if (!tm_list[i]) {
                                ret = -ENOMEM;
                                goto free_tms;
                        }
                }
        }

        tm = kzalloc(sizeof(*tm), GFP_NOFS);
        if (!tm) {
                ret = -ENOMEM;
                goto free_tms;
        }

        tm->logical = new_root->start;
        tm->old_root.logical = old_root->start;
        tm->old_root.level = btrfs_header_level(old_root);
        tm->generation = btrfs_header_generation(old_root);
        tm->op = MOD_LOG_ROOT_REPLACE;

        if (tree_mod_dont_log(fs_info, NULL))
                goto free_tms;

        if (tm_list)
                ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
        if (!ret)
                ret = __tree_mod_log_insert(fs_info, tm);

        write_unlock(&fs_info->tree_mod_log_lock);
        if (ret)
                goto free_tms;
        kfree(tm_list);

        return ret;

free_tms:
        if (tm_list) {
                for (i = 0; i < nritems; i++)
                        kfree(tm_list[i]);
                kfree(tm_list);
        }
        kfree(tm);

        return ret;
}

static struct tree_mod_elem *
__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
                      int smallest)
{
        struct rb_root *tm_root;
        struct rb_node *node;
        struct tree_mod_elem *cur = NULL;
        struct tree_mod_elem *found = NULL;

        read_lock(&fs_info->tree_mod_log_lock);
        tm_root = &fs_info->tree_mod_log;
        node = tm_root->rb_node;
        while (node) {
                cur = rb_entry(node, struct tree_mod_elem, node);
                if (cur->logical < start) {
                        node = node->rb_left;
                } else if (cur->logical > start) {
                        node = node->rb_right;
                } else if (cur->seq < min_seq) {
                        node = node->rb_left;
                } else if (!smallest) {
                        /* we want the node with the highest seq */
                        if (found)
                                BUG_ON(found->seq > cur->seq);
                        found = cur;
                        node = node->rb_left;
                } else if (cur->seq > min_seq) {
                        /* we want the node with the smallest seq */
                        if (found)
                                BUG_ON(found->seq < cur->seq);
                        found = cur;
                        node = node->rb_right;
                } else {
                        found = cur;
                        break;
                }
        }
        read_unlock(&fs_info->tree_mod_log_lock);

        return found;
}

/*
 * this returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item).  any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
                           u64 min_seq)
{
        return __tree_mod_log_search(fs_info, start, min_seq, 1);
}

/*
 * this returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item).  any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
{
        return __tree_mod_log_search(fs_info, start, min_seq, 0);
}

static noinline int tree_mod_log_eb_copy(struct extent_buffer *dst,
                struct extent_buffer *src, unsigned long dst_offset,
                unsigned long src_offset, int nr_items)
{
        struct btrfs_fs_info *fs_info = dst->fs_info;
        int ret = 0;
        struct tree_mod_elem **tm_list = NULL;
        struct tree_mod_elem **tm_list_add, **tm_list_rem;
        int i;
        int locked = 0;

        if (!tree_mod_need_log(fs_info, NULL))
                return 0;

        if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
                return 0;

        tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
                          GFP_NOFS);
        if (!tm_list)
                return -ENOMEM;

        tm_list_add = tm_list;
        tm_list_rem = tm_list + nr_items;
        for (i = 0; i < nr_items; i++) {
                tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
                    MOD_LOG_KEY_REMOVE, GFP_NOFS);
                if (!tm_list_rem[i]) {
                        ret = -ENOMEM;
                        goto free_tms;
                }

                tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
                    MOD_LOG_KEY_ADD, GFP_NOFS);
                if (!tm_list_add[i]) {
                        ret = -ENOMEM;
                        goto free_tms;
                }
        }

        if (tree_mod_dont_log(fs_info, NULL))
                goto free_tms;
        locked = 1;

        for (i = 0; i < nr_items; i++) {
                ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
                if (ret)
                        goto free_tms;
                ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
                if (ret)
                        goto free_tms;
        }

        write_unlock(&fs_info->tree_mod_log_lock);
        kfree(tm_list);

        return 0;

free_tms:
        for (i = 0; i < nr_items * 2; i++) {
                if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
                        rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
                kfree(tm_list[i]);
        }
        if (locked)
                write_unlock(&fs_info->tree_mod_log_lock);
        kfree(tm_list);

        return ret;
}

static noinline int tree_mod_log_free_eb(struct extent_buffer *eb)
{
        struct tree_mod_elem **tm_list = NULL;
        int nritems = 0;
        int i;
        int ret = 0;

        if (btrfs_header_level(eb) == 0)
                return 0;

        if (!tree_mod_need_log(eb->fs_info, NULL))
                return 0;

        nritems = btrfs_header_nritems(eb);
        tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
        if (!tm_list)
                return -ENOMEM;

        for (i = 0; i < nritems; i++) {
                tm_list[i] = alloc_tree_mod_elem(eb, i,
                    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
                if (!tm_list[i]) {
                        ret = -ENOMEM;
                        goto free_tms;
                }
        }

        if (tree_mod_dont_log(eb->fs_info, eb))
                goto free_tms;

        ret = __tree_mod_log_free_eb(eb->fs_info, tm_list, nritems);
        write_unlock(&eb->fs_info->tree_mod_log_lock);
        if (ret)
                goto free_tms;
        kfree(tm_list);

        return 0;

free_tms:
        for (i = 0; i < nritems; i++)
                kfree(tm_list[i]);
        kfree(tm_list);

        return ret;
}

/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
                              struct extent_buffer *buf)
{
        /*
         * Tree blocks not in reference counted trees and tree roots
         * are never shared.  If a block was allocated after the last
         * snapshot and the block was not allocated by tree relocation,
         * we know the block is not shared.
         */
        if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
            buf != root->node && buf != root->commit_root &&
            (btrfs_header_generation(buf) <=
             btrfs_root_last_snapshot(&root->root_item) ||
             btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
                return 1;

        return 0;
}

static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
                                       struct btrfs_root *root,
                                       struct extent_buffer *buf,
                                       struct extent_buffer *cow,
                                       int *last_ref)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        u64 refs;
        u64 owner;
        u64 flags;
        u64 new_flags = 0;
        int ret;

        /*
         * Backrefs update rules:
         *
         * Always use full backrefs for extent pointers in tree blocks
         * allocated by tree relocation.
         *
         * If a shared tree block is no longer referenced by its owner
         * tree (btrfs_header_owner(buf) == root->root_key.objectid),
         * use full backrefs for extent pointers in tree block.
         *
         * If a tree block is being relocated
         * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
         * use full backrefs for extent pointers in tree block.
         * The reason for this is that some operations (such as drop
         * tree) are only allowed for blocks that use full backrefs.
         */

        if (btrfs_block_can_be_shared(root, buf)) {
                ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
                                               btrfs_header_level(buf), 1,
                                               &refs, &flags);
                if (ret)
                        return ret;
                if (refs == 0) {
                        ret = -EROFS;
                        btrfs_handle_fs_error(fs_info, ret, NULL);
                        return ret;
                }
        } else {
                refs = 1;
                if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
                    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
                        flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
                else
                        flags = 0;
        }

        owner = btrfs_header_owner(buf);
        BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
               !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

        if (refs > 1) {
                if ((owner == root->root_key.objectid ||
                     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
                    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
                        ret = btrfs_inc_ref(trans, root, buf, 1);
                        if (ret)
                                return ret;

                        if (root->root_key.objectid ==
                            BTRFS_TREE_RELOC_OBJECTID) {
                                ret = btrfs_dec_ref(trans, root, buf, 0);
                                if (ret)
                                        return ret;
                                ret = btrfs_inc_ref(trans, root, cow, 1);
                                if (ret)
                                        return ret;
                        }
                        new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
                } else {
                        if (root->root_key.objectid ==
                            BTRFS_TREE_RELOC_OBJECTID)
                                ret = btrfs_inc_ref(trans, root, cow, 1);
                        else
                                ret = btrfs_inc_ref(trans, root, cow, 0);
                        if (ret)
                                return ret;
                }
                if (new_flags != 0) {
                        int level = btrfs_header_level(buf);

                        ret = btrfs_set_disk_extent_flags(trans,
                                                          buf->start,
                                                          buf->len,
                                                          new_flags, level, 0);
                        if (ret)
                                return ret;
                }
        } else {
                if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
                        if (root->root_key.objectid ==
                            BTRFS_TREE_RELOC_OBJECTID)
                                ret = btrfs_inc_ref(trans, root, cow, 1);
                        else
                                ret = btrfs_inc_ref(trans, root, cow, 0);
                        if (ret)
                                return ret;
                        ret = btrfs_dec_ref(trans, root, buf, 1);
                        if (ret)
                                return ret;
                }
                btrfs_clean_tree_block(buf);
                *last_ref = 1;
        }
        return 0;
}

static struct extent_buffer *alloc_tree_block_no_bg_flush(
                                          struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          u64 parent_start,
                                          const struct btrfs_disk_key *disk_key,
                                          int level,
                                          u64 hint,
                                          u64 empty_size)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct extent_buffer *ret;

        /*
         * If we are COWing a node/leaf from the extent, chunk, device or free
         * space trees, make sure that we do not finish block group creation of
         * pending block groups.  We do this to avoid a deadlock.
         * COWing can result in allocation of a new chunk, and flushing pending
         * block groups (btrfs_create_pending_block_groups()) can be triggered
         * when finishing allocation of a new chunk.  Creation of a pending
         * block group modifies the extent, chunk, device and free space trees,
         * therefore we could deadlock with ourselves since we are holding a
         * lock on an extent buffer that btrfs_create_pending_block_groups()
         * may try to COW later.
         * For similar reasons, we also need to delay flushing pending block
         * groups when splitting a leaf or node, from one of those trees, since
         * we are holding a write lock on it and its parent or when inserting a
         * new root node for one of those trees.
         */
        if (root == fs_info->extent_root ||
            root == fs_info->chunk_root ||
            root == fs_info->dev_root ||
            root == fs_info->free_space_root)
                trans->can_flush_pending_bgs = false;

        ret = btrfs_alloc_tree_block(trans, root, parent_start,
                                     root->root_key.objectid, disk_key, level,
                                     hint, empty_size);
        trans->can_flush_pending_bgs = true;

        return ret;
}

/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             struct extent_buffer *buf,
                             struct extent_buffer *parent, int parent_slot,
                             struct extent_buffer **cow_ret,
                             u64 search_start, u64 empty_size)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_disk_key disk_key;
        struct extent_buffer *cow;
        int level, ret;
        int last_ref = 0;
        int unlock_orig = 0;
        u64 parent_start = 0;

        if (*cow_ret == buf)
                unlock_orig = 1;

        btrfs_assert_tree_locked(buf);

        WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
                trans->transid != fs_info->running_transaction->transid);
        WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
                trans->transid != root->last_trans);

        level = btrfs_header_level(buf);

        if (level == 0)
                btrfs_item_key(buf, &disk_key, 0);
        else
                btrfs_node_key(buf, &disk_key, 0);

        if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
                parent_start = parent->start;

        cow = alloc_tree_block_no_bg_flush(trans, root, parent_start, &disk_key,
                                           level, search_start, empty_size);
        if (IS_ERR(cow))
                return PTR_ERR(cow);

        /* cow is set to blocking by btrfs_init_new_buffer */

        copy_extent_buffer_full(cow, buf);
        btrfs_set_header_bytenr(cow, cow->start);
        btrfs_set_header_generation(cow, trans->transid);
        btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
        btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
                                     BTRFS_HEADER_FLAG_RELOC);
        if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
                btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
        else
                btrfs_set_header_owner(cow, root->root_key.objectid);

        write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

        ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
        if (ret) {
                btrfs_tree_unlock(cow);
                free_extent_buffer(cow);
                btrfs_abort_transaction(trans, ret);
                return ret;
        }

        if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
                ret = btrfs_reloc_cow_block(trans, root, buf, cow);
                if (ret) {
                        btrfs_tree_unlock(cow);
                        free_extent_buffer(cow);
                        btrfs_abort_transaction(trans, ret);
                        return ret;
                }
        }

        if (buf == root->node) {
                WARN_ON(parent && parent != buf);
                if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
                    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
                        parent_start = buf->start;

                extent_buffer_get(cow);
                ret = tree_mod_log_insert_root(root->node, cow, 1);
                BUG_ON(ret < 0);
                rcu_assign_pointer(root->node, cow);

                btrfs_free_tree_block(trans, root, buf, parent_start,
                                      last_ref);
                free_extent_buffer(buf);
                add_root_to_dirty_list(root);
        } else {
                WARN_ON(trans->transid != btrfs_header_generation(parent));
                tree_mod_log_insert_key(parent, parent_slot,
                                        MOD_LOG_KEY_REPLACE, GFP_NOFS);
                btrfs_set_node_blockptr(parent, parent_slot,
                                        cow->start);
                btrfs_set_node_ptr_generation(parent, parent_slot,
                                              trans->transid);
                btrfs_mark_buffer_dirty(parent);
                if (last_ref) {
                        ret = tree_mod_log_free_eb(buf);
                        if (ret) {
                                btrfs_tree_unlock(cow);
                                free_extent_buffer(cow);
                                btrfs_abort_transaction(trans, ret);
                                return ret;
                        }
                }
                btrfs_free_tree_block(trans, root, buf, parent_start,
                                      last_ref);
        }
        if (unlock_orig)
                btrfs_tree_unlock(buf);
        free_extent_buffer_stale(buf);
        btrfs_mark_buffer_dirty(cow);
        *cow_ret = cow;
        return 0;
}

/*
 * returns the logical address of the oldest predecessor of the given root.
 * entries older than time_seq are ignored.
 */
static struct tree_mod_elem *__tree_mod_log_oldest_root(
                struct extent_buffer *eb_root, u64 time_seq)
{
        struct tree_mod_elem *tm;
        struct tree_mod_elem *found = NULL;
        u64 root_logical = eb_root->start;
        int looped = 0;

        if (!time_seq)
                return NULL;

        /*
         * the very last operation that's logged for a root is the
         * replacement operation (if it is replaced at all).  this has
         * the logical address of the *new* root, making it the very
         * first operation that's logged for this root.
         */
        while (1) {
                tm = tree_mod_log_search_oldest(eb_root->fs_info, root_logical,
                                                time_seq);
                if (!looped && !tm)
                        return NULL;
                /*
                 * if there are no tree operations for the oldest root, we
                 * simply return it.  this should only happen if that (old)
                 * root is at level 0.
                 */
                if (!tm)
                        break;

                /*
                 * if there's an operation that's not a root replacement, we
                 * found the oldest version of our root.  normally, we'll find
                 * a MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
                 */
                if (tm->op != MOD_LOG_ROOT_REPLACE)
                        break;

                found = tm;
                root_logical = tm->old_root.logical;
                looped = 1;
        }

        /* if there's no old root to return, return what we found instead */
        if (!found)
                found = tm;

        return found;
}

/*
 * tm is a pointer to the first operation to rewind within eb.  then, all
 * previous operations will be rewound (until we reach something older than
 * time_seq).
 */
static void
__tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
                      u64 time_seq, struct tree_mod_elem *first_tm)
{
        u32 n;
        struct rb_node *next;
        struct tree_mod_elem *tm = first_tm;
        unsigned long o_dst;
        unsigned long o_src;
        unsigned long p_size = sizeof(struct btrfs_key_ptr);

        n = btrfs_header_nritems(eb);
        read_lock(&fs_info->tree_mod_log_lock);
        while (tm && tm->seq >= time_seq) {
                /*
                 * all the operations are recorded with the operator used for
                 * the modification.  as we're going backwards, we do the
                 * opposite of each operation here.
                 */
                switch (tm->op) {
                case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
                        BUG_ON(tm->slot < n);
                        /* Fallthrough */
                case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
                case MOD_LOG_KEY_REMOVE:
                        btrfs_set_node_key(eb, &tm->key, tm->slot);
                        btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
                        btrfs_set_node_ptr_generation(eb, tm->slot,
                                                      tm->generation);
                        n++;
                        break;
                case MOD_LOG_KEY_REPLACE:
                        BUG_ON(tm->slot >= n);
                        btrfs_set_node_key(eb, &tm->key, tm->slot);
                        btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
                        btrfs_set_node_ptr_generation(eb, tm->slot,
                                                      tm->generation);
                        break;
                case MOD_LOG_KEY_ADD:
                        /* if a move operation is needed it's in the log */
                        n--;
                        break;
                case MOD_LOG_MOVE_KEYS:
                        o_dst = btrfs_node_key_ptr_offset(tm->slot);
                        o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
                        memmove_extent_buffer(eb, o_dst, o_src,
                                              tm->move.nr_items * p_size);
                        break;
                case MOD_LOG_ROOT_REPLACE:
                        /*
                         * this operation is special.  for roots, this must be
                         * handled explicitly before rewinding.
                         * for non-roots, this operation may exist if the node
                         * was a root: root A -> child B; then A gets empty and
                         * B is promoted to the new root.  in the mod log, we'll
                         * have a root-replace operation for B, a tree block
                         * that is not a root.  we simply ignore that operation.
                         */
                        break;
                }
                next = rb_next(&tm->node);
                if (!next)
                        break;
                tm = rb_entry(next, struct tree_mod_elem, node);
                if (tm->logical != first_tm->logical)
                        break;
        }
        read_unlock(&fs_info->tree_mod_log_lock);
        btrfs_set_header_nritems(eb, n);
}

/*
 * Called with eb read locked.  If the buffer cannot be rewound, the same
 * buffer is returned.  If rewind operations happen, a fresh buffer is
 * returned.  The returned buffer is always read-locked.  If the returned
 * buffer is not the input buffer, the lock on the input buffer is released
 * and the input buffer is freed (its refcount is decremented).
 */
static struct extent_buffer *
tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
                    struct extent_buffer *eb, u64 time_seq)
{
        struct extent_buffer *eb_rewin;
        struct tree_mod_elem *tm;

        if (!time_seq)
                return eb;

        if (btrfs_header_level(eb) == 0)
                return eb;

        tm = tree_mod_log_search(fs_info, eb->start, time_seq);
        if (!tm)
                return eb;

        btrfs_set_path_blocking(path);
        btrfs_set_lock_blocking_read(eb);

        if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
                BUG_ON(tm->slot != 0);
                eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
                if (!eb_rewin) {
                        btrfs_tree_read_unlock_blocking(eb);
                        free_extent_buffer(eb);
                        return NULL;
                }
                btrfs_set_header_bytenr(eb_rewin, eb->start);
                btrfs_set_header_backref_rev(eb_rewin,
                                             btrfs_header_backref_rev(eb));
                btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
                btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
        } else {
                eb_rewin = btrfs_clone_extent_buffer(eb);
                if (!eb_rewin) {
                        btrfs_tree_read_unlock_blocking(eb);
                        free_extent_buffer(eb);
                        return NULL;
                }
        }

        btrfs_tree_read_unlock_blocking(eb);
        free_extent_buffer(eb);

        btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb_rewin),
                                       eb_rewin, btrfs_header_level(eb_rewin));
        btrfs_tree_read_lock(eb_rewin);
        __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
        WARN_ON(btrfs_header_nritems(eb_rewin) >
                BTRFS_NODEPTRS_PER_BLOCK(fs_info));

        return eb_rewin;
}

/*
 * get_old_root() rewinds the state of @root's root node to the given @time_seq
 * value.  If there are no changes, the current root->root_node is returned.  If
 * anything changed in between, there's a fresh buffer allocated on which the
 * rewind operations are done.  In any case, the returned buffer is read
 * locked.  Returns NULL on error (with no locks held).
 */
static inline struct extent_buffer *
get_old_root(struct btrfs_root *root, u64 time_seq)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct tree_mod_elem *tm;
        struct extent_buffer *eb = NULL;
        struct extent_buffer *eb_root;
        u64 eb_root_owner = 0;
        struct extent_buffer *old;
        struct tree_mod_root *old_root = NULL;
        u64 old_generation = 0;
        u64 logical;
        int level;

        eb_root = btrfs_read_lock_root_node(root);
        tm = __tree_mod_log_oldest_root(eb_root, time_seq);
        if (!tm)
                return eb_root;

        if (tm->op == MOD_LOG_ROOT_REPLACE) {
                old_root = &tm->old_root;
                old_generation = tm->generation;
                logical = old_root->logical;
                level = old_root->level;
        } else {
                logical = eb_root->start;
                level = btrfs_header_level(eb_root);
        }

        tm = tree_mod_log_search(fs_info, logical, time_seq);
        if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
                btrfs_tree_read_unlock(eb_root);
                free_extent_buffer(eb_root);
                old = read_tree_block(fs_info, logical, 0, level, NULL);
                if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
                        if (!IS_ERR(old))
                                free_extent_buffer(old);
                        btrfs_warn(fs_info,
                                   "failed to read tree block %llu from get_old_root",
                                   logical);
                } else {
                        struct tree_mod_elem *tm2;

                        btrfs_tree_read_lock(old);
                        eb = btrfs_clone_extent_buffer(old);
                        /*
                         * After the lookup for the most recent tree mod operation
                         * above and before we locked and cloned the extent buffer
                         * 'old', a new tree mod log operation may have been added.
                         * So lookup for a more recent one to make sure the number
                         * of mod log operations we replay is consistent with the
                         * number of items we have in the cloned extent buffer,
                         * otherwise we can hit a BUG_ON when rewinding the extent
                         * buffer.
                         */
                        tm2 = tree_mod_log_search(fs_info, logical, time_seq);
                        btrfs_tree_read_unlock(old);
                        free_extent_buffer(old);
                        ASSERT(tm2);
                        ASSERT(tm2 == tm || tm2->seq > tm->seq);
                        if (!tm2 || tm2->seq < tm->seq) {
                                free_extent_buffer(eb);
                                return NULL;
                        }
                        tm = tm2;
                }
        } else if (old_root) {
                eb_root_owner = btrfs_header_owner(eb_root);
                btrfs_tree_read_unlock(eb_root);
                free_extent_buffer(eb_root);
                eb = alloc_dummy_extent_buffer(fs_info, logical);
        } else {
                btrfs_set_lock_blocking_read(eb_root);
                eb = btrfs_clone_extent_buffer(eb_root);
                btrfs_tree_read_unlock_blocking(eb_root);
                free_extent_buffer(eb_root);
        }

        if (!eb)
                return NULL;
        if (old_root) {
                btrfs_set_header_bytenr(eb, eb->start);
                btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
                btrfs_set_header_owner(eb, eb_root_owner);
                btrfs_set_header_level(eb, old_root->level);
                btrfs_set_header_generation(eb, old_generation);
        }
        btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), eb,
                                       btrfs_header_level(eb));
        btrfs_tree_read_lock(eb);
        if (tm)
                __tree_mod_log_rewind(fs_info, eb, time_seq, tm);
        else
                WARN_ON(btrfs_header_level(eb) != 0);
        WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(fs_info));

        return eb;
}

int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
{
        struct tree_mod_elem *tm;
        int level;
        struct extent_buffer *eb_root = btrfs_root_node(root);

        tm = __tree_mod_log_oldest_root(eb_root, time_seq);
        if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
                level = tm->old_root.level;
        } else {
                level = btrfs_header_level(eb_root);
        }
        free_extent_buffer(eb_root);

        return level;
}

static inline int should_cow_block(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   struct extent_buffer *buf)
{
        if (btrfs_is_testing(root->fs_info))
                return 0;

        /* Ensure we can see the FORCE_COW bit */
        smp_mb__before_atomic();

        /*
         * We do not need to cow a block if
         * 1) this block is not created or changed in this transaction;
         * 2) this block does not belong to TREE_RELOC tree;
         * 3) the root is not forced COW.
         *
         * What is forced COW:
         *    when we create snapshot during committing the transaction,
         *    after we've finished copying src root, we must COW the shared
         *    block to ensure the metadata consistency.
         */
        if (btrfs_header_generation(buf) == trans->transid &&
            !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
            !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
              btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
            !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
                return 0;
        return 1;
}

/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't COWed more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
                    struct btrfs_root *root, struct extent_buffer *buf,
                    struct extent_buffer *parent, int parent_slot,
                    struct extent_buffer **cow_ret)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        u64 search_start;
        int ret;

        if (test_bit(BTRFS_ROOT_DELETING, &root->state))
                btrfs_err(fs_info,
                        "COW'ing blocks on a fs root that's being dropped");

        if (trans->transaction != fs_info->running_transaction)
                WARN(1, KERN_CRIT "trans %llu running %llu\n",
                       trans->transid,
                       fs_info->running_transaction->transid);

        if (trans->transid != fs_info->generation)
                WARN(1, KERN_CRIT "trans %llu running %llu\n",
                       trans->transid, fs_info->generation);

        if (!should_cow_block(trans, root, buf)) {
                trans->dirty = true;
                *cow_ret = buf;
                return 0;
        }

        search_start = buf->start & ~((u64)SZ_1G - 1);

        if (parent)
                btrfs_set_lock_blocking_write(parent);
        btrfs_set_lock_blocking_write(buf);

        /*
         * Before CoWing this block for later modification, check if it's
         * the subtree root and do the delayed subtree trace if needed.
         *
         * Also we don't care about the error, as it's handled internally.
         */
        btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
        ret = __btrfs_cow_block(trans, root, buf, parent,
                                 parent_slot, cow_ret, search_start, 0);

        trace_btrfs_cow_block(root, buf, *cow_ret);

        return ret;
}

/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
        if (blocknr < other && other - (blocknr + blocksize) < 32768)
                return 1;
        if (blocknr > other && blocknr - (other + blocksize) < 32768)
                return 1;
        return 0;
}

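/*
 * Worked example with a 16KiB blocksize: blocks at 0 and 40960 are close
 * (40960 - (0 + 16384) = 24576 < 32768), while blocks at 0 and 65536 are
 * not (65536 - 16384 = 49152), so the defrag code below would COW the
 * latter to bring disk order closer to key order.
 */
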
/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(const struct btrfs_disk_key *disk,
                     const struct btrfs_key *k2)
{
        struct btrfs_key k1;

        btrfs_disk_key_to_cpu(&k1, disk);

        return btrfs_comp_cpu_keys(&k1, k2);
}

/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
{
        if (k1->objectid > k2->objectid)
                return 1;
        if (k1->objectid < k2->objectid)
                return -1;
        if (k1->type > k2->type)
                return 1;
        if (k1->type < k2->type)
                return -1;
        if (k1->offset > k2->offset)
                return 1;
        if (k1->offset < k2->offset)
                return -1;
        return 0;
}

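/*
 * Keys therefore sort lexicographically on (objectid, type, offset).
 * For example (257 DIR_ITEM 0) < (257 EXTENT_DATA 0) < (258 INODE_ITEM 0),
 * since BTRFS_DIR_ITEM_KEY (84) < BTRFS_EXTENT_DATA_KEY (108) and
 * 257 < 258.  Every leaf and node in the b-tree maintains this ordering.
 */
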
/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct extent_buffer *parent,
                       int start_slot, u64 *last_ret,
                       struct btrfs_key *progress)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct extent_buffer *cur;
        u64 blocknr;
        u64 gen;
        u64 search_start = *last_ret;
        u64 last_block = 0;
        u64 other;
        u32 parent_nritems;
        int end_slot;
        int i;
        int err = 0;
        int parent_level;
        int uptodate;
        u32 blocksize;
        int progress_passed = 0;
        struct btrfs_disk_key disk_key;

        parent_level = btrfs_header_level(parent);

        WARN_ON(trans->transaction != fs_info->running_transaction);
        WARN_ON(trans->transid != fs_info->generation);

        parent_nritems = btrfs_header_nritems(parent);
        blocksize = fs_info->nodesize;
        end_slot = parent_nritems - 1;

        if (parent_nritems <= 1)
                return 0;

        btrfs_set_lock_blocking_write(parent);

        for (i = start_slot; i <= end_slot; i++) {
                struct btrfs_key first_key;
                int close = 1;

                btrfs_node_key(parent, &disk_key, i);
                if (!progress_passed && comp_keys(&disk_key, progress) < 0)
                        continue;

                progress_passed = 1;
                blocknr = btrfs_node_blockptr(parent, i);
                gen = btrfs_node_ptr_generation(parent, i);
                btrfs_node_key_to_cpu(parent, &first_key, i);
                if (last_block == 0)
                        last_block = blocknr;

                if (i > 0) {
                        other = btrfs_node_blockptr(parent, i - 1);
                        close = close_blocks(blocknr, other, blocksize);
                }
                if (!close && i < end_slot) {
                        other = btrfs_node_blockptr(parent, i + 1);
                        close = close_blocks(blocknr, other, blocksize);
                }
                if (close) {
                        last_block = blocknr;
                        continue;
                }

                cur = find_extent_buffer(fs_info, blocknr);
                if (cur)
                        uptodate = btrfs_buffer_uptodate(cur, gen, 0);
                else
                        uptodate = 0;
                if (!cur || !uptodate) {
                        if (!cur) {
                                cur = read_tree_block(fs_info, blocknr, gen,
                                                      parent_level - 1,
                                                      &first_key);
                                if (IS_ERR(cur)) {
                                        return PTR_ERR(cur);
                                } else if (!extent_buffer_uptodate(cur)) {
                                        free_extent_buffer(cur);
                                        return -EIO;
                                }
                        } else if (!uptodate) {
                                err = btrfs_read_buffer(cur, gen,
                                                parent_level - 1, &first_key);
                                if (err) {
                                        free_extent_buffer(cur);
                                        return err;
                                }
                        }
                }
                if (search_start == 0)
                        search_start = last_block;

                btrfs_tree_lock(cur);
                btrfs_set_lock_blocking_write(cur);
                err = __btrfs_cow_block(trans, root, cur, parent, i,
                                        &cur, search_start,
                                        min(16 * blocksize,
                                            (end_slot - i) * blocksize));
                if (err) {
                        btrfs_tree_unlock(cur);
                        free_extent_buffer(cur);
                        break;
                }
                search_start = cur->start;
                last_block = cur->start;
                *last_ret = search_start;
                btrfs_tree_unlock(cur);
                free_extent_buffer(cur);
        }
        return err;
}

/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
                                       unsigned long p, int item_size,
                                       const struct btrfs_key *key,
                                       int max, int *slot)
{
        int low = 0;
        int high = max;
        int mid;
        int ret;
        struct btrfs_disk_key *tmp = NULL;
        struct btrfs_disk_key unaligned;
        unsigned long offset;
        char *kaddr = NULL;
        unsigned long map_start = 0;
        unsigned long map_len = 0;
        int err;

        if (low > high) {
                btrfs_err(eb->fs_info,
                 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
                          __func__, low, high, eb->start,
                          btrfs_header_owner(eb), btrfs_header_level(eb));
                return -EINVAL;
        }

        while (low < high) {
                mid = (low + high) / 2;
                offset = p + mid * item_size;

                if (!kaddr || offset < map_start ||
                    (offset + sizeof(struct btrfs_disk_key)) >
                    map_start + map_len) {
                        err = map_private_extent_buffer(eb, offset,
                                        sizeof(struct btrfs_disk_key),
                                        &kaddr, &map_start, &map_len);

                        if (!err) {
                                tmp = (struct btrfs_disk_key *)(kaddr + offset -
                                                        map_start);
                        } else if (err == 1) {
                                read_extent_buffer(eb, &unaligned,
                                                   offset, sizeof(unaligned));
                                tmp = &unaligned;
                        } else {
                                return err;
                        }
                } else {
                        tmp = (struct btrfs_disk_key *)(kaddr + offset -
                                                        map_start);
                }
                ret = comp_keys(tmp, key);

                if (ret < 0)
                        low = mid + 1;
                else if (ret > 0)
                        high = mid;
                else {
                        *slot = mid;
                        return 0;
                }
        }
        *slot = low;
        return 1;
}

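/*
 * Return convention, by example: searching an array of four keys for a
 * key that sorts between slot 1 and slot 2 returns 1 with *slot == 2
 * (the insertion point); an exact match at slot 2 returns 0 with
 * *slot == 2.
 */
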
/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
                     int level, int *slot)
{
        if (level == 0)
                return generic_bin_search(eb,
                                          offsetof(struct btrfs_leaf, items),
                                          sizeof(struct btrfs_item),
                                          key, btrfs_header_nritems(eb),
                                          slot);
        else
                return generic_bin_search(eb,
                                          offsetof(struct btrfs_node, ptrs),
                                          sizeof(struct btrfs_key_ptr),
                                          key, btrfs_header_nritems(eb),
                                          slot);
}

static void root_add_used(struct btrfs_root *root, u32 size)
{
        spin_lock(&root->accounting_lock);
        btrfs_set_root_used(&root->root_item,
                            btrfs_root_used(&root->root_item) + size);
        spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
        spin_lock(&root->accounting_lock);
        btrfs_set_root_used(&root->root_item,
                            btrfs_root_used(&root->root_item) - size);
        spin_unlock(&root->accounting_lock);
}

1845/* given a node and slot number, this reads the block it points to. The
1846 * extent buffer is returned with a reference taken (but unlocked).
1847 */
David Brazdil0f672f62019-12-10 10:32:29 +00001848struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
1849 int slot)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001850{
1851 int level = btrfs_header_level(parent);
1852 struct extent_buffer *eb;
1853 struct btrfs_key first_key;
1854
1855 if (slot < 0 || slot >= btrfs_header_nritems(parent))
1856 return ERR_PTR(-ENOENT);
1857
1858 BUG_ON(level == 0);
1859
1860 btrfs_node_key_to_cpu(parent, &first_key, slot);
David Brazdil0f672f62019-12-10 10:32:29 +00001861 eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot),
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001862 btrfs_node_ptr_generation(parent, slot),
1863 level - 1, &first_key);
1864 if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
1865 free_extent_buffer(eb);
1866 eb = ERR_PTR(-EIO);
1867 }
1868
1869 return eb;
1870}
1871
1872/*
1873 * node level balancing, used to make sure nodes are in proper order for
1874 * item deletion. We balance from the top down, so we have to make sure
1875 * that a deletion won't leave a node completely empty later on.
1876 */
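/*
 * The strategies below are tried in order: collapse a single-pointer root
 * into its child, push items from the middle node into its left sibling,
 * drain the right sibling into the middle node (deleting it if it empties),
 * and finally pull from the left sibling when the middle node is down to
 * one item.
 */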
1877static noinline int balance_level(struct btrfs_trans_handle *trans,
1878 struct btrfs_root *root,
1879 struct btrfs_path *path, int level)
1880{
1881 struct btrfs_fs_info *fs_info = root->fs_info;
1882 struct extent_buffer *right = NULL;
1883 struct extent_buffer *mid;
1884 struct extent_buffer *left = NULL;
1885 struct extent_buffer *parent = NULL;
1886 int ret = 0;
1887 int wret;
1888 int pslot;
1889 int orig_slot = path->slots[level];
1890 u64 orig_ptr;
1891
David Brazdil0f672f62019-12-10 10:32:29 +00001892 ASSERT(level > 0);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001893
1894 mid = path->nodes[level];
1895
1896 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1897 path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1898 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1899
1900 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1901
1902 if (level < BTRFS_MAX_LEVEL - 1) {
1903 parent = path->nodes[level + 1];
1904 pslot = path->slots[level + 1];
1905 }
1906
1907 /*
1908 * deal with the case where there is only one pointer in the root
1909 * by promoting the node below to a root
1910 */
1911 if (!parent) {
1912 struct extent_buffer *child;
1913
1914 if (btrfs_header_nritems(mid) != 1)
1915 return 0;
1916
1917 /* promote the child to a root */
David Brazdil0f672f62019-12-10 10:32:29 +00001918 child = btrfs_read_node_slot(mid, 0);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001919 if (IS_ERR(child)) {
1920 ret = PTR_ERR(child);
1921 btrfs_handle_fs_error(fs_info, ret, NULL);
1922 goto enospc;
1923 }
1924
1925 btrfs_tree_lock(child);
David Brazdil0f672f62019-12-10 10:32:29 +00001926 btrfs_set_lock_blocking_write(child);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001927 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1928 if (ret) {
1929 btrfs_tree_unlock(child);
1930 free_extent_buffer(child);
1931 goto enospc;
1932 }
1933
1934 ret = tree_mod_log_insert_root(root->node, child, 1);
1935 BUG_ON(ret < 0);
1936 rcu_assign_pointer(root->node, child);
1937
1938 add_root_to_dirty_list(root);
1939 btrfs_tree_unlock(child);
1940
1941 path->locks[level] = 0;
1942 path->nodes[level] = NULL;
David Brazdil0f672f62019-12-10 10:32:29 +00001943 btrfs_clean_tree_block(mid);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001944 btrfs_tree_unlock(mid);
1945 /* once for the path */
1946 free_extent_buffer(mid);
1947
1948 root_sub_used(root, mid->len);
1949 btrfs_free_tree_block(trans, root, mid, 0, 1);
1950 /* once for the root ptr */
1951 free_extent_buffer_stale(mid);
1952 return 0;
1953 }
1954 if (btrfs_header_nritems(mid) >
1955 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
1956 return 0;
1957
David Brazdil0f672f62019-12-10 10:32:29 +00001958 left = btrfs_read_node_slot(parent, pslot - 1);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001959 if (IS_ERR(left))
1960 left = NULL;
1961
1962 if (left) {
1963 btrfs_tree_lock(left);
David Brazdil0f672f62019-12-10 10:32:29 +00001964 btrfs_set_lock_blocking_write(left);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001965 wret = btrfs_cow_block(trans, root, left,
1966 parent, pslot - 1, &left);
1967 if (wret) {
1968 ret = wret;
1969 goto enospc;
1970 }
1971 }
1972
David Brazdil0f672f62019-12-10 10:32:29 +00001973 right = btrfs_read_node_slot(parent, pslot + 1);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001974 if (IS_ERR(right))
1975 right = NULL;
1976
1977 if (right) {
1978 btrfs_tree_lock(right);
David Brazdil0f672f62019-12-10 10:32:29 +00001979 btrfs_set_lock_blocking_write(right);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001980 wret = btrfs_cow_block(trans, root, right,
1981 parent, pslot + 1, &right);
1982 if (wret) {
1983 ret = wret;
1984 goto enospc;
1985 }
1986 }
1987
1988 /* first, try to make some room in the middle buffer */
1989 if (left) {
1990 orig_slot += btrfs_header_nritems(left);
David Brazdil0f672f62019-12-10 10:32:29 +00001991 wret = push_node_left(trans, left, mid, 1);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001992 if (wret < 0)
1993 ret = wret;
1994 }
1995
1996 /*
1997 * then try to empty the right most buffer into the middle
1998 */
1999 if (right) {
David Brazdil0f672f62019-12-10 10:32:29 +00002000 wret = push_node_left(trans, mid, right, 1);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002001 if (wret < 0 && wret != -ENOSPC)
2002 ret = wret;
2003 if (btrfs_header_nritems(right) == 0) {
David Brazdil0f672f62019-12-10 10:32:29 +00002004 btrfs_clean_tree_block(right);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002005 btrfs_tree_unlock(right);
2006 del_ptr(root, path, level + 1, pslot + 1);
2007 root_sub_used(root, right->len);
2008 btrfs_free_tree_block(trans, root, right, 0, 1);
2009 free_extent_buffer_stale(right);
2010 right = NULL;
2011 } else {
2012 struct btrfs_disk_key right_key;
2013 btrfs_node_key(right, &right_key, 0);
2014 ret = tree_mod_log_insert_key(parent, pslot + 1,
2015 MOD_LOG_KEY_REPLACE, GFP_NOFS);
2016 BUG_ON(ret < 0);
2017 btrfs_set_node_key(parent, &right_key, pslot + 1);
2018 btrfs_mark_buffer_dirty(parent);
2019 }
2020 }
2021 if (btrfs_header_nritems(mid) == 1) {
2022 /*
2023 * we're not allowed to leave a node with one item in the
2024 * tree during a delete. A deletion from lower in the tree
2025 * could try to delete the only pointer in this node.
2026 * So, pull some keys from the left.
2027 * There has to be a left pointer at this point because
2028 * otherwise we would have pulled some pointers from the
2029 * right
2030 */
2031 if (!left) {
2032 ret = -EROFS;
2033 btrfs_handle_fs_error(fs_info, ret, NULL);
2034 goto enospc;
2035 }
David Brazdil0f672f62019-12-10 10:32:29 +00002036 wret = balance_node_right(trans, mid, left);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002037 if (wret < 0) {
2038 ret = wret;
2039 goto enospc;
2040 }
2041 if (wret == 1) {
David Brazdil0f672f62019-12-10 10:32:29 +00002042 wret = push_node_left(trans, left, mid, 1);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002043 if (wret < 0)
2044 ret = wret;
2045 }
2046 BUG_ON(wret == 1);
2047 }
2048 if (btrfs_header_nritems(mid) == 0) {
David Brazdil0f672f62019-12-10 10:32:29 +00002049 btrfs_clean_tree_block(mid);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002050 btrfs_tree_unlock(mid);
2051 del_ptr(root, path, level + 1, pslot);
2052 root_sub_used(root, mid->len);
2053 btrfs_free_tree_block(trans, root, mid, 0, 1);
2054 free_extent_buffer_stale(mid);
2055 mid = NULL;
2056 } else {
2057 /* update the parent key to reflect our changes */
2058 struct btrfs_disk_key mid_key;
2059 btrfs_node_key(mid, &mid_key, 0);
2060 ret = tree_mod_log_insert_key(parent, pslot,
2061 MOD_LOG_KEY_REPLACE, GFP_NOFS);
2062 BUG_ON(ret < 0);
2063 btrfs_set_node_key(parent, &mid_key, pslot);
2064 btrfs_mark_buffer_dirty(parent);
2065 }
2066
2067 /* update the path */
2068 if (left) {
2069 if (btrfs_header_nritems(left) > orig_slot) {
2070 extent_buffer_get(left);
2071 /* left was locked after cow */
2072 path->nodes[level] = left;
2073 path->slots[level + 1] -= 1;
2074 path->slots[level] = orig_slot;
2075 if (mid) {
2076 btrfs_tree_unlock(mid);
2077 free_extent_buffer(mid);
2078 }
2079 } else {
2080 orig_slot -= btrfs_header_nritems(left);
2081 path->slots[level] = orig_slot;
2082 }
2083 }
2084 /* double check we haven't messed things up */
2085 if (orig_ptr !=
2086 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
2087 BUG();
2088enospc:
2089 if (right) {
2090 btrfs_tree_unlock(right);
2091 free_extent_buffer(right);
2092 }
2093 if (left) {
2094 if (path->nodes[level] != left)
2095 btrfs_tree_unlock(left);
2096 free_extent_buffer(left);
2097 }
2098 return ret;
2099}
2100
2101/* Node balancing for insertion. Here we only split or push nodes around
2102 * when they are completely full. This is also done top down, so we
2103 * have to be pessimistic.
2104 */
2105static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
2106 struct btrfs_root *root,
2107 struct btrfs_path *path, int level)
2108{
2109 struct btrfs_fs_info *fs_info = root->fs_info;
2110 struct extent_buffer *right = NULL;
2111 struct extent_buffer *mid;
2112 struct extent_buffer *left = NULL;
2113 struct extent_buffer *parent = NULL;
2114 int ret = 0;
2115 int wret;
2116 int pslot;
2117 int orig_slot = path->slots[level];
2118
2119 if (level == 0)
2120 return 1;
2121
2122 mid = path->nodes[level];
2123 WARN_ON(btrfs_header_generation(mid) != trans->transid);
2124
2125 if (level < BTRFS_MAX_LEVEL - 1) {
2126 parent = path->nodes[level + 1];
2127 pslot = path->slots[level + 1];
2128 }
2129
2130 if (!parent)
2131 return 1;
2132
David Brazdil0f672f62019-12-10 10:32:29 +00002133 left = btrfs_read_node_slot(parent, pslot - 1);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002134 if (IS_ERR(left))
2135 left = NULL;
2136
2137 /* first, try to make some room in the middle buffer */
2138 if (left) {
2139 u32 left_nr;
2140
2141 btrfs_tree_lock(left);
David Brazdil0f672f62019-12-10 10:32:29 +00002142 btrfs_set_lock_blocking_write(left);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002143
2144 left_nr = btrfs_header_nritems(left);
2145 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
2146 wret = 1;
2147 } else {
2148 ret = btrfs_cow_block(trans, root, left, parent,
2149 pslot - 1, &left);
2150 if (ret)
2151 wret = 1;
2152 else {
David Brazdil0f672f62019-12-10 10:32:29 +00002153 wret = push_node_left(trans, left, mid, 0);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002154 }
2155 }
2156 if (wret < 0)
2157 ret = wret;
2158 if (wret == 0) {
2159 struct btrfs_disk_key disk_key;
2160 orig_slot += left_nr;
2161 btrfs_node_key(mid, &disk_key, 0);
2162 ret = tree_mod_log_insert_key(parent, pslot,
2163 MOD_LOG_KEY_REPLACE, GFP_NOFS);
2164 BUG_ON(ret < 0);
2165 btrfs_set_node_key(parent, &disk_key, pslot);
2166 btrfs_mark_buffer_dirty(parent);
2167 if (btrfs_header_nritems(left) > orig_slot) {
2168 path->nodes[level] = left;
2169 path->slots[level + 1] -= 1;
2170 path->slots[level] = orig_slot;
2171 btrfs_tree_unlock(mid);
2172 free_extent_buffer(mid);
2173 } else {
2174 orig_slot -=
2175 btrfs_header_nritems(left);
2176 path->slots[level] = orig_slot;
2177 btrfs_tree_unlock(left);
2178 free_extent_buffer(left);
2179 }
2180 return 0;
2181 }
2182 btrfs_tree_unlock(left);
2183 free_extent_buffer(left);
2184 }
David Brazdil0f672f62019-12-10 10:32:29 +00002185 right = btrfs_read_node_slot(parent, pslot + 1);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002186 if (IS_ERR(right))
2187 right = NULL;
2188
2189 /*
2190 * then try to empty the right most buffer into the middle
2191 */
2192 if (right) {
2193 u32 right_nr;
2194
2195 btrfs_tree_lock(right);
David Brazdil0f672f62019-12-10 10:32:29 +00002196 btrfs_set_lock_blocking_write(right);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002197
2198 right_nr = btrfs_header_nritems(right);
2199 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
2200 wret = 1;
2201 } else {
2202 ret = btrfs_cow_block(trans, root, right,
2203 parent, pslot + 1,
2204 &right);
2205 if (ret)
2206 wret = 1;
2207 else {
David Brazdil0f672f62019-12-10 10:32:29 +00002208 wret = balance_node_right(trans, right, mid);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002209 }
2210 }
2211 if (wret < 0)
2212 ret = wret;
2213 if (wret == 0) {
2214 struct btrfs_disk_key disk_key;
2215
2216 btrfs_node_key(right, &disk_key, 0);
2217 ret = tree_mod_log_insert_key(parent, pslot + 1,
2218 MOD_LOG_KEY_REPLACE, GFP_NOFS);
2219 BUG_ON(ret < 0);
2220 btrfs_set_node_key(parent, &disk_key, pslot + 1);
2221 btrfs_mark_buffer_dirty(parent);
2222
2223 if (btrfs_header_nritems(mid) <= orig_slot) {
2224 path->nodes[level] = right;
2225 path->slots[level + 1] += 1;
2226 path->slots[level] = orig_slot -
2227 btrfs_header_nritems(mid);
2228 btrfs_tree_unlock(mid);
2229 free_extent_buffer(mid);
2230 } else {
2231 btrfs_tree_unlock(right);
2232 free_extent_buffer(right);
2233 }
2234 return 0;
2235 }
2236 btrfs_tree_unlock(right);
2237 free_extent_buffer(right);
2238 }
2239 return 1;
2240}
2241
2242/*
2243 * readahead one full node of leaves, finding things that are close
2244 * to the block in 'slot', and triggering ra on them.
2245 */
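/*
 * The loop below bounds the work: it only issues readahead for blocks
 * within 64KiB of the target and stops after queueing 64KiB (nread) or
 * scanning 32 slots (nscan), so a single search never triggers unbounded
 * readahead.
 */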
2246static void reada_for_search(struct btrfs_fs_info *fs_info,
2247 struct btrfs_path *path,
2248 int level, int slot, u64 objectid)
2249{
2250 struct extent_buffer *node;
2251 struct btrfs_disk_key disk_key;
2252 u32 nritems;
2253 u64 search;
2254 u64 target;
2255 u64 nread = 0;
2256 struct extent_buffer *eb;
2257 u32 nr;
2258 u32 blocksize;
2259 u32 nscan = 0;
2260
2261 if (level != 1)
2262 return;
2263
2264 if (!path->nodes[level])
2265 return;
2266
2267 node = path->nodes[level];
2268
2269 search = btrfs_node_blockptr(node, slot);
2270 blocksize = fs_info->nodesize;
2271 eb = find_extent_buffer(fs_info, search);
2272 if (eb) {
2273 free_extent_buffer(eb);
2274 return;
2275 }
2276
2277 target = search;
2278
2279 nritems = btrfs_header_nritems(node);
2280 nr = slot;
2281
2282 while (1) {
2283 if (path->reada == READA_BACK) {
2284 if (nr == 0)
2285 break;
2286 nr--;
2287 } else if (path->reada == READA_FORWARD) {
2288 nr++;
2289 if (nr >= nritems)
2290 break;
2291 }
2292 if (path->reada == READA_BACK && objectid) {
2293 btrfs_node_key(node, &disk_key, nr);
2294 if (btrfs_disk_key_objectid(&disk_key) != objectid)
2295 break;
2296 }
2297 search = btrfs_node_blockptr(node, nr);
2298 if ((search <= target && target - search <= 65536) ||
2299 (search > target && search - target <= 65536)) {
2300 readahead_tree_block(fs_info, search);
2301 nread += blocksize;
2302 }
2303 nscan++;
2304		if (nread > 65536 || nscan > 32)
2305 break;
2306 }
2307}
2308
2309static noinline void reada_for_balance(struct btrfs_fs_info *fs_info,
2310 struct btrfs_path *path, int level)
2311{
2312 int slot;
2313 int nritems;
2314 struct extent_buffer *parent;
2315 struct extent_buffer *eb;
2316 u64 gen;
2317 u64 block1 = 0;
2318 u64 block2 = 0;
2319
2320 parent = path->nodes[level + 1];
2321 if (!parent)
2322 return;
2323
2324 nritems = btrfs_header_nritems(parent);
2325 slot = path->slots[level + 1];
2326
2327 if (slot > 0) {
2328 block1 = btrfs_node_blockptr(parent, slot - 1);
2329 gen = btrfs_node_ptr_generation(parent, slot - 1);
2330 eb = find_extent_buffer(fs_info, block1);
2331 /*
2332		 * If we get -EAGAIN from btrfs_buffer_uptodate, we
2333		 * don't want to return it here; that would loop
2334		 * forever.
2335 */
2336 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2337 block1 = 0;
2338 free_extent_buffer(eb);
2339 }
2340 if (slot + 1 < nritems) {
2341 block2 = btrfs_node_blockptr(parent, slot + 1);
2342 gen = btrfs_node_ptr_generation(parent, slot + 1);
2343 eb = find_extent_buffer(fs_info, block2);
2344 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2345 block2 = 0;
2346 free_extent_buffer(eb);
2347 }
2348
2349 if (block1)
2350 readahead_tree_block(fs_info, block1);
2351 if (block2)
2352 readahead_tree_block(fs_info, block2);
2353}
2354
2355
2356/*
2357 * when we walk down the tree, it is usually safe to unlock the higher layers
2358 * in the tree. The exceptions are when our path goes through slot 0, because
2359 * operations on the tree might require changing key pointers higher up in the
2360 * tree.
2361 *
2362 * callers might also have set path->keep_locks, which tells this code to keep
2363 * the lock if the path points to the last slot in the block. This is part of
2364 * walking through the tree, and selecting the next slot in the higher block.
2365 *
2366 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
2367 * if lowest_unlock is 1, level 0 won't be unlocked
2368 */
2369static noinline void unlock_up(struct btrfs_path *path, int level,
2370 int lowest_unlock, int min_write_lock_level,
2371 int *write_lock_level)
2372{
2373 int i;
2374 int skip_level = level;
2375 int no_skips = 0;
2376 struct extent_buffer *t;
2377
2378 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2379 if (!path->nodes[i])
2380 break;
2381 if (!path->locks[i])
2382 break;
2383 if (!no_skips && path->slots[i] == 0) {
2384 skip_level = i + 1;
2385 continue;
2386 }
2387 if (!no_skips && path->keep_locks) {
2388 u32 nritems;
2389 t = path->nodes[i];
2390 nritems = btrfs_header_nritems(t);
2391 if (nritems < 1 || path->slots[i] >= nritems - 1) {
2392 skip_level = i + 1;
2393 continue;
2394 }
2395 }
2396 if (skip_level < i && i >= lowest_unlock)
2397 no_skips = 1;
2398
2399 t = path->nodes[i];
2400 if (i >= lowest_unlock && i > skip_level) {
2401 btrfs_tree_unlock_rw(t, path->locks[i]);
2402 path->locks[i] = 0;
2403 if (write_lock_level &&
2404 i > min_write_lock_level &&
2405 i <= *write_lock_level) {
2406 *write_lock_level = i - 1;
2407 }
2408 }
2409 }
2410}
2411
2412/*
2413 * This releases any locks held in the path starting at level and
2414 * going all the way up to the root.
2415 *
2416 * btrfs_search_slot will keep the lock held on higher nodes in a few
2417 * corner cases, such as COW of the block at slot zero in the node. This
2418 * ignores those rules, and it should only be called when there are no
2419 * more updates to be done higher up in the tree.
2420 */
2421noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2422{
2423 int i;
2424
2425 if (path->keep_locks)
2426 return;
2427
2428 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2429 if (!path->nodes[i])
2430 continue;
2431 if (!path->locks[i])
2432 continue;
2433 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2434 path->locks[i] = 0;
2435 }
2436}
2437
2438/*
2439 * helper function for btrfs_search_slot. The goal is to find a block
2440 * in cache without setting the path to blocking. If we find the block
2441 * we return zero and the path is unchanged.
2442 *
2443 * If we can't find the block, we set the path blocking and do some
2444 * reada. -EAGAIN is returned and the search must be repeated.
2445 */
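/*
 * Three outcomes below: a cached buffer with a matching generation is
 * returned as is (after a first_key sanity check); a cached but stale
 * buffer is re-read in place after setting the path blocking; an uncached
 * block gets readahead plus a read, and -EAGAIN tells the caller to repeat
 * the search with the path released.
 */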
2446static int
2447read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
2448 struct extent_buffer **eb_ret, int level, int slot,
2449 const struct btrfs_key *key)
2450{
2451 struct btrfs_fs_info *fs_info = root->fs_info;
2452 u64 blocknr;
2453 u64 gen;
2454 struct extent_buffer *b = *eb_ret;
2455 struct extent_buffer *tmp;
2456 struct btrfs_key first_key;
2457 int ret;
2458 int parent_level;
2459
2460 blocknr = btrfs_node_blockptr(b, slot);
2461 gen = btrfs_node_ptr_generation(b, slot);
2462 parent_level = btrfs_header_level(b);
2463 btrfs_node_key_to_cpu(b, &first_key, slot);
2464
2465 tmp = find_extent_buffer(fs_info, blocknr);
2466 if (tmp) {
2467 /* first we do an atomic uptodate check */
2468 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
David Brazdil0f672f62019-12-10 10:32:29 +00002469 /*
2470 * Do extra check for first_key, eb can be stale due to
2471 * being cached, read from scrub, or have multiple
2472 * parents (shared tree blocks).
2473 */
2474 if (btrfs_verify_level_key(tmp,
2475 parent_level - 1, &first_key, gen)) {
2476 free_extent_buffer(tmp);
2477 return -EUCLEAN;
2478 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002479 *eb_ret = tmp;
2480 return 0;
2481 }
2482
2483 /* the pages were up to date, but we failed
2484 * the generation number check. Do a full
2485		 * read for the correct generation.
2486 * We must do this without dropping locks so
2487 * we can trust our generation number
2488 */
2489 btrfs_set_path_blocking(p);
2490
2491 /* now we're allowed to do a blocking uptodate check */
2492 ret = btrfs_read_buffer(tmp, gen, parent_level - 1, &first_key);
2493 if (!ret) {
2494 *eb_ret = tmp;
2495 return 0;
2496 }
2497 free_extent_buffer(tmp);
2498 btrfs_release_path(p);
2499 return -EIO;
2500 }
2501
2502 /*
2503 * reduce lock contention at high levels
2504 * of the btree by dropping locks before
2505 * we read. Don't release the lock on the current
2506 * level because we need to walk this node to figure
2507 * out which blocks to read.
2508 */
2509 btrfs_unlock_up_safe(p, level + 1);
2510 btrfs_set_path_blocking(p);
2511
2512 if (p->reada != READA_NONE)
2513 reada_for_search(fs_info, p, level, slot, key->objectid);
2514
2515 ret = -EAGAIN;
2516 tmp = read_tree_block(fs_info, blocknr, gen, parent_level - 1,
2517 &first_key);
2518 if (!IS_ERR(tmp)) {
2519 /*
2520 * If the read above didn't mark this buffer up to date,
2521 * it will never end up being up to date. Set ret to EIO now
2522 * and give up so that our caller doesn't loop forever
2523 * on our EAGAINs.
2524 */
2525 if (!extent_buffer_uptodate(tmp))
2526 ret = -EIO;
2527 free_extent_buffer(tmp);
2528 } else {
2529 ret = PTR_ERR(tmp);
2530 }
2531
2532 btrfs_release_path(p);
2533 return ret;
2534}
2535
2536/*
2537 * helper function for btrfs_search_slot. This does all of the checks
2538 * for node-level blocks and does any balancing required based on
2539 * the ins_len.
2540 *
2541 * If no extra work was required, zero is returned. If we had to
2542 * drop the path, -EAGAIN is returned and btrfs_search_slot must
2543 * start over
2544 */
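/*
 * The two thresholds below mirror each other: when inserting, a node is
 * split once three or fewer of its BTRFS_NODEPTRS_PER_BLOCK() slots remain
 * free; when deleting, it is rebalanced once it drops below half full.
 */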
2545static int
2546setup_nodes_for_search(struct btrfs_trans_handle *trans,
2547 struct btrfs_root *root, struct btrfs_path *p,
2548 struct extent_buffer *b, int level, int ins_len,
2549 int *write_lock_level)
2550{
2551 struct btrfs_fs_info *fs_info = root->fs_info;
2552 int ret;
2553
2554 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2555 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {
2556 int sret;
2557
2558 if (*write_lock_level < level + 1) {
2559 *write_lock_level = level + 1;
2560 btrfs_release_path(p);
2561 goto again;
2562 }
2563
2564 btrfs_set_path_blocking(p);
2565 reada_for_balance(fs_info, p, level);
2566 sret = split_node(trans, root, p, level);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002567
2568 BUG_ON(sret > 0);
2569 if (sret) {
2570 ret = sret;
2571 goto done;
2572 }
2573 b = p->nodes[level];
2574 } else if (ins_len < 0 && btrfs_header_nritems(b) <
2575 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {
2576 int sret;
2577
2578 if (*write_lock_level < level + 1) {
2579 *write_lock_level = level + 1;
2580 btrfs_release_path(p);
2581 goto again;
2582 }
2583
2584 btrfs_set_path_blocking(p);
2585 reada_for_balance(fs_info, p, level);
2586 sret = balance_level(trans, root, p, level);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002587
2588 if (sret) {
2589 ret = sret;
2590 goto done;
2591 }
2592 b = p->nodes[level];
2593 if (!b) {
2594 btrfs_release_path(p);
2595 goto again;
2596 }
2597 BUG_ON(btrfs_header_nritems(b) == 1);
2598 }
2599 return 0;
2600
2601again:
2602 ret = -EAGAIN;
2603done:
2604 return ret;
2605}
2606
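/*
 * Wrapper around btrfs_bin_search() that memoizes the previous comparison:
 * once an exact match (*prev_cmp == 0) has been seen at a higher level,
 * every lower level must find the key in slot 0, so the binary search is
 * skipped.
 */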
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002607static int key_search(struct extent_buffer *b, const struct btrfs_key *key,
2608 int level, int *prev_cmp, int *slot)
2609{
2610 if (*prev_cmp != 0) {
2611 *prev_cmp = btrfs_bin_search(b, key, level, slot);
2612 return *prev_cmp;
2613 }
2614
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002615 *slot = 0;
2616
2617 return 0;
2618}
2619
2620int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
2621 u64 iobjectid, u64 ioff, u8 key_type,
2622 struct btrfs_key *found_key)
2623{
2624 int ret;
2625 struct btrfs_key key;
2626 struct extent_buffer *eb;
2627
2628 ASSERT(path);
2629 ASSERT(found_key);
2630
2631 key.type = key_type;
2632 key.objectid = iobjectid;
2633 key.offset = ioff;
2634
2635 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
2636 if (ret < 0)
2637 return ret;
2638
2639 eb = path->nodes[0];
2640 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
2641 ret = btrfs_next_leaf(fs_root, path);
2642 if (ret)
2643 return ret;
2644 eb = path->nodes[0];
2645 }
2646
2647 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
2648 if (found_key->type != key.type ||
2649 found_key->objectid != key.objectid)
2650 return 1;
2651
2652 return 0;
2653}
2654
2655static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
2656 struct btrfs_path *p,
2657 int write_lock_level)
2658{
2659 struct btrfs_fs_info *fs_info = root->fs_info;
2660 struct extent_buffer *b;
2661 int root_lock;
2662 int level = 0;
2663
2664 /* We try very hard to do read locks on the root */
2665 root_lock = BTRFS_READ_LOCK;
2666
2667 if (p->search_commit_root) {
David Brazdil0f672f62019-12-10 10:32:29 +00002668 /*
2669 * The commit roots are read only so we always do read locks,
2670 * and we always must hold the commit_root_sem when doing
2671 * searches on them, the only exception is send where we don't
2672 * want to block transaction commits for a long time, so
2673 * we need to clone the commit root in order to avoid races
2674 * with transaction commits that create a snapshot of one of
2675 * the roots used by a send operation.
2676 */
2677 if (p->need_commit_sem) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002678 down_read(&fs_info->commit_root_sem);
David Brazdil0f672f62019-12-10 10:32:29 +00002679 b = btrfs_clone_extent_buffer(root->commit_root);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002680 up_read(&fs_info->commit_root_sem);
David Brazdil0f672f62019-12-10 10:32:29 +00002681 if (!b)
2682 return ERR_PTR(-ENOMEM);
2683
2684 } else {
2685 b = root->commit_root;
2686 extent_buffer_get(b);
2687 }
2688 level = btrfs_header_level(b);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002689 /*
2690 * Ensure that all callers have set skip_locking when
2691 * p->search_commit_root = 1.
2692 */
2693 ASSERT(p->skip_locking == 1);
2694
2695 goto out;
2696 }
2697
2698 if (p->skip_locking) {
2699 b = btrfs_root_node(root);
2700 level = btrfs_header_level(b);
2701 goto out;
2702 }
2703
2704 /*
2705 * If the level is set to maximum, we can skip trying to get the read
2706 * lock.
2707 */
2708 if (write_lock_level < BTRFS_MAX_LEVEL) {
2709 /*
2710 * We don't know the level of the root node until we actually
2711 * have it read locked
2712 */
2713 b = btrfs_read_lock_root_node(root);
2714 level = btrfs_header_level(b);
2715 if (level > write_lock_level)
2716 goto out;
2717
2718 /* Whoops, must trade for write lock */
2719 btrfs_tree_read_unlock(b);
2720 free_extent_buffer(b);
2721 }
2722
2723 b = btrfs_lock_root_node(root);
2724 root_lock = BTRFS_WRITE_LOCK;
2725
2726 /* The level might have changed, check again */
2727 level = btrfs_header_level(b);
2728
2729out:
2730 p->nodes[level] = b;
2731 if (!p->skip_locking)
2732 p->locks[level] = root_lock;
2733 /*
2734 * Callers are responsible for dropping b's references.
2735 */
2736 return b;
2737}
2738
2739
2740/*
2741 * btrfs_search_slot - look for a key in a tree and perform necessary
2742 * modifications to preserve tree invariants.
2743 *
2744 * @trans: Handle of transaction, used when modifying the tree
2745 * @root: The root of the tree we are searching
2746 * @key: The key we are looking for
2747 * @p: Holds all btree nodes along the search path
2748 * @ins_len: Indicates purpose of search: for inserts it is 1, for
2749 * deletions it is -1, and 0 for plain searches
2750 * @cow: whether CoW operations should be performed. Must always be 1
2751 * when modifying the tree.
2752 *
2753 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
2754 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
2755 *
2756 * If @key is found, 0 is returned and you can find the item in the leaf level
2757 * of the path (level 0)
2758 *
2759 * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
2760 * points to the slot where it should be inserted
2761 *
2762 * If an error is encountered while searching the tree a negative error number
2763 * is returned
2764 */
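/*
 * Illustrative usage sketch (not from this file; 'root' and 'ino' are
 * assumed to be in scope, error handling trimmed): a plain read-only
 * lookup of an inode item, with trans == NULL, ins_len == 0 and cow == 0.
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct btrfs_key key = { .objectid = ino,
 *				 .type = BTRFS_INODE_ITEM_KEY,
 *				 .offset = 0 };
 *
 *	if (!path)
 *		return -ENOMEM;
 *	if (btrfs_search_slot(NULL, root, &key, path, 0, 0) == 0) {
 *		struct btrfs_inode_item *ii;
 *
 *		ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
 *				    struct btrfs_inode_item);
 *		... use ii while the leaf is referenced ...
 *	}
 *	btrfs_free_path(path);
 */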
2765int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2766 const struct btrfs_key *key, struct btrfs_path *p,
2767 int ins_len, int cow)
2768{
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002769 struct extent_buffer *b;
2770 int slot;
2771 int ret;
2772 int err;
2773 int level;
2774 int lowest_unlock = 1;
2775 /* everything at write_lock_level or lower must be write locked */
2776 int write_lock_level = 0;
2777 u8 lowest_level = 0;
2778 int min_write_lock_level;
2779 int prev_cmp;
2780
2781 lowest_level = p->lowest_level;
2782 WARN_ON(lowest_level && ins_len > 0);
2783 WARN_ON(p->nodes[0] != NULL);
2784 BUG_ON(!cow && ins_len);
2785
2786 if (ins_len < 0) {
2787 lowest_unlock = 2;
2788
2789		/* when we are removing items, we might have to go up to level
2790		 * two as we update tree pointers. Make sure we keep write
2791		 * locks for those levels as well
2792 */
2793 write_lock_level = 2;
2794 } else if (ins_len > 0) {
2795 /*
2796 * for inserting items, make sure we have a write lock on
2797 * level 1 so we can update keys
2798 */
2799 write_lock_level = 1;
2800 }
2801
2802 if (!cow)
2803 write_lock_level = -1;
2804
2805 if (cow && (p->keep_locks || p->lowest_level))
2806 write_lock_level = BTRFS_MAX_LEVEL;
2807
2808 min_write_lock_level = write_lock_level;
2809
2810again:
2811 prev_cmp = -1;
2812 b = btrfs_search_slot_get_root(root, p, write_lock_level);
David Brazdil0f672f62019-12-10 10:32:29 +00002813 if (IS_ERR(b)) {
2814 ret = PTR_ERR(b);
2815 goto done;
2816 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002817
2818 while (b) {
2819 level = btrfs_header_level(b);
2820
2821 /*
2822 * setup the path here so we can release it under lock
2823 * contention with the cow code
2824 */
2825 if (cow) {
2826 bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
2827
2828 /*
2829 * if we don't really need to cow this block
2830 * then we don't want to set the path blocking,
2831 * so we test it here
2832 */
2833 if (!should_cow_block(trans, root, b)) {
2834 trans->dirty = true;
2835 goto cow_done;
2836 }
2837
2838 /*
2839 * must have write locks on this node and the
2840 * parent
2841 */
2842 if (level > write_lock_level ||
2843 (level + 1 > write_lock_level &&
2844 level + 1 < BTRFS_MAX_LEVEL &&
2845 p->nodes[level + 1])) {
2846 write_lock_level = level + 1;
2847 btrfs_release_path(p);
2848 goto again;
2849 }
2850
2851 btrfs_set_path_blocking(p);
2852 if (last_level)
2853 err = btrfs_cow_block(trans, root, b, NULL, 0,
2854 &b);
2855 else
2856 err = btrfs_cow_block(trans, root, b,
2857 p->nodes[level + 1],
2858 p->slots[level + 1], &b);
2859 if (err) {
2860 ret = err;
2861 goto done;
2862 }
2863 }
2864cow_done:
2865 p->nodes[level] = b;
David Brazdil0f672f62019-12-10 10:32:29 +00002866 /*
2867		 * Leave the path with blocking locks to avoid massive
2868		 * lock context switching; this is done on purpose.
2869 */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002870
2871 /*
2872 * we have a lock on b and as long as we aren't changing
2873		 * the tree, there is no way for the items in b to change.
2874 * It is safe to drop the lock on our parent before we
2875 * go through the expensive btree search on b.
2876 *
2877 * If we're inserting or deleting (ins_len != 0), then we might
2878 * be changing slot zero, which may require changing the parent.
2879 * So, we can't drop the lock until after we know which slot
2880 * we're operating on.
2881 */
2882 if (!ins_len && !p->keep_locks) {
2883 int u = level + 1;
2884
2885 if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2886 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2887 p->locks[u] = 0;
2888 }
2889 }
2890
2891 ret = key_search(b, key, level, &prev_cmp, &slot);
2892 if (ret < 0)
2893 goto done;
2894
2895 if (level != 0) {
2896 int dec = 0;
2897 if (ret && slot > 0) {
2898 dec = 1;
2899 slot -= 1;
2900 }
2901 p->slots[level] = slot;
2902 err = setup_nodes_for_search(trans, root, p, b, level,
2903 ins_len, &write_lock_level);
2904 if (err == -EAGAIN)
2905 goto again;
2906 if (err) {
2907 ret = err;
2908 goto done;
2909 }
2910 b = p->nodes[level];
2911 slot = p->slots[level];
2912
2913 /*
2914 * slot 0 is special, if we change the key
2915 * we have to update the parent pointer
2916 * which means we must have a write lock
2917 * on the parent
2918 */
2919 if (slot == 0 && ins_len &&
2920 write_lock_level < level + 1) {
2921 write_lock_level = level + 1;
2922 btrfs_release_path(p);
2923 goto again;
2924 }
2925
2926 unlock_up(p, level, lowest_unlock,
2927 min_write_lock_level, &write_lock_level);
2928
2929 if (level == lowest_level) {
2930 if (dec)
2931 p->slots[level]++;
2932 goto done;
2933 }
2934
2935 err = read_block_for_search(root, p, &b, level,
2936 slot, key);
2937 if (err == -EAGAIN)
2938 goto again;
2939 if (err) {
2940 ret = err;
2941 goto done;
2942 }
2943
2944 if (!p->skip_locking) {
2945 level = btrfs_header_level(b);
2946 if (level <= write_lock_level) {
David Brazdil0f672f62019-12-10 10:32:29 +00002947 if (!btrfs_try_tree_write_lock(b)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002948 btrfs_set_path_blocking(p);
2949 btrfs_tree_lock(b);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002950 }
2951 p->locks[level] = BTRFS_WRITE_LOCK;
2952 } else {
David Brazdil0f672f62019-12-10 10:32:29 +00002953 if (!btrfs_tree_read_lock_atomic(b)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002954 btrfs_set_path_blocking(p);
2955 btrfs_tree_read_lock(b);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002956 }
2957 p->locks[level] = BTRFS_READ_LOCK;
2958 }
2959 p->nodes[level] = b;
2960 }
2961 } else {
2962 p->slots[level] = slot;
2963 if (ins_len > 0 &&
David Brazdil0f672f62019-12-10 10:32:29 +00002964 btrfs_leaf_free_space(b) < ins_len) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002965 if (write_lock_level < 1) {
2966 write_lock_level = 1;
2967 btrfs_release_path(p);
2968 goto again;
2969 }
2970
2971 btrfs_set_path_blocking(p);
2972 err = split_leaf(trans, root, key,
2973 p, ins_len, ret == 0);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002974
2975 BUG_ON(err > 0);
2976 if (err) {
2977 ret = err;
2978 goto done;
2979 }
2980 }
2981 if (!p->search_for_split)
2982 unlock_up(p, level, lowest_unlock,
David Brazdil0f672f62019-12-10 10:32:29 +00002983 min_write_lock_level, NULL);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002984 goto done;
2985 }
2986 }
2987 ret = 1;
2988done:
2989 /*
2990 * we don't really know what they plan on doing with the path
2991 * from here on, so for now just mark it as blocking
2992 */
2993 if (!p->leave_spinning)
2994 btrfs_set_path_blocking(p);
2995 if (ret < 0 && !p->skip_release_on_error)
2996 btrfs_release_path(p);
2997 return ret;
2998}
2999
3000/*
3001 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
3002 * current state of the tree together with the operations recorded in the tree
3003 * modification log to search for the key in a previous version of this tree, as
3004 * denoted by the time_seq parameter.
3005 *
3006 * Naturally, there is no support for insert, delete or cow operations.
3007 *
3008 * The resulting path and return value will be set up as if we called
3009 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
3010 */
3011int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
3012 struct btrfs_path *p, u64 time_seq)
3013{
3014 struct btrfs_fs_info *fs_info = root->fs_info;
3015 struct extent_buffer *b;
3016 int slot;
3017 int ret;
3018 int err;
3019 int level;
3020 int lowest_unlock = 1;
3021 u8 lowest_level = 0;
3022 int prev_cmp = -1;
3023
3024 lowest_level = p->lowest_level;
3025 WARN_ON(p->nodes[0] != NULL);
3026
3027 if (p->search_commit_root) {
3028 BUG_ON(time_seq);
3029 return btrfs_search_slot(NULL, root, key, p, 0, 0);
3030 }
3031
3032again:
3033 b = get_old_root(root, time_seq);
David Brazdil0f672f62019-12-10 10:32:29 +00003034 if (!b) {
3035 ret = -EIO;
3036 goto done;
3037 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003038 level = btrfs_header_level(b);
3039 p->locks[level] = BTRFS_READ_LOCK;
3040
3041 while (b) {
3042 level = btrfs_header_level(b);
3043 p->nodes[level] = b;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003044
3045 /*
3046 * we have a lock on b and as long as we aren't changing
3047		 * the tree, there is no way for the items in b to change.
3048 * It is safe to drop the lock on our parent before we
3049 * go through the expensive btree search on b.
3050 */
3051 btrfs_unlock_up_safe(p, level + 1);
3052
3053 /*
3054 * Since we can unwind ebs we want to do a real search every
3055 * time.
3056 */
3057 prev_cmp = -1;
3058 ret = key_search(b, key, level, &prev_cmp, &slot);
David Brazdil0f672f62019-12-10 10:32:29 +00003059 if (ret < 0)
3060 goto done;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003061
3062 if (level != 0) {
3063 int dec = 0;
3064 if (ret && slot > 0) {
3065 dec = 1;
3066 slot -= 1;
3067 }
3068 p->slots[level] = slot;
3069 unlock_up(p, level, lowest_unlock, 0, NULL);
3070
3071 if (level == lowest_level) {
3072 if (dec)
3073 p->slots[level]++;
3074 goto done;
3075 }
3076
3077 err = read_block_for_search(root, p, &b, level,
3078 slot, key);
3079 if (err == -EAGAIN)
3080 goto again;
3081 if (err) {
3082 ret = err;
3083 goto done;
3084 }
3085
3086 level = btrfs_header_level(b);
David Brazdil0f672f62019-12-10 10:32:29 +00003087 if (!btrfs_tree_read_lock_atomic(b)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003088 btrfs_set_path_blocking(p);
3089 btrfs_tree_read_lock(b);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003090 }
3091 b = tree_mod_log_rewind(fs_info, p, b, time_seq);
3092 if (!b) {
3093 ret = -ENOMEM;
3094 goto done;
3095 }
3096 p->locks[level] = BTRFS_READ_LOCK;
3097 p->nodes[level] = b;
3098 } else {
3099 p->slots[level] = slot;
3100 unlock_up(p, level, lowest_unlock, 0, NULL);
3101 goto done;
3102 }
3103 }
3104 ret = 1;
3105done:
3106 if (!p->leave_spinning)
3107 btrfs_set_path_blocking(p);
3108 if (ret < 0)
3109 btrfs_release_path(p);
3110
3111 return ret;
3112}
3113
3114/*
3115 * helper to use instead of btrfs_search_slot when no exact match is needed
3116 * and the next or previous item should be returned instead.
3117 * When find_higher is true, the next higher item is returned, the next lower
3118 * otherwise.
3119 * When return_any and find_higher are both true, and no higher item is found,
3120 * return the next lower instead.
3121 * When return_any is true and find_higher is false, and no lower item is found,
3122 * return the next higher instead.
3123 * It returns 0 if any item is found, 1 if none is found (tree empty), and
3124 * < 0 on error
3125 */
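/*
 * Illustrative example: with only keys (1, t, 0) and (5, t, 0) present,
 * searching for (3, t, 0) with find_higher == 1 positions the path at
 * (5, t, 0), and with find_higher == 0 at (1, t, 0); return_any == 1 falls
 * back to the other direction when the tree runs out in the requested one.
 */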
3126int btrfs_search_slot_for_read(struct btrfs_root *root,
3127 const struct btrfs_key *key,
3128 struct btrfs_path *p, int find_higher,
3129 int return_any)
3130{
3131 int ret;
3132 struct extent_buffer *leaf;
3133
3134again:
3135 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
3136 if (ret <= 0)
3137 return ret;
3138 /*
3139 * a return value of 1 means the path is at the position where the
3140 * item should be inserted. Normally this is the next bigger item,
3141 * but in case the previous item is the last in a leaf, path points
3142 * to the first free slot in the previous leaf, i.e. at an invalid
3143 * item.
3144 */
3145 leaf = p->nodes[0];
3146
3147 if (find_higher) {
3148 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
3149 ret = btrfs_next_leaf(root, p);
3150 if (ret <= 0)
3151 return ret;
3152 if (!return_any)
3153 return 1;
3154 /*
3155 * no higher item found, return the next
3156 * lower instead
3157 */
3158 return_any = 0;
3159 find_higher = 0;
3160 btrfs_release_path(p);
3161 goto again;
3162 }
3163 } else {
3164 if (p->slots[0] == 0) {
3165 ret = btrfs_prev_leaf(root, p);
3166 if (ret < 0)
3167 return ret;
3168 if (!ret) {
3169 leaf = p->nodes[0];
3170 if (p->slots[0] == btrfs_header_nritems(leaf))
3171 p->slots[0]--;
3172 return 0;
3173 }
3174 if (!return_any)
3175 return 1;
3176 /*
3177 * no lower item found, return the next
3178 * higher instead
3179 */
3180 return_any = 0;
3181 find_higher = 1;
3182 btrfs_release_path(p);
3183 goto again;
3184 } else {
3185 --p->slots[0];
3186 }
3187 }
3188 return 0;
3189}
3190
3191/*
3192 * adjust the pointers going up the tree, starting at level
3193 * making sure the right key of each node points to 'key'.
3194 * This is used after shifting pointers to the left, so it stops
3195 * fixing up pointers when a given leaf/node is not in slot 0 of the
3196 * higher levels
3197 *
3198 */
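/*
 * Illustrative example: if the smallest key of a leaf changed, the parent
 * key pointing at it is rewritten; if that parent entry is itself in slot
 * 0, the fix ripples up one more level, stopping at the first slot that is
 * not 0.
 */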
3199static void fixup_low_keys(struct btrfs_path *path,
3200 struct btrfs_disk_key *key, int level)
3201{
3202 int i;
3203 struct extent_buffer *t;
3204 int ret;
3205
3206 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
3207 int tslot = path->slots[i];
3208
3209 if (!path->nodes[i])
3210 break;
3211 t = path->nodes[i];
3212 ret = tree_mod_log_insert_key(t, tslot, MOD_LOG_KEY_REPLACE,
3213 GFP_ATOMIC);
3214 BUG_ON(ret < 0);
3215 btrfs_set_node_key(t, key, tslot);
3216 btrfs_mark_buffer_dirty(path->nodes[i]);
3217 if (tslot != 0)
3218 break;
3219 }
3220}
3221
3222/*
3223 * update item key.
3224 *
3225 * This function isn't completely safe. It's the caller's responsibility
3226 * that the new key won't break the order
3227 */
3228void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
3229 struct btrfs_path *path,
3230 const struct btrfs_key *new_key)
3231{
3232 struct btrfs_disk_key disk_key;
3233 struct extent_buffer *eb;
3234 int slot;
3235
3236 eb = path->nodes[0];
3237 slot = path->slots[0];
3238 if (slot > 0) {
3239 btrfs_item_key(eb, &disk_key, slot - 1);
David Brazdil0f672f62019-12-10 10:32:29 +00003240 if (unlikely(comp_keys(&disk_key, new_key) >= 0)) {
3241 btrfs_crit(fs_info,
3242 "slot %u key (%llu %u %llu) new key (%llu %u %llu)",
3243 slot, btrfs_disk_key_objectid(&disk_key),
3244 btrfs_disk_key_type(&disk_key),
3245 btrfs_disk_key_offset(&disk_key),
3246 new_key->objectid, new_key->type,
3247 new_key->offset);
3248 btrfs_print_leaf(eb);
3249 BUG();
3250 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003251 }
3252 if (slot < btrfs_header_nritems(eb) - 1) {
3253 btrfs_item_key(eb, &disk_key, slot + 1);
David Brazdil0f672f62019-12-10 10:32:29 +00003254 if (unlikely(comp_keys(&disk_key, new_key) <= 0)) {
3255 btrfs_crit(fs_info,
3256 "slot %u key (%llu %u %llu) new key (%llu %u %llu)",
3257 slot, btrfs_disk_key_objectid(&disk_key),
3258 btrfs_disk_key_type(&disk_key),
3259 btrfs_disk_key_offset(&disk_key),
3260 new_key->objectid, new_key->type,
3261 new_key->offset);
3262 btrfs_print_leaf(eb);
3263 BUG();
3264 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003265 }
3266
3267 btrfs_cpu_key_to_disk(&disk_key, new_key);
3268 btrfs_set_item_key(eb, &disk_key, slot);
3269 btrfs_mark_buffer_dirty(eb);
3270 if (slot == 0)
3271 fixup_low_keys(path, &disk_key, 1);
3272}
3273
3274/*
3275 * try to push data from one node into the next node left in the
3276 * tree.
3277 *
3278 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
3279 * error, and > 0 if there was no room in the left hand block.
3280 */
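/*
 * Note on the heuristics below: unless @empty is set, at least eight
 * pointers are left in @src so the push does not drain the node to the
 * point where it immediately needs rebalancing again.
 */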
3281static int push_node_left(struct btrfs_trans_handle *trans,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003282 struct extent_buffer *dst,
3283 struct extent_buffer *src, int empty)
3284{
David Brazdil0f672f62019-12-10 10:32:29 +00003285 struct btrfs_fs_info *fs_info = trans->fs_info;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003286 int push_items = 0;
3287 int src_nritems;
3288 int dst_nritems;
3289 int ret = 0;
3290
3291 src_nritems = btrfs_header_nritems(src);
3292 dst_nritems = btrfs_header_nritems(dst);
3293 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
3294 WARN_ON(btrfs_header_generation(src) != trans->transid);
3295 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3296
3297 if (!empty && src_nritems <= 8)
3298 return 1;
3299
3300 if (push_items <= 0)
3301 return 1;
3302
3303 if (empty) {
3304 push_items = min(src_nritems, push_items);
3305 if (push_items < src_nritems) {
3306 /* leave at least 8 pointers in the node if
3307 * we aren't going to empty it
3308 */
3309 if (src_nritems - push_items < 8) {
3310 if (push_items <= 8)
3311 return 1;
3312 push_items -= 8;
3313 }
3314 }
3315 } else
3316 push_items = min(src_nritems - 8, push_items);
3317
David Brazdil0f672f62019-12-10 10:32:29 +00003318 ret = tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003319 if (ret) {
3320 btrfs_abort_transaction(trans, ret);
3321 return ret;
3322 }
3323 copy_extent_buffer(dst, src,
3324 btrfs_node_key_ptr_offset(dst_nritems),
3325 btrfs_node_key_ptr_offset(0),
3326 push_items * sizeof(struct btrfs_key_ptr));
3327
3328 if (push_items < src_nritems) {
3329 /*
3330 * Don't call tree_mod_log_insert_move here, key removal was
3331 * already fully logged by tree_mod_log_eb_copy above.
3332 */
3333 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3334 btrfs_node_key_ptr_offset(push_items),
3335 (src_nritems - push_items) *
3336 sizeof(struct btrfs_key_ptr));
3337 }
3338 btrfs_set_header_nritems(src, src_nritems - push_items);
3339 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3340 btrfs_mark_buffer_dirty(src);
3341 btrfs_mark_buffer_dirty(dst);
3342
3343 return ret;
3344}
3345
3346/*
3347 * try to push data from one node into the next node right in the
3348 * tree.
3349 *
3350 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3351 * error, and > 0 if there was no room in the right hand block.
3352 *
3353 * this will only push up to 1/2 the contents of the left node over
3354 */
3355static int balance_node_right(struct btrfs_trans_handle *trans,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003356 struct extent_buffer *dst,
3357 struct extent_buffer *src)
3358{
David Brazdil0f672f62019-12-10 10:32:29 +00003359 struct btrfs_fs_info *fs_info = trans->fs_info;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003360 int push_items = 0;
3361 int max_push;
3362 int src_nritems;
3363 int dst_nritems;
3364 int ret = 0;
3365
3366 WARN_ON(btrfs_header_generation(src) != trans->transid);
3367 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3368
3369 src_nritems = btrfs_header_nritems(src);
3370 dst_nritems = btrfs_header_nritems(dst);
3371 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
3372 if (push_items <= 0)
3373 return 1;
3374
3375 if (src_nritems < 4)
3376 return 1;
3377
3378 max_push = src_nritems / 2 + 1;
3379 /* don't try to empty the node */
3380 if (max_push >= src_nritems)
3381 return 1;
3382
3383 if (max_push < push_items)
3384 push_items = max_push;
3385
3386 ret = tree_mod_log_insert_move(dst, push_items, 0, dst_nritems);
3387 BUG_ON(ret < 0);
3388 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3389 btrfs_node_key_ptr_offset(0),
3390 (dst_nritems) *
3391 sizeof(struct btrfs_key_ptr));
3392
David Brazdil0f672f62019-12-10 10:32:29 +00003393 ret = tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items,
3394 push_items);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003395 if (ret) {
3396 btrfs_abort_transaction(trans, ret);
3397 return ret;
3398 }
3399 copy_extent_buffer(dst, src,
3400 btrfs_node_key_ptr_offset(0),
3401 btrfs_node_key_ptr_offset(src_nritems - push_items),
3402 push_items * sizeof(struct btrfs_key_ptr));
3403
3404 btrfs_set_header_nritems(src, src_nritems - push_items);
3405 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3406
3407 btrfs_mark_buffer_dirty(src);
3408 btrfs_mark_buffer_dirty(dst);
3409
3410 return ret;
3411}
3412
3413/*
3414 * helper function to insert a new root level in the tree.
3415 * A new node is allocated, and a single item is inserted to
3416 * point to the existing root
3417 *
3418 * returns zero on success or < 0 on failure.
3419 */
3420static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3421 struct btrfs_root *root,
3422 struct btrfs_path *path, int level)
3423{
3424 struct btrfs_fs_info *fs_info = root->fs_info;
3425 u64 lower_gen;
3426 struct extent_buffer *lower;
3427 struct extent_buffer *c;
3428 struct extent_buffer *old;
3429 struct btrfs_disk_key lower_key;
3430 int ret;
3431
3432 BUG_ON(path->nodes[level]);
3433 BUG_ON(path->nodes[level-1] != root->node);
3434
3435 lower = path->nodes[level-1];
3436 if (level == 1)
3437 btrfs_item_key(lower, &lower_key, 0);
3438 else
3439 btrfs_node_key(lower, &lower_key, 0);
3440
David Brazdil0f672f62019-12-10 10:32:29 +00003441 c = alloc_tree_block_no_bg_flush(trans, root, 0, &lower_key, level,
3442 root->node->start, 0);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003443 if (IS_ERR(c))
3444 return PTR_ERR(c);
3445
3446 root_add_used(root, fs_info->nodesize);
3447
3448 btrfs_set_header_nritems(c, 1);
3449 btrfs_set_node_key(c, &lower_key, 0);
3450 btrfs_set_node_blockptr(c, 0, lower->start);
3451 lower_gen = btrfs_header_generation(lower);
3452 WARN_ON(lower_gen != trans->transid);
3453
3454 btrfs_set_node_ptr_generation(c, 0, lower_gen);
3455
3456 btrfs_mark_buffer_dirty(c);
3457
3458 old = root->node;
3459 ret = tree_mod_log_insert_root(root->node, c, 0);
3460 BUG_ON(ret < 0);
3461 rcu_assign_pointer(root->node, c);
3462
3463 /* the super has an extra ref to root->node */
3464 free_extent_buffer(old);
3465
3466 add_root_to_dirty_list(root);
3467 extent_buffer_get(c);
3468 path->nodes[level] = c;
3469 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
3470 path->slots[level] = 0;
3471 return 0;
3472}
3473
3474/*
3475 * worker function to insert a single pointer in a node.
3476 * the node should have enough room for the pointer already
3477 *
3478 * slot and level indicate where you want the key to go, and
3479 * blocknr is the block the key points to.
3480 */
3481static void insert_ptr(struct btrfs_trans_handle *trans,
David Brazdil0f672f62019-12-10 10:32:29 +00003482 struct btrfs_path *path,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003483 struct btrfs_disk_key *key, u64 bytenr,
3484 int slot, int level)
3485{
3486 struct extent_buffer *lower;
3487 int nritems;
3488 int ret;
3489
3490 BUG_ON(!path->nodes[level]);
3491 btrfs_assert_tree_locked(path->nodes[level]);
3492 lower = path->nodes[level];
3493 nritems = btrfs_header_nritems(lower);
3494 BUG_ON(slot > nritems);
David Brazdil0f672f62019-12-10 10:32:29 +00003495 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(trans->fs_info));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003496 if (slot != nritems) {
3497 if (level) {
3498 ret = tree_mod_log_insert_move(lower, slot + 1, slot,
3499 nritems - slot);
3500 BUG_ON(ret < 0);
3501 }
3502 memmove_extent_buffer(lower,
3503 btrfs_node_key_ptr_offset(slot + 1),
3504 btrfs_node_key_ptr_offset(slot),
3505 (nritems - slot) * sizeof(struct btrfs_key_ptr));
3506 }
3507 if (level) {
3508 ret = tree_mod_log_insert_key(lower, slot, MOD_LOG_KEY_ADD,
3509 GFP_NOFS);
3510 BUG_ON(ret < 0);
3511 }
3512 btrfs_set_node_key(lower, key, slot);
3513 btrfs_set_node_blockptr(lower, slot, bytenr);
3514 WARN_ON(trans->transid == 0);
3515 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3516 btrfs_set_header_nritems(lower, nritems + 1);
3517 btrfs_mark_buffer_dirty(lower);
3518}
3519
3520/*
3521 * split the node at the specified level in path in two.
3522 * The path is corrected to point to the appropriate node after the split
3523 *
3524 * Before splitting this tries to make some room in the node by pushing
3525 * left and right, if either one works, it returns right away.
3526 *
3527 * returns 0 on success and < 0 on failure
3528 */
3529static noinline int split_node(struct btrfs_trans_handle *trans,
3530 struct btrfs_root *root,
3531 struct btrfs_path *path, int level)
3532{
3533 struct btrfs_fs_info *fs_info = root->fs_info;
3534 struct extent_buffer *c;
3535 struct extent_buffer *split;
3536 struct btrfs_disk_key disk_key;
3537 int mid;
3538 int ret;
3539 u32 c_nritems;
3540
3541 c = path->nodes[level];
3542 WARN_ON(btrfs_header_generation(c) != trans->transid);
3543 if (c == root->node) {
3544 /*
3545 * trying to split the root, lets make a new one
3546 *
3547	 * tree mod log: We don't log the removal of the old root in
3548 * insert_new_root, because that root buffer will be kept as a
3549 * normal node. We are going to log removal of half of the
3550 * elements below with tree_mod_log_eb_copy. We're holding a
3551 * tree lock on the buffer, which is why we cannot race with
3552 * other tree_mod_log users.
3553 */
3554 ret = insert_new_root(trans, root, path, level + 1);
3555 if (ret)
3556 return ret;
3557 } else {
3558 ret = push_nodes_for_insert(trans, root, path, level);
3559 c = path->nodes[level];
3560 if (!ret && btrfs_header_nritems(c) <
3561 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3)
3562 return 0;
3563 if (ret < 0)
3564 return ret;
3565 }
3566
3567 c_nritems = btrfs_header_nritems(c);
3568 mid = (c_nritems + 1) / 2;
3569 btrfs_node_key(c, &disk_key, mid);
3570
David Brazdil0f672f62019-12-10 10:32:29 +00003571 split = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, level,
3572 c->start, 0);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003573 if (IS_ERR(split))
3574 return PTR_ERR(split);
3575
3576 root_add_used(root, fs_info->nodesize);
3577 ASSERT(btrfs_header_level(c) == level);
3578
David Brazdil0f672f62019-12-10 10:32:29 +00003579 ret = tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003580 if (ret) {
3581 btrfs_abort_transaction(trans, ret);
3582 return ret;
3583 }
3584 copy_extent_buffer(split, c,
3585 btrfs_node_key_ptr_offset(0),
3586 btrfs_node_key_ptr_offset(mid),
3587 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3588 btrfs_set_header_nritems(split, c_nritems - mid);
3589 btrfs_set_header_nritems(c, mid);
3590 ret = 0;
3591
3592 btrfs_mark_buffer_dirty(c);
3593 btrfs_mark_buffer_dirty(split);
3594
David Brazdil0f672f62019-12-10 10:32:29 +00003595 insert_ptr(trans, path, &disk_key, split->start,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003596 path->slots[level + 1] + 1, level + 1);
3597
3598 if (path->slots[level] >= mid) {
3599 path->slots[level] -= mid;
3600 btrfs_tree_unlock(c);
3601 free_extent_buffer(c);
3602 path->nodes[level] = split;
3603 path->slots[level + 1] += 1;
3604 } else {
3605 btrfs_tree_unlock(split);
3606 free_extent_buffer(split);
3607 }
3608 return ret;
3609}
3610
3611/*
3612 * how many bytes are required to store the items in a leaf. start
3613 * and nr indicate which items in the leaf to check. This totals up the
3614 * space used both by the item structs and the item data
3615 */
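/*
 * Item data is laid out back to front, so the bytes used by items
 * [start, end] work out to:
 *
 *	offset(start) + size(start) - offset(end) + nr * sizeof(struct btrfs_item)
 *
 * since offset(start) + size(start) is the highest used data byte and
 * offset(end) the lowest.
 */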
3616static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3617{
3618 struct btrfs_item *start_item;
3619 struct btrfs_item *end_item;
3620 struct btrfs_map_token token;
3621 int data_len;
3622 int nritems = btrfs_header_nritems(l);
3623 int end = min(nritems, start + nr) - 1;
3624
3625 if (!nr)
3626 return 0;
David Brazdil0f672f62019-12-10 10:32:29 +00003627 btrfs_init_map_token(&token, l);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003628 start_item = btrfs_item_nr(start);
3629 end_item = btrfs_item_nr(end);
3630 data_len = btrfs_token_item_offset(l, start_item, &token) +
3631 btrfs_token_item_size(l, start_item, &token);
3632 data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3633 data_len += sizeof(struct btrfs_item) * nr;
3634 WARN_ON(data_len < 0);
3635 return data_len;
3636}
3637
3638/*
3639 * The space between the end of the leaf items and
3640 * the start of the leaf data. IOW, how much room
3641 * the leaf has left for both items and data
3642 */
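/*
 * For reference, the leaf layout this measures (item data grows down
 * towards the item array):
 *
 *	[header][item 0][item 1]..[item N-1]..free space..[data N-1]..[data 0]
 */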
David Brazdil0f672f62019-12-10 10:32:29 +00003643noinline int btrfs_leaf_free_space(struct extent_buffer *leaf)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003644{
David Brazdil0f672f62019-12-10 10:32:29 +00003645 struct btrfs_fs_info *fs_info = leaf->fs_info;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003646 int nritems = btrfs_header_nritems(leaf);
3647 int ret;
3648
3649 ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
3650 if (ret < 0) {
3651 btrfs_crit(fs_info,
3652 "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3653 ret,
3654 (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
3655 leaf_space_used(leaf, 0, nritems), nritems);
3656 }
3657 return ret;
3658}
3659
3660/*
3661 * min slot controls the lowest index we're willing to push to the
3662 * right. We'll push up to and including min_slot, but no lower
3663 */
David Brazdil0f672f62019-12-10 10:32:29 +00003664static noinline int __push_leaf_right(struct btrfs_path *path,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003665 int data_size, int empty,
3666 struct extent_buffer *right,
3667 int free_space, u32 left_nritems,
3668 u32 min_slot)
3669{
David Brazdil0f672f62019-12-10 10:32:29 +00003670 struct btrfs_fs_info *fs_info = right->fs_info;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003671 struct extent_buffer *left = path->nodes[0];
3672 struct extent_buffer *upper = path->nodes[1];
3673 struct btrfs_map_token token;
3674 struct btrfs_disk_key disk_key;
3675 int slot;
3676 u32 i;
3677 int push_space = 0;
3678 int push_items = 0;
3679 struct btrfs_item *item;
3680 u32 nr;
3681 u32 right_nritems;
3682 u32 data_end;
3683 u32 this_item_size;
3684
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003685 if (empty)
3686 nr = 0;
3687 else
3688 nr = max_t(u32, 1, min_slot);
3689
3690 if (path->slots[0] >= left_nritems)
3691 push_space += data_size;
3692
3693 slot = path->slots[1];
3694 i = left_nritems - 1;
3695 while (i >= nr) {
3696 item = btrfs_item_nr(i);
3697
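		/*
		 * Once something has been pushed, stop at the slot we are
		 * inserting into: pushing it across only pays off while the
		 * right leaf can still absorb roughly twice the space we
		 * have pushed so far.
		 */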
3698 if (!empty && push_items > 0) {
3699 if (path->slots[0] > i)
3700 break;
3701 if (path->slots[0] == i) {
David Brazdil0f672f62019-12-10 10:32:29 +00003702 int space = btrfs_leaf_free_space(left);
3703
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003704 if (space + push_space * 2 > free_space)
3705 break;
3706 }
3707 }
3708
3709 if (path->slots[0] == i)
3710 push_space += data_size;
3711
3712 this_item_size = btrfs_item_size(left, item);
3713 if (this_item_size + sizeof(*item) + push_space > free_space)
3714 break;
3715
3716 push_items++;
3717 push_space += this_item_size + sizeof(*item);
3718 if (i == 0)
3719 break;
3720 i--;
3721 }
3722
3723 if (push_items == 0)
3724 goto out_unlock;
3725
3726 WARN_ON(!empty && push_items == left_nritems);
3727
3728 /* push left to right */
3729 right_nritems = btrfs_header_nritems(right);
3730
3731 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
David Brazdil0f672f62019-12-10 10:32:29 +00003732 push_space -= leaf_data_end(left);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003733
3734 /* make room in the right data area */
David Brazdil0f672f62019-12-10 10:32:29 +00003735 data_end = leaf_data_end(right);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003736 memmove_extent_buffer(right,
3737 BTRFS_LEAF_DATA_OFFSET + data_end - push_space,
3738 BTRFS_LEAF_DATA_OFFSET + data_end,
3739 BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
3740
3741 /* copy from the left data area */
3742 copy_extent_buffer(right, left, BTRFS_LEAF_DATA_OFFSET +
3743 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
David Brazdil0f672f62019-12-10 10:32:29 +00003744 BTRFS_LEAF_DATA_OFFSET + leaf_data_end(left),
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003745 push_space);
3746
3747 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3748 btrfs_item_nr_offset(0),
3749 right_nritems * sizeof(struct btrfs_item));
3750
3751 /* copy the items from left to right */
3752 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3753 btrfs_item_nr_offset(left_nritems - push_items),
3754 push_items * sizeof(struct btrfs_item));
3755
3756 /* update the item pointers */
David Brazdil0f672f62019-12-10 10:32:29 +00003757 btrfs_init_map_token(&token, right);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003758 right_nritems += push_items;
3759 btrfs_set_header_nritems(right, right_nritems);
3760 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3761 for (i = 0; i < right_nritems; i++) {
3762 item = btrfs_item_nr(i);
3763 push_space -= btrfs_token_item_size(right, item, &token);
3764 btrfs_set_token_item_offset(right, item, push_space, &token);
3765 }
3766
3767 left_nritems -= push_items;
3768 btrfs_set_header_nritems(left, left_nritems);
3769
3770 if (left_nritems)
3771 btrfs_mark_buffer_dirty(left);
3772 else
David Brazdil0f672f62019-12-10 10:32:29 +00003773 btrfs_clean_tree_block(left);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003774
3775 btrfs_mark_buffer_dirty(right);
3776
3777 btrfs_item_key(right, &disk_key, 0);
3778 btrfs_set_node_key(upper, &disk_key, slot + 1);
3779 btrfs_mark_buffer_dirty(upper);
3780
3781 /* then fixup the leaf pointer in the path */
3782 if (path->slots[0] >= left_nritems) {
3783 path->slots[0] -= left_nritems;
3784 if (btrfs_header_nritems(path->nodes[0]) == 0)
David Brazdil0f672f62019-12-10 10:32:29 +00003785 btrfs_clean_tree_block(path->nodes[0]);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003786 btrfs_tree_unlock(path->nodes[0]);
3787 free_extent_buffer(path->nodes[0]);
3788 path->nodes[0] = right;
3789 path->slots[1] += 1;
3790 } else {
3791 btrfs_tree_unlock(right);
3792 free_extent_buffer(right);
3793 }
3794 return 0;
3795
3796out_unlock:
3797 btrfs_tree_unlock(right);
3798 free_extent_buffer(right);
3799 return 1;
3800}
3801
3802/*
3803 * push some data in the path leaf to the right, trying to free up at
3804 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3805 *
3806 * returns 1 if the push failed because the other node didn't have enough
3807 * room, 0 if everything worked out and < 0 if there were major errors.
3808 *
3809 * this will push starting from min_slot to the end of the leaf. It won't
3810 * push any slot lower than min_slot
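 *
 * Callers typically treat the result as: < 0 hard error, 0 items were
 * pushed and the leaf has room, 1 nothing could be pushed (no right
 * sibling or it had no room); callers such as split_leaf then try
 * pushing left instead.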
3811 */
3812static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3813 *root, struct btrfs_path *path,
3814 int min_data_size, int data_size,
3815 int empty, u32 min_slot)
3816{
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003817 struct extent_buffer *left = path->nodes[0];
3818 struct extent_buffer *right;
3819 struct extent_buffer *upper;
3820 int slot;
3821 int free_space;
3822 u32 left_nritems;
3823 int ret;
3824
3825 if (!path->nodes[1])
3826 return 1;
3827
3828 slot = path->slots[1];
3829 upper = path->nodes[1];
3830 if (slot >= btrfs_header_nritems(upper) - 1)
3831 return 1;
3832
3833 btrfs_assert_tree_locked(path->nodes[1]);
3834
David Brazdil0f672f62019-12-10 10:32:29 +00003835 right = btrfs_read_node_slot(upper, slot + 1);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003836 /*
3837 * slot + 1 is not valid or we fail to read the right node,
3838 * no big deal, just return.
3839 */
3840 if (IS_ERR(right))
3841 return 1;
3842
3843 btrfs_tree_lock(right);
David Brazdil0f672f62019-12-10 10:32:29 +00003844 btrfs_set_lock_blocking_write(right);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003845
David Brazdil0f672f62019-12-10 10:32:29 +00003846 free_space = btrfs_leaf_free_space(right);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003847 if (free_space < data_size)
3848 goto out_unlock;
3849
3850 /* cow and double check */
3851 ret = btrfs_cow_block(trans, root, right, upper,
3852 slot + 1, &right);
3853 if (ret)
3854 goto out_unlock;
3855
David Brazdil0f672f62019-12-10 10:32:29 +00003856 free_space = btrfs_leaf_free_space(right);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003857 if (free_space < data_size)
3858 goto out_unlock;
3859
3860 left_nritems = btrfs_header_nritems(left);
3861 if (left_nritems == 0)
3862 goto out_unlock;
3863
3864 if (path->slots[0] == left_nritems && !empty) {
3865 /* Key greater than all keys in the leaf, right neighbor has
3866 * enough room for it and we're not emptying our leaf to delete
3867 * it, therefore use right neighbor to insert the new item and
David Brazdil0f672f62019-12-10 10:32:29 +00003868 * no need to touch/dirty our left leaf. */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003869 btrfs_tree_unlock(left);
3870 free_extent_buffer(left);
3871 path->nodes[0] = right;
3872 path->slots[0] = 0;
3873 path->slots[1]++;
3874 return 0;
3875 }
3876
David Brazdil0f672f62019-12-10 10:32:29 +00003877 return __push_leaf_right(path, min_data_size, empty,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003878 right, free_space, left_nritems, min_slot);
3879out_unlock:
3880 btrfs_tree_unlock(right);
3881 free_extent_buffer(right);
3882 return 1;
3883}
3884
3885/*
3886 * push some data in the path leaf to the left, trying to free up at
3887 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3888 *
3889 * max_slot can put a limit on how far into the leaf we'll push items. The
3890 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
3891 * items
3892 */
David Brazdil0f672f62019-12-10 10:32:29 +00003893static noinline int __push_leaf_left(struct btrfs_path *path, int data_size,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003894 int empty, struct extent_buffer *left,
3895 int free_space, u32 right_nritems,
3896 u32 max_slot)
3897{
David Brazdil0f672f62019-12-10 10:32:29 +00003898 struct btrfs_fs_info *fs_info = left->fs_info;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003899 struct btrfs_disk_key disk_key;
3900 struct extent_buffer *right = path->nodes[0];
3901 int i;
3902 int push_space = 0;
3903 int push_items = 0;
3904 struct btrfs_item *item;
3905 u32 old_left_nritems;
3906 u32 nr;
3907 int ret = 0;
3908 u32 this_item_size;
3909 u32 old_left_item_size;
3910 struct btrfs_map_token token;
3911
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003912 if (empty)
3913 nr = min(right_nritems, max_slot);
3914 else
3915 nr = min(right_nritems - 1, max_slot);
3916
3917 for (i = 0; i < nr; i++) {
3918 item = btrfs_item_nr(i);
3919
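		/* same insertion-slot heuristic as in __push_leaf_right() */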
3920 if (!empty && push_items > 0) {
3921 if (path->slots[0] < i)
3922 break;
3923 if (path->slots[0] == i) {
David Brazdil0f672f62019-12-10 10:32:29 +00003924 int space = btrfs_leaf_free_space(right);
3925
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003926 if (space + push_space * 2 > free_space)
3927 break;
3928 }
3929 }
3930
3931 if (path->slots[0] == i)
3932 push_space += data_size;
3933
3934 this_item_size = btrfs_item_size(right, item);
3935 if (this_item_size + sizeof(*item) + push_space > free_space)
3936 break;
3937
3938 push_items++;
3939 push_space += this_item_size + sizeof(*item);
3940 }
3941
3942 if (push_items == 0) {
3943 ret = 1;
3944 goto out;
3945 }
3946 WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3947
3948 /* push data from right to left */
3949 copy_extent_buffer(left, right,
3950 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3951 btrfs_item_nr_offset(0),
3952 push_items * sizeof(struct btrfs_item));
3953
3954 push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
3955 btrfs_item_offset_nr(right, push_items - 1);
3956
3957 copy_extent_buffer(left, right, BTRFS_LEAF_DATA_OFFSET +
David Brazdil0f672f62019-12-10 10:32:29 +00003958 leaf_data_end(left) - push_space,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003959 BTRFS_LEAF_DATA_OFFSET +
3960 btrfs_item_offset_nr(right, push_items - 1),
3961 push_space);
3962 old_left_nritems = btrfs_header_nritems(left);
3963 BUG_ON(old_left_nritems <= 0);
3964
David Brazdil0f672f62019-12-10 10:32:29 +00003965 btrfs_init_map_token(&token, left);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003966 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3967 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3968 u32 ioff;
3969
3970 item = btrfs_item_nr(i);
3971
3972 ioff = btrfs_token_item_offset(left, item, &token);
3973 btrfs_set_token_item_offset(left, item,
3974 ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size),
3975 &token);
3976 }
3977 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3978
3979 /* fixup right node */
3980 if (push_items > right_nritems)
3981 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3982 right_nritems);
3983
3984 if (push_items < right_nritems) {
3985 push_space = btrfs_item_offset_nr(right, push_items - 1) -
David Brazdil0f672f62019-12-10 10:32:29 +00003986 leaf_data_end(right);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003987 memmove_extent_buffer(right, BTRFS_LEAF_DATA_OFFSET +
3988 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3989 BTRFS_LEAF_DATA_OFFSET +
David Brazdil0f672f62019-12-10 10:32:29 +00003990 leaf_data_end(right), push_space);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003991
3992 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3993 btrfs_item_nr_offset(push_items),
3994 (btrfs_header_nritems(right) - push_items) *
3995 sizeof(struct btrfs_item));
3996 }
David Brazdil0f672f62019-12-10 10:32:29 +00003997
3998 btrfs_init_map_token(&token, right);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003999 right_nritems -= push_items;
4000 btrfs_set_header_nritems(right, right_nritems);
4001 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
4002 for (i = 0; i < right_nritems; i++) {
4003 item = btrfs_item_nr(i);
4004
4005 push_space = push_space - btrfs_token_item_size(right,
4006 item, &token);
4007 btrfs_set_token_item_offset(right, item, push_space, &token);
4008 }
4009
4010 btrfs_mark_buffer_dirty(left);
4011 if (right_nritems)
4012 btrfs_mark_buffer_dirty(right);
4013 else
David Brazdil0f672f62019-12-10 10:32:29 +00004014 btrfs_clean_tree_block(right);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004015
4016 btrfs_item_key(right, &disk_key, 0);
4017 fixup_low_keys(path, &disk_key, 1);
4018
4019 /* then fixup the leaf pointer in the path */
4020 if (path->slots[0] < push_items) {
4021 path->slots[0] += old_left_nritems;
4022 btrfs_tree_unlock(path->nodes[0]);
4023 free_extent_buffer(path->nodes[0]);
4024 path->nodes[0] = left;
4025 path->slots[1] -= 1;
4026 } else {
4027 btrfs_tree_unlock(left);
4028 free_extent_buffer(left);
4029 path->slots[0] -= push_items;
4030 }
4031 BUG_ON(path->slots[0] < 0);
4032 return ret;
4033out:
4034 btrfs_tree_unlock(left);
4035 free_extent_buffer(left);
4036 return ret;
4037}
4038
4039/*
4040 * push some data in the path leaf to the left, trying to free up at
4041 * least data_size bytes. returns zero if the push worked, nonzero otherwise
4042 *
4043 * max_slot can put a limit on how far into the leaf we'll push items. The
4044 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
4045 * items
4046 */
4047static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
4048 *root, struct btrfs_path *path, int min_data_size,
4049 int data_size, int empty, u32 max_slot)
4050{
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004051 struct extent_buffer *right = path->nodes[0];
4052 struct extent_buffer *left;
4053 int slot;
4054 int free_space;
4055 u32 right_nritems;
4056 int ret = 0;
4057
4058 slot = path->slots[1];
4059 if (slot == 0)
4060 return 1;
4061 if (!path->nodes[1])
4062 return 1;
4063
4064 right_nritems = btrfs_header_nritems(right);
4065 if (right_nritems == 0)
4066 return 1;
4067
4068 btrfs_assert_tree_locked(path->nodes[1]);
4069
David Brazdil0f672f62019-12-10 10:32:29 +00004070 left = btrfs_read_node_slot(path->nodes[1], slot - 1);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004071 /*
4072 * slot - 1 is not valid or we fail to read the left node,
4073 * no big deal, just return.
4074 */
4075 if (IS_ERR(left))
4076 return 1;
4077
4078 btrfs_tree_lock(left);
David Brazdil0f672f62019-12-10 10:32:29 +00004079 btrfs_set_lock_blocking_write(left);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004080
David Brazdil0f672f62019-12-10 10:32:29 +00004081 free_space = btrfs_leaf_free_space(left);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004082 if (free_space < data_size) {
4083 ret = 1;
4084 goto out;
4085 }
4086
4087 /* cow and double check */
4088 ret = btrfs_cow_block(trans, root, left,
4089 path->nodes[1], slot - 1, &left);
4090 if (ret) {
4091 /* we hit -ENOSPC, but it isn't fatal here */
4092 if (ret == -ENOSPC)
4093 ret = 1;
4094 goto out;
4095 }
4096
David Brazdil0f672f62019-12-10 10:32:29 +00004097 free_space = btrfs_leaf_free_space(left);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004098 if (free_space < data_size) {
4099 ret = 1;
4100 goto out;
4101 }
4102
David Brazdil0f672f62019-12-10 10:32:29 +00004103 return __push_leaf_left(path, min_data_size,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004104 empty, left, free_space, right_nritems,
4105 max_slot);
4106out:
4107 btrfs_tree_unlock(left);
4108 free_extent_buffer(left);
4109 return ret;
4110}
4111
4112/*
4113 * copy the items from leaf 'l' starting at 'mid' into the freshly
4114 * allocated leaf 'right', link 'right' into the tree and fix up the path.
4115 */
4116static noinline void copy_for_split(struct btrfs_trans_handle *trans,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004117 struct btrfs_path *path,
4118 struct extent_buffer *l,
4119 struct extent_buffer *right,
4120 int slot, int mid, int nritems)
4121{
David Brazdil0f672f62019-12-10 10:32:29 +00004122 struct btrfs_fs_info *fs_info = trans->fs_info;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004123 int data_copy_size;
4124 int rt_data_off;
4125 int i;
4126 struct btrfs_disk_key disk_key;
4127 struct btrfs_map_token token;
4128
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004129 nritems = nritems - mid;
4130 btrfs_set_header_nritems(right, nritems);
David Brazdil0f672f62019-12-10 10:32:29 +00004131 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(l);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004132
4133 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
4134 btrfs_item_nr_offset(mid),
4135 nritems * sizeof(struct btrfs_item));
4136
4137 copy_extent_buffer(right, l,
4138 BTRFS_LEAF_DATA_OFFSET + BTRFS_LEAF_DATA_SIZE(fs_info) -
4139 data_copy_size, BTRFS_LEAF_DATA_OFFSET +
David Brazdil0f672f62019-12-10 10:32:29 +00004140 leaf_data_end(l), data_copy_size);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004141
4142 rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid);
4143
David Brazdil0f672f62019-12-10 10:32:29 +00004144 btrfs_init_map_token(&token, right);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004145 for (i = 0; i < nritems; i++) {
4146 struct btrfs_item *item = btrfs_item_nr(i);
4147 u32 ioff;
4148
4149 ioff = btrfs_token_item_offset(right, item, &token);
4150 btrfs_set_token_item_offset(right, item,
4151 ioff + rt_data_off, &token);
4152 }
4153
4154 btrfs_set_header_nritems(l, mid);
4155 btrfs_item_key(right, &disk_key, 0);
David Brazdil0f672f62019-12-10 10:32:29 +00004156 insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004157
4158 btrfs_mark_buffer_dirty(right);
4159 btrfs_mark_buffer_dirty(l);
4160 BUG_ON(path->slots[0] != slot);
4161
4162 if (mid <= slot) {
4163 btrfs_tree_unlock(path->nodes[0]);
4164 free_extent_buffer(path->nodes[0]);
4165 path->nodes[0] = right;
4166 path->slots[0] -= mid;
4167 path->slots[1] += 1;
4168 } else {
4169 btrfs_tree_unlock(right);
4170 free_extent_buffer(right);
4171 }
4172
4173 BUG_ON(path->slots[0] < 0);
4174}
4175
4176/*
4177 * double splits happen when we need to insert a big item in the middle
4178 * of a leaf. A double split can leave us with 3 mostly empty leaves:
4179 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
4180 * A B C
4181 *
4182 * We avoid this by trying to push the items on either side of our target
4183 * into the adjacent leaves. If all goes well we can avoid the double split
4184 * completely.
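 *
 * Success means the target slot ends up at the very start or end of
 * its leaf, or enough space was freed outright; the caller can then
 * split at the leaf edge instead of carving out a third, mostly empty
 * leaf in the middle.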
4185 */
4186static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
4187 struct btrfs_root *root,
4188 struct btrfs_path *path,
4189 int data_size)
4190{
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004191 int ret;
4192 int progress = 0;
4193 int slot;
4194 u32 nritems;
4195 int space_needed = data_size;
4196
4197 slot = path->slots[0];
4198 if (slot < btrfs_header_nritems(path->nodes[0]))
David Brazdil0f672f62019-12-10 10:32:29 +00004199 space_needed -= btrfs_leaf_free_space(path->nodes[0]);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004200
4201 /*
4202 * try to push all the items after our slot into the
4203 * right leaf
4204 */
4205 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
4206 if (ret < 0)
4207 return ret;
4208
4209 if (ret == 0)
4210 progress++;
4211
4212 nritems = btrfs_header_nritems(path->nodes[0]);
4213 /*
4214 * our goal is to get our slot at the start or end of a leaf. If
4215 * we've done so we're done
4216 */
4217 if (path->slots[0] == 0 || path->slots[0] == nritems)
4218 return 0;
4219
David Brazdil0f672f62019-12-10 10:32:29 +00004220 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004221 return 0;
4222
4223 /* try to push all the items before our slot into the next leaf */
4224 slot = path->slots[0];
4225 space_needed = data_size;
4226 if (slot > 0)
David Brazdil0f672f62019-12-10 10:32:29 +00004227 space_needed -= btrfs_leaf_free_space(path->nodes[0]);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004228 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
4229 if (ret < 0)
4230 return ret;
4231
4232 if (ret == 0)
4233 progress++;
4234
4235 if (progress)
4236 return 0;
4237 return 1;
4238}
4239
4240/*
4241 * split the path's leaf in two, making sure there is at least data_size
4242 * available for the resulting leaf level of the path.
4243 *
4244 * returns 0 if all went well and < 0 on failure.
4245 */
4246static noinline int split_leaf(struct btrfs_trans_handle *trans,
4247 struct btrfs_root *root,
4248 const struct btrfs_key *ins_key,
4249 struct btrfs_path *path, int data_size,
4250 int extend)
4251{
4252 struct btrfs_disk_key disk_key;
4253 struct extent_buffer *l;
4254 u32 nritems;
4255 int mid;
4256 int slot;
4257 struct extent_buffer *right;
4258 struct btrfs_fs_info *fs_info = root->fs_info;
4259 int ret = 0;
4260 int wret;
4261 int split;
4262 int num_doubles = 0;
4263 int tried_avoid_double = 0;
4264
4265 l = path->nodes[0];
4266 slot = path->slots[0];
4267 if (extend && data_size + btrfs_item_size_nr(l, slot) +
4268 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
4269 return -EOVERFLOW;
4270
4271 /* first try to make some room by pushing left and right */
4272 if (data_size && path->nodes[1]) {
4273 int space_needed = data_size;
4274
4275 if (slot < btrfs_header_nritems(l))
David Brazdil0f672f62019-12-10 10:32:29 +00004276 space_needed -= btrfs_leaf_free_space(l);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004277
4278 wret = push_leaf_right(trans, root, path, space_needed,
4279 space_needed, 0, 0);
4280 if (wret < 0)
4281 return wret;
4282 if (wret) {
4283 space_needed = data_size;
4284 if (slot > 0)
David Brazdil0f672f62019-12-10 10:32:29 +00004285 space_needed -= btrfs_leaf_free_space(l);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004286 wret = push_leaf_left(trans, root, path, space_needed,
4287 space_needed, 0, (u32)-1);
4288 if (wret < 0)
4289 return wret;
4290 }
4291 l = path->nodes[0];
4292
4293 /* did the pushes work? */
David Brazdil0f672f62019-12-10 10:32:29 +00004294 if (btrfs_leaf_free_space(l) >= data_size)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004295 return 0;
4296 }
4297
4298 if (!path->nodes[1]) {
4299 ret = insert_new_root(trans, root, path, 1);
4300 if (ret)
4301 return ret;
4302 }
4303again:
4304 split = 1;
4305 l = path->nodes[0];
4306 slot = path->slots[0];
4307 nritems = btrfs_header_nritems(l);
4308 mid = (nritems + 1) / 2;
4309
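	/*
	 * Decide how to split: split == 1 is the normal split at 'mid',
	 * split == 0 means the target slot sits at the edge of the leaf so
	 * we link in a brand new empty leaf instead of copying anything,
	 * and split == 2 means a single split will not make enough room
	 * and a second pass will be needed.
	 */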
4310 if (mid <= slot) {
4311 if (nritems == 1 ||
4312 leaf_space_used(l, mid, nritems - mid) + data_size >
4313 BTRFS_LEAF_DATA_SIZE(fs_info)) {
4314 if (slot >= nritems) {
4315 split = 0;
4316 } else {
4317 mid = slot;
4318 if (mid != nritems &&
4319 leaf_space_used(l, mid, nritems - mid) +
4320 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
4321 if (data_size && !tried_avoid_double)
4322 goto push_for_double;
4323 split = 2;
4324 }
4325 }
4326 }
4327 } else {
4328 if (leaf_space_used(l, 0, mid) + data_size >
4329 BTRFS_LEAF_DATA_SIZE(fs_info)) {
4330 if (!extend && data_size && slot == 0) {
4331 split = 0;
4332 } else if ((extend || !data_size) && slot == 0) {
4333 mid = 1;
4334 } else {
4335 mid = slot;
4336 if (mid != nritems &&
4337 leaf_space_used(l, mid, nritems - mid) +
4338 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
4339 if (data_size && !tried_avoid_double)
4340 goto push_for_double;
4341 split = 2;
4342 }
4343 }
4344 }
4345 }
4346
4347 if (split == 0)
4348 btrfs_cpu_key_to_disk(&disk_key, ins_key);
4349 else
4350 btrfs_item_key(l, &disk_key, mid);
4351
David Brazdil0f672f62019-12-10 10:32:29 +00004352 right = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, 0,
4353 l->start, 0);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004354 if (IS_ERR(right))
4355 return PTR_ERR(right);
4356
4357 root_add_used(root, fs_info->nodesize);
4358
4359 if (split == 0) {
4360 if (mid <= slot) {
4361 btrfs_set_header_nritems(right, 0);
David Brazdil0f672f62019-12-10 10:32:29 +00004362 insert_ptr(trans, path, &disk_key,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004363 right->start, path->slots[1] + 1, 1);
4364 btrfs_tree_unlock(path->nodes[0]);
4365 free_extent_buffer(path->nodes[0]);
4366 path->nodes[0] = right;
4367 path->slots[0] = 0;
4368 path->slots[1] += 1;
4369 } else {
4370 btrfs_set_header_nritems(right, 0);
David Brazdil0f672f62019-12-10 10:32:29 +00004371 insert_ptr(trans, path, &disk_key,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004372 right->start, path->slots[1], 1);
4373 btrfs_tree_unlock(path->nodes[0]);
4374 free_extent_buffer(path->nodes[0]);
4375 path->nodes[0] = right;
4376 path->slots[0] = 0;
4377 if (path->slots[1] == 0)
4378 fixup_low_keys(path, &disk_key, 1);
4379 }
4380 /*
4381 * We created a new leaf 'right' for the required ins_len; the
4382 * caller will do btrfs_mark_buffer_dirty() on it after copying
4383 * ins_len bytes of content into it.
4384 */
4385 return ret;
4386 }
4387
David Brazdil0f672f62019-12-10 10:32:29 +00004388 copy_for_split(trans, path, l, right, slot, mid, nritems);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004389
4390 if (split == 2) {
4391 BUG_ON(num_doubles != 0);
4392 num_doubles++;
4393 goto again;
4394 }
4395
4396 return 0;
4397
4398push_for_double:
4399 push_for_double_split(trans, root, path, data_size);
4400 tried_avoid_double = 1;
David Brazdil0f672f62019-12-10 10:32:29 +00004401 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004402 return 0;
4403 goto again;
4404}
4405
4406static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4407 struct btrfs_root *root,
4408 struct btrfs_path *path, int ins_len)
4409{
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004410 struct btrfs_key key;
4411 struct extent_buffer *leaf;
4412 struct btrfs_file_extent_item *fi;
4413 u64 extent_len = 0;
4414 u32 item_size;
4415 int ret;
4416
4417 leaf = path->nodes[0];
4418 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4419
4420 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4421 key.type != BTRFS_EXTENT_CSUM_KEY);
4422
David Brazdil0f672f62019-12-10 10:32:29 +00004423 if (btrfs_leaf_free_space(leaf) >= ins_len)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004424 return 0;
4425
4426 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4427 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4428 fi = btrfs_item_ptr(leaf, path->slots[0],
4429 struct btrfs_file_extent_item);
4430 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4431 }
4432 btrfs_release_path(path);
4433
4434 path->keep_locks = 1;
4435 path->search_for_split = 1;
4436 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4437 path->search_for_split = 0;
4438 if (ret > 0)
4439 ret = -EAGAIN;
4440 if (ret < 0)
4441 goto err;
4442
4443 ret = -EAGAIN;
4444 leaf = path->nodes[0];
4445 /* if our item isn't there, return now */
4446 if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4447 goto err;
4448
4449 /* the leaf has changed, it now has room. return now */
David Brazdil0f672f62019-12-10 10:32:29 +00004450 if (btrfs_leaf_free_space(path->nodes[0]) >= ins_len)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004451 goto err;
4452
4453 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4454 fi = btrfs_item_ptr(leaf, path->slots[0],
4455 struct btrfs_file_extent_item);
4456 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4457 goto err;
4458 }
4459
4460 btrfs_set_path_blocking(path);
4461 ret = split_leaf(trans, root, &key, path, ins_len, 1);
4462 if (ret)
4463 goto err;
4464
4465 path->keep_locks = 0;
4466 btrfs_unlock_up_safe(path, 1);
4467 return 0;
4468err:
4469 path->keep_locks = 0;
4470 return ret;
4471}
4472
David Brazdil0f672f62019-12-10 10:32:29 +00004473static noinline int split_item(struct btrfs_path *path,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004474 const struct btrfs_key *new_key,
4475 unsigned long split_offset)
4476{
4477 struct extent_buffer *leaf;
4478 struct btrfs_item *item;
4479 struct btrfs_item *new_item;
4480 int slot;
4481 char *buf;
4482 u32 nritems;
4483 u32 item_size;
4484 u32 orig_offset;
4485 struct btrfs_disk_key disk_key;
4486
4487 leaf = path->nodes[0];
David Brazdil0f672f62019-12-10 10:32:29 +00004488 BUG_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004489
4490 btrfs_set_path_blocking(path);
4491
4492 item = btrfs_item_nr(path->slots[0]);
4493 orig_offset = btrfs_item_offset(leaf, item);
4494 item_size = btrfs_item_size(leaf, item);
4495
4496 buf = kmalloc(item_size, GFP_NOFS);
4497 if (!buf)
4498 return -ENOMEM;
4499
4500 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4501 path->slots[0]), item_size);
4502
4503 slot = path->slots[0] + 1;
4504 nritems = btrfs_header_nritems(leaf);
4505 if (slot != nritems) {
4506 /* shift the items */
4507 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4508 btrfs_item_nr_offset(slot),
4509 (nritems - slot) * sizeof(struct btrfs_item));
4510 }
4511
4512 btrfs_cpu_key_to_disk(&disk_key, new_key);
4513 btrfs_set_item_key(leaf, &disk_key, slot);
4514
4515 new_item = btrfs_item_nr(slot);
4516
4517 btrfs_set_item_offset(leaf, new_item, orig_offset);
4518 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4519
4520 btrfs_set_item_offset(leaf, item,
4521 orig_offset + item_size - split_offset);
4522 btrfs_set_item_size(leaf, item, split_offset);
4523
4524 btrfs_set_header_nritems(leaf, nritems + 1);
4525
4526 /* write the data for the start of the original item */
4527 write_extent_buffer(leaf, buf,
4528 btrfs_item_ptr_offset(leaf, path->slots[0]),
4529 split_offset);
4530
4531 /* write the data for the new item */
4532 write_extent_buffer(leaf, buf + split_offset,
4533 btrfs_item_ptr_offset(leaf, slot),
4534 item_size - split_offset);
4535 btrfs_mark_buffer_dirty(leaf);
4536
David Brazdil0f672f62019-12-10 10:32:29 +00004537 BUG_ON(btrfs_leaf_free_space(leaf) < 0);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004538 kfree(buf);
4539 return 0;
4540}
4541
4542/*
4543 * This function splits a single item into two items,
4544 * giving 'new_key' to the new item and splitting the
4545 * old one at split_offset (from the start of the item).
4546 *
4547 * The path may be released by this operation. After
4548 * the split, the path is pointing to the old item. The
4549 * new item is going to be in the same node as the old one.
4550 *
4551 * Note: the item being split must be small enough to live alone on
4552 * a tree block with room for one extra struct btrfs_item
4553 *
4554 * This allows us to split the item in place, keeping a lock on the
4555 * leaf the entire time.
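 *
 * Illustrative example (sizes made up): splitting a 100 byte item at
 * split_offset 60 shrinks the original item to 60 bytes, keeping its
 * key, and creates a 40 byte item carrying 'new_key' in the next slot.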
4556 */
4557int btrfs_split_item(struct btrfs_trans_handle *trans,
4558 struct btrfs_root *root,
4559 struct btrfs_path *path,
4560 const struct btrfs_key *new_key,
4561 unsigned long split_offset)
4562{
4563 int ret;
4564 ret = setup_leaf_for_split(trans, root, path,
4565 sizeof(struct btrfs_item));
4566 if (ret)
4567 return ret;
4568
David Brazdil0f672f62019-12-10 10:32:29 +00004569 ret = split_item(path, new_key, split_offset);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004570 return ret;
4571}
4572
4573/*
4574 * This function duplicates an item, giving 'new_key' to the new item.
4575 * It guarantees both items live in the same tree leaf and the new item
4576 * is contiguous with the original item.
4577 *
4578 * This allows us to split a file extent in place, keeping a lock on the
4579 * leaf the entire time.
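 *
 * The duplicate is inserted at path->slots[0] + 1 and the original
 * item's data is copied into it byte for byte.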
4580 */
4581int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4582 struct btrfs_root *root,
4583 struct btrfs_path *path,
4584 const struct btrfs_key *new_key)
4585{
4586 struct extent_buffer *leaf;
4587 int ret;
4588 u32 item_size;
4589
4590 leaf = path->nodes[0];
4591 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4592 ret = setup_leaf_for_split(trans, root, path,
4593 item_size + sizeof(struct btrfs_item));
4594 if (ret)
4595 return ret;
4596
4597 path->slots[0]++;
4598 setup_items_for_insert(root, path, new_key, &item_size,
4599 item_size, item_size +
4600 sizeof(struct btrfs_item), 1);
4601 leaf = path->nodes[0];
4602 memcpy_extent_buffer(leaf,
4603 btrfs_item_ptr_offset(leaf, path->slots[0]),
4604 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4605 item_size);
4606 return 0;
4607}
4608
4609/*
4610 * make the item pointed to by the path smaller. new_size indicates
4611 * how small to make it, and from_end tells us if we just chop bytes
4612 * off the end of the item or if we shift the item to chop bytes off
4613 * the front.
4614 */
David Brazdil0f672f62019-12-10 10:32:29 +00004615void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004616{
4617 int slot;
4618 struct extent_buffer *leaf;
4619 struct btrfs_item *item;
4620 u32 nritems;
4621 unsigned int data_end;
4622 unsigned int old_data_start;
4623 unsigned int old_size;
4624 unsigned int size_diff;
4625 int i;
4626 struct btrfs_map_token token;
4627
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004628 leaf = path->nodes[0];
4629 slot = path->slots[0];
4630
4631 old_size = btrfs_item_size_nr(leaf, slot);
4632 if (old_size == new_size)
4633 return;
4634
4635 nritems = btrfs_header_nritems(leaf);
David Brazdil0f672f62019-12-10 10:32:29 +00004636 data_end = leaf_data_end(leaf);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004637
4638 old_data_start = btrfs_item_offset_nr(leaf, slot);
4639
4640 size_diff = old_size - new_size;
4641
4642 BUG_ON(slot < 0);
4643 BUG_ON(slot >= nritems);
4644
4645 /*
4646 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4647 */
4648 /* first correct the data pointers */
David Brazdil0f672f62019-12-10 10:32:29 +00004649 btrfs_init_map_token(&token, leaf);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004650 for (i = slot; i < nritems; i++) {
4651 u32 ioff;
4652 item = btrfs_item_nr(i);
4653
4654 ioff = btrfs_token_item_offset(leaf, item, &token);
4655 btrfs_set_token_item_offset(leaf, item,
4656 ioff + size_diff, &token);
4657 }
4658
4659 /* shift the data */
4660 if (from_end) {
4661 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4662 data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
4663 data_end, old_data_start + new_size - data_end);
4664 } else {
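		/*
		 * Truncating from the front: besides sliding the data, grow
		 * the key offset by size_diff so the item still describes
		 * the same logical range, and keep an inline file extent's
		 * header at the start of the item.
		 */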
4665 struct btrfs_disk_key disk_key;
4666 u64 offset;
4667
4668 btrfs_item_key(leaf, &disk_key, slot);
4669
4670 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4671 unsigned long ptr;
4672 struct btrfs_file_extent_item *fi;
4673
4674 fi = btrfs_item_ptr(leaf, slot,
4675 struct btrfs_file_extent_item);
4676 fi = (struct btrfs_file_extent_item *)(
4677 (unsigned long)fi - size_diff);
4678
4679 if (btrfs_file_extent_type(leaf, fi) ==
4680 BTRFS_FILE_EXTENT_INLINE) {
4681 ptr = btrfs_item_ptr_offset(leaf, slot);
4682 memmove_extent_buffer(leaf, ptr,
4683 (unsigned long)fi,
4684 BTRFS_FILE_EXTENT_INLINE_DATA_START);
4685 }
4686 }
4687
4688 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4689 data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
4690 data_end, old_data_start - data_end);
4691
4692 offset = btrfs_disk_key_offset(&disk_key);
4693 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4694 btrfs_set_item_key(leaf, &disk_key, slot);
4695 if (slot == 0)
4696 fixup_low_keys(path, &disk_key, 1);
4697 }
4698
4699 item = btrfs_item_nr(slot);
4700 btrfs_set_item_size(leaf, item, new_size);
4701 btrfs_mark_buffer_dirty(leaf);
4702
David Brazdil0f672f62019-12-10 10:32:29 +00004703 if (btrfs_leaf_free_space(leaf) < 0) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004704 btrfs_print_leaf(leaf);
4705 BUG();
4706 }
4707}
4708
4709/*
4710 * make the item pointed to by the path bigger; data_size is the added size.
4711 */
David Brazdil0f672f62019-12-10 10:32:29 +00004712void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004713{
4714 int slot;
4715 struct extent_buffer *leaf;
4716 struct btrfs_item *item;
4717 u32 nritems;
4718 unsigned int data_end;
4719 unsigned int old_data;
4720 unsigned int old_size;
4721 int i;
4722 struct btrfs_map_token token;
4723
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004724 leaf = path->nodes[0];
4725
4726 nritems = btrfs_header_nritems(leaf);
David Brazdil0f672f62019-12-10 10:32:29 +00004727 data_end = leaf_data_end(leaf);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004728
David Brazdil0f672f62019-12-10 10:32:29 +00004729 if (btrfs_leaf_free_space(leaf) < data_size) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004730 btrfs_print_leaf(leaf);
4731 BUG();
4732 }
4733 slot = path->slots[0];
4734 old_data = btrfs_item_end_nr(leaf, slot);
4735
4736 BUG_ON(slot < 0);
4737 if (slot >= nritems) {
4738 btrfs_print_leaf(leaf);
David Brazdil0f672f62019-12-10 10:32:29 +00004739 btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d",
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004740 slot, nritems);
David Brazdil0f672f62019-12-10 10:32:29 +00004741 BUG();
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004742 }
4743
4744 /*
4745 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4746 */
4747 /* first correct the data pointers */
David Brazdil0f672f62019-12-10 10:32:29 +00004748 btrfs_init_map_token(&token, leaf);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004749 for (i = slot; i < nritems; i++) {
4750 u32 ioff;
4751 item = btrfs_item_nr(i);
4752
4753 ioff = btrfs_token_item_offset(leaf, item, &token);
4754 btrfs_set_token_item_offset(leaf, item,
4755 ioff - data_size, &token);
4756 }
4757
4758 /* shift the data */
4759 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4760 data_end - data_size, BTRFS_LEAF_DATA_OFFSET +
4761 data_end, old_data - data_end);
4762
4763 data_end = old_data;
4764 old_size = btrfs_item_size_nr(leaf, slot);
4765 item = btrfs_item_nr(slot);
4766 btrfs_set_item_size(leaf, item, old_size + data_size);
4767 btrfs_mark_buffer_dirty(leaf);
4768
David Brazdil0f672f62019-12-10 10:32:29 +00004769 if (btrfs_leaf_free_space(leaf) < 0) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004770 btrfs_print_leaf(leaf);
4771 BUG();
4772 }
4773}
4774
4775/*
4776 * this is a helper for btrfs_insert_empty_items, the main goal here is
4777 * to save stack depth by doing the bulk of the work in a function
4778 * that doesn't call btrfs_search_slot
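 *
 * The path must already point at the insertion slot on a locked leaf
 * with at least total_size bytes free.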
4779 */
4780void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4781 const struct btrfs_key *cpu_key, u32 *data_size,
4782 u32 total_data, u32 total_size, int nr)
4783{
4784 struct btrfs_fs_info *fs_info = root->fs_info;
4785 struct btrfs_item *item;
4786 int i;
4787 u32 nritems;
4788 unsigned int data_end;
4789 struct btrfs_disk_key disk_key;
4790 struct extent_buffer *leaf;
4791 int slot;
4792 struct btrfs_map_token token;
4793
4794 if (path->slots[0] == 0) {
4795 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4796 fixup_low_keys(path, &disk_key, 1);
4797 }
4798 btrfs_unlock_up_safe(path, 1);
4799
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004800 leaf = path->nodes[0];
4801 slot = path->slots[0];
4802
4803 nritems = btrfs_header_nritems(leaf);
David Brazdil0f672f62019-12-10 10:32:29 +00004804 data_end = leaf_data_end(leaf);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004805
David Brazdil0f672f62019-12-10 10:32:29 +00004806 if (btrfs_leaf_free_space(leaf) < total_size) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004807 btrfs_print_leaf(leaf);
4808 btrfs_crit(fs_info, "not enough freespace need %u have %d",
David Brazdil0f672f62019-12-10 10:32:29 +00004809 total_size, btrfs_leaf_free_space(leaf));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004810 BUG();
4811 }
4812
David Brazdil0f672f62019-12-10 10:32:29 +00004813 btrfs_init_map_token(&token, leaf);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004814 if (slot != nritems) {
4815 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4816
4817 if (old_data < data_end) {
4818 btrfs_print_leaf(leaf);
4819 btrfs_crit(fs_info, "slot %d old_data %d data_end %d",
4820 slot, old_data, data_end);
David Brazdil0f672f62019-12-10 10:32:29 +00004821 BUG();
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004822 }
4823 /*
4824 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4825 */
4826 /* first correct the data pointers */
4827 for (i = slot; i < nritems; i++) {
4828 u32 ioff;
4829
4830 item = btrfs_item_nr(i);
4831 ioff = btrfs_token_item_offset(leaf, item, &token);
4832 btrfs_set_token_item_offset(leaf, item,
4833 ioff - total_data, &token);
4834 }
4835 /* shift the items */
4836 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4837 btrfs_item_nr_offset(slot),
4838 (nritems - slot) * sizeof(struct btrfs_item));
4839
4840 /* shift the data */
4841 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4842 data_end - total_data, BTRFS_LEAF_DATA_OFFSET +
4843 data_end, old_data - data_end);
4844 data_end = old_data;
4845 }
4846
4847 /* setup the item for the new data */
4848 for (i = 0; i < nr; i++) {
4849 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4850 btrfs_set_item_key(leaf, &disk_key, slot + i);
4851 item = btrfs_item_nr(slot + i);
4852 btrfs_set_token_item_offset(leaf, item,
4853 data_end - data_size[i], &token);
4854 data_end -= data_size[i];
4855 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4856 }
4857
4858 btrfs_set_header_nritems(leaf, nritems + nr);
4859 btrfs_mark_buffer_dirty(leaf);
4860
David Brazdil0f672f62019-12-10 10:32:29 +00004861 if (btrfs_leaf_free_space(leaf) < 0) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004862 btrfs_print_leaf(leaf);
4863 BUG();
4864 }
4865}
4866
4867/*
4868 * Given a key and some data, insert items into the tree.
4869 * This does all the path init required, making room in the tree if needed.
4870 */
4871int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4872 struct btrfs_root *root,
4873 struct btrfs_path *path,
4874 const struct btrfs_key *cpu_key, u32 *data_size,
4875 int nr)
4876{
4877 int ret = 0;
4878 int slot;
4879 int i;
4880 u32 total_size = 0;
4881 u32 total_data = 0;
4882
4883 for (i = 0; i < nr; i++)
4884 total_data += data_size[i];
4885
4886 total_size = total_data + (nr * sizeof(struct btrfs_item));
4887 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4888 if (ret == 0)
4889 return -EEXIST;
4890 if (ret < 0)
4891 return ret;
4892
4893 slot = path->slots[0];
4894 BUG_ON(slot < 0);
4895
4896 setup_items_for_insert(root, path, cpu_key, data_size,
4897 total_data, total_size, nr);
4898 return 0;
4899}
4900
4901/*
4902 * Given a key and some data, insert an item into the tree.
4903 * This does all the path init required, making room in the tree if needed.
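 *
 * Caller sketch (names illustrative):
 *
 *	struct btrfs_key key = { .objectid = objectid,
 *				 .type = BTRFS_XATTR_ITEM_KEY,
 *				 .offset = hash };
 *	ret = btrfs_insert_item(trans, root, &key, data, data_len);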
4904 */
4905int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4906 const struct btrfs_key *cpu_key, void *data,
4907 u32 data_size)
4908{
4909 int ret = 0;
4910 struct btrfs_path *path;
4911 struct extent_buffer *leaf;
4912 unsigned long ptr;
4913
4914 path = btrfs_alloc_path();
4915 if (!path)
4916 return -ENOMEM;
4917 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4918 if (!ret) {
4919 leaf = path->nodes[0];
4920 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4921 write_extent_buffer(leaf, data, ptr, data_size);
4922 btrfs_mark_buffer_dirty(leaf);
4923 }
4924 btrfs_free_path(path);
4925 return ret;
4926}
4927
4928/*
4929 * delete the pointer from a given node.
4930 *
4931 * the tree should have been previously balanced so the deletion does not
4932 * empty a node.
4933 */
4934static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4935 int level, int slot)
4936{
4937 struct extent_buffer *parent = path->nodes[level];
4938 u32 nritems;
4939 int ret;
4940
4941 nritems = btrfs_header_nritems(parent);
4942 if (slot != nritems - 1) {
4943 if (level) {
4944 ret = tree_mod_log_insert_move(parent, slot, slot + 1,
4945 nritems - slot - 1);
4946 BUG_ON(ret < 0);
4947 }
4948 memmove_extent_buffer(parent,
4949 btrfs_node_key_ptr_offset(slot),
4950 btrfs_node_key_ptr_offset(slot + 1),
4951 sizeof(struct btrfs_key_ptr) *
4952 (nritems - slot - 1));
4953 } else if (level) {
4954 ret = tree_mod_log_insert_key(parent, slot, MOD_LOG_KEY_REMOVE,
4955 GFP_NOFS);
4956 BUG_ON(ret < 0);
4957 }
4958
4959 nritems--;
4960 btrfs_set_header_nritems(parent, nritems);
4961 if (nritems == 0 && parent == root->node) {
4962 BUG_ON(btrfs_header_level(root->node) != 1);
4963 /* just turn the root into a leaf and break */
4964 btrfs_set_header_level(root->node, 0);
4965 } else if (slot == 0) {
4966 struct btrfs_disk_key disk_key;
4967
4968 btrfs_node_key(parent, &disk_key, 0);
4969 fixup_low_keys(path, &disk_key, level + 1);
4970 }
4971 btrfs_mark_buffer_dirty(parent);
4972}
4973
4974/*
4975 * a helper function to delete the leaf pointed to by path->slots[1] and
4976 * path->nodes[1].
4977 *
4978 * This deletes the pointer in path->nodes[1] and frees the leaf
4979 * block extent.
4980 *
4981 * The path must have already been setup for deleting the leaf, including
4982 * all the proper balancing. path->nodes[1] must be locked.
4983 */
4984static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4985 struct btrfs_root *root,
4986 struct btrfs_path *path,
4987 struct extent_buffer *leaf)
4988{
4989 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4990 del_ptr(root, path, 1, path->slots[1]);
4991
4992 /*
4993 * btrfs_free_extent is expensive, we want to make sure we
4994 * aren't holding any locks when we call it
4995 */
4996 btrfs_unlock_up_safe(path, 0);
4997
4998 root_sub_used(root, leaf->len);
4999
5000 extent_buffer_get(leaf);
5001 btrfs_free_tree_block(trans, root, leaf, 0, 1);
5002 free_extent_buffer_stale(leaf);
5003}
5004/*
5005 * delete the item at the leaf level in path. If that empties
5006 * the leaf, remove it from the tree
5007 */
5008int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5009 struct btrfs_path *path, int slot, int nr)
5010{
5011 struct btrfs_fs_info *fs_info = root->fs_info;
5012 struct extent_buffer *leaf;
5013 struct btrfs_item *item;
5014 u32 last_off;
5015 u32 dsize = 0;
5016 int ret = 0;
5017 int wret;
5018 int i;
5019 u32 nritems;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005020
5021 leaf = path->nodes[0];
5022 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
5023
5024 for (i = 0; i < nr; i++)
5025 dsize += btrfs_item_size_nr(leaf, slot + i);
5026
5027 nritems = btrfs_header_nritems(leaf);
5028
5029 if (slot + nr != nritems) {
David Brazdil0f672f62019-12-10 10:32:29 +00005030 int data_end = leaf_data_end(leaf);
5031 struct btrfs_map_token token;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005032
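		/*
		 * Close the gap: slide the data of the remaining items that
		 * sits at lower offsets up by dsize bytes to swallow the
		 * freed space, bump their item offsets by dsize, then shift
		 * their item headers over the deleted ones.
		 */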
5033 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
5034 data_end + dsize,
5035 BTRFS_LEAF_DATA_OFFSET + data_end,
5036 last_off - data_end);
5037
David Brazdil0f672f62019-12-10 10:32:29 +00005038 btrfs_init_map_token(&token, leaf);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005039 for (i = slot + nr; i < nritems; i++) {
5040 u32 ioff;
5041
5042 item = btrfs_item_nr(i);
5043 ioff = btrfs_token_item_offset(leaf, item, &token);
5044 btrfs_set_token_item_offset(leaf, item,
5045 ioff + dsize, &token);
5046 }
5047
5048 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
5049 btrfs_item_nr_offset(slot + nr),
5050 sizeof(struct btrfs_item) *
5051 (nritems - slot - nr));
5052 }
5053 btrfs_set_header_nritems(leaf, nritems - nr);
5054 nritems -= nr;
5055
5056 /* delete the leaf if we've emptied it */
5057 if (nritems == 0) {
5058 if (leaf == root->node) {
5059 btrfs_set_header_level(leaf, 0);
5060 } else {
5061 btrfs_set_path_blocking(path);
David Brazdil0f672f62019-12-10 10:32:29 +00005062 btrfs_clean_tree_block(leaf);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005063 btrfs_del_leaf(trans, root, path, leaf);
5064 }
5065 } else {
5066 int used = leaf_space_used(leaf, 0, nritems);
5067 if (slot == 0) {
5068 struct btrfs_disk_key disk_key;
5069
5070 btrfs_item_key(leaf, &disk_key, 0);
5071 fixup_low_keys(path, &disk_key, 1);
5072 }
5073
5074 /* delete the leaf if it is mostly empty */
5075 if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
5076 /* push_leaf_left fixes the path.
5077 * make sure the path still points to our leaf
5078 * for a possible call to del_ptr below
5079 */
5080 slot = path->slots[1];
5081 extent_buffer_get(leaf);
5082
5083 btrfs_set_path_blocking(path);
5084 wret = push_leaf_left(trans, root, path, 1, 1,
5085 1, (u32)-1);
5086 if (wret < 0 && wret != -ENOSPC)
5087 ret = wret;
5088
5089 if (path->nodes[0] == leaf &&
5090 btrfs_header_nritems(leaf)) {
5091 wret = push_leaf_right(trans, root, path, 1,
5092 1, 1, 0);
5093 if (wret < 0 && wret != -ENOSPC)
5094 ret = wret;
5095 }
5096
5097 if (btrfs_header_nritems(leaf) == 0) {
5098 path->slots[1] = slot;
5099 btrfs_del_leaf(trans, root, path, leaf);
5100 free_extent_buffer(leaf);
5101 ret = 0;
5102 } else {
5103 /* if we're still in the path, make sure
5104 * the leaf is dirty. Otherwise, one of the
5105 * push_leaf functions must have already
5106 * dirtied this buffer
5107 */
5108 if (path->nodes[0] == leaf)
5109 btrfs_mark_buffer_dirty(leaf);
5110 free_extent_buffer(leaf);
5111 }
5112 } else {
5113 btrfs_mark_buffer_dirty(leaf);
5114 }
5115 }
5116 return ret;
5117}
5118
5119/*
5120 * search the tree again to find a leaf with lesser keys
5121 * returns 0 if it found something or 1 if there are no lesser leaves.
5122 * returns < 0 on io errors.
5123 *
5124 * This may release the path, and so you may lose any locks held at the
5125 * time you call it.
5126 */
5127int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
5128{
5129 struct btrfs_key key;
5130 struct btrfs_disk_key found_key;
5131 int ret;
5132
5133 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
5134
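	/*
	 * Compute the largest key that sorts strictly before the current
	 * first key: decrement the offset first, then wrap into the type
	 * and finally the objectid, giving up once the key is already the
	 * smallest possible one.
	 */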
5135 if (key.offset > 0) {
5136 key.offset--;
5137 } else if (key.type > 0) {
5138 key.type--;
5139 key.offset = (u64)-1;
5140 } else if (key.objectid > 0) {
5141 key.objectid--;
5142 key.type = (u8)-1;
5143 key.offset = (u64)-1;
5144 } else {
5145 return 1;
5146 }
5147
5148 btrfs_release_path(path);
5149 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5150 if (ret < 0)
5151 return ret;
5152 btrfs_item_key(path->nodes[0], &found_key, 0);
5153 ret = comp_keys(&found_key, &key);
5154 /*
5155 * We might have had an item with the previous key in the tree right
5156 * before we released our path. And after we released our path, that
5157 * item might have been pushed to the first slot (0) of the leaf we
5158 * were holding due to a tree balance. Alternatively, an item with the
5159 * previous key can exist as the only element of a leaf (big fat item).
5160 * Therefore account for these 2 cases, so that our callers (like
5161 * btrfs_previous_item) don't miss an existing item with a key matching
5162 * the previous key we computed above.
5163 */
5164 if (ret <= 0)
5165 return 0;
5166 return 1;
5167}
5168
5169/*
5170 * A helper function to walk down the tree starting at min_key, and looking
5171 * for nodes or leaves that have a minimum transaction id.
5172 * This is used by the btree defrag code and tree logging.
5173 *
5174 * This does not cow, but it does stuff the starting key it finds back
5175 * into min_key, so you can call btrfs_search_slot with cow=1 on the
5176 * key and get a writable path.
5177 *
5178 * This honors path->lowest_level to prevent descent past a given level
5179 * of the tree.
5180 *
5181 * min_trans indicates the oldest transaction that you are interested
5182 * in walking through. Any nodes or leaves older than min_trans are
5183 * skipped over (without reading them).
5184 *
5185 * returns zero if something useful was found, < 0 on error and 1 if there
5186 * was nothing in the tree that matched the search criteria.
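 *
 * Caller sketch (illustrative, ignoring key wrap-around):
 *
 *	while (btrfs_search_forward(root, &min_key, path, min_trans) == 0) {
 *		process_leaf(path->nodes[0]);
 *		min_key.offset++;
 *		btrfs_release_path(path);
 *	}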
5187 */
5188int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
5189 struct btrfs_path *path,
5190 u64 min_trans)
5191{
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005192 struct extent_buffer *cur;
5193 struct btrfs_key found_key;
5194 int slot;
5195 int sret;
5196 u32 nritems;
5197 int level;
5198 int ret = 1;
5199 int keep_locks = path->keep_locks;
5200
5201 path->keep_locks = 1;
5202again:
5203 cur = btrfs_read_lock_root_node(root);
5204 level = btrfs_header_level(cur);
5205 WARN_ON(path->nodes[level]);
5206 path->nodes[level] = cur;
5207 path->locks[level] = BTRFS_READ_LOCK;
5208
5209 if (btrfs_header_generation(cur) < min_trans) {
5210 ret = 1;
5211 goto out;
5212 }
5213 while (1) {
5214 nritems = btrfs_header_nritems(cur);
5215 level = btrfs_header_level(cur);
5216 sret = btrfs_bin_search(cur, min_key, level, &slot);
David Brazdil0f672f62019-12-10 10:32:29 +00005217 if (sret < 0) {
5218 ret = sret;
5219 goto out;
5220 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005221
5222 /* at the lowest level, we're done, setup the path and exit */
5223 if (level == path->lowest_level) {
5224 if (slot >= nritems)
5225 goto find_next_key;
5226 ret = 0;
5227 path->slots[level] = slot;
5228 btrfs_item_key_to_cpu(cur, &found_key, slot);
5229 goto out;
5230 }
5231 if (sret && slot > 0)
5232 slot--;
5233 /*
5234 * check this node pointer against the min_trans parameters.
Olivier Deprez0e641232021-09-23 10:07:05 +02005235 * If it is too old, skip to the next one.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005236 */
5237 while (slot < nritems) {
5238 u64 gen;
5239
5240 gen = btrfs_node_ptr_generation(cur, slot);
5241 if (gen < min_trans) {
5242 slot++;
5243 continue;
5244 }
5245 break;
5246 }
5247find_next_key:
5248 /*
5249 * we didn't find a candidate key in this node, walk forward
5250 * and find another one
5251 */
5252 if (slot >= nritems) {
5253 path->slots[level] = slot;
5254 btrfs_set_path_blocking(path);
5255 sret = btrfs_find_next_key(root, path, min_key, level,
5256 min_trans);
5257 if (sret == 0) {
5258 btrfs_release_path(path);
5259 goto again;
5260 } else {
5261 goto out;
5262 }
5263 }
5264 /* save our key for returning back */
5265 btrfs_node_key_to_cpu(cur, &found_key, slot);
5266 path->slots[level] = slot;
5267 if (level == path->lowest_level) {
5268 ret = 0;
5269 goto out;
5270 }
5271 btrfs_set_path_blocking(path);
David Brazdil0f672f62019-12-10 10:32:29 +00005272 cur = btrfs_read_node_slot(cur, slot);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005273 if (IS_ERR(cur)) {
5274 ret = PTR_ERR(cur);
5275 goto out;
5276 }
5277
5278 btrfs_tree_read_lock(cur);
5279
5280 path->locks[level - 1] = BTRFS_READ_LOCK;
5281 path->nodes[level - 1] = cur;
5282 unlock_up(path, level, 1, 0, NULL);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005283 }
5284out:
5285 path->keep_locks = keep_locks;
5286 if (ret == 0) {
5287 btrfs_unlock_up_safe(path, path->lowest_level + 1);
5288 btrfs_set_path_blocking(path);
5289 memcpy(min_key, &found_key, sizeof(found_key));
5290 }
5291 return ret;
5292}
5293
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005294/*
5295 * this is similar to btrfs_next_leaf, but does not try to preserve
5296 * and fixup the path. It looks for and returns the next key in the
5297 * tree based on the current path and the min_trans parameters.
5298 *
5299 * 0 is returned if another key is found, < 0 if there are any errors
5300 * and 1 is returned if there are no higher keys in the tree
5301 *
5302 * path->keep_locks should be set to 1 on the search made before
5303 * calling this function.
5304 */
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int level, u64 min_trans)
{
	int slot;
	struct extent_buffer *c;

	WARN_ON(!path->keep_locks && !path->skip_locking);
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			return 1;

		slot = path->slots[level] + 1;
		c = path->nodes[level];
next:
		if (slot >= btrfs_header_nritems(c)) {
			int ret;
			int orig_lowest;
			struct btrfs_key cur_key;
			if (level + 1 >= BTRFS_MAX_LEVEL ||
			    !path->nodes[level + 1])
				return 1;

			if (path->locks[level + 1] || path->skip_locking) {
				level++;
				continue;
			}

			slot = btrfs_header_nritems(c) - 1;
			if (level == 0)
				btrfs_item_key_to_cpu(c, &cur_key, slot);
			else
				btrfs_node_key_to_cpu(c, &cur_key, slot);

			orig_lowest = path->lowest_level;
			btrfs_release_path(path);
			path->lowest_level = level;
			ret = btrfs_search_slot(NULL, root, &cur_key, path,
						0, 0);
			path->lowest_level = orig_lowest;
			if (ret < 0)
				return ret;

			c = path->nodes[level];
			slot = path->slots[level];
			if (ret == 0)
				slot++;
			goto next;
		}

		if (level == 0)
			btrfs_item_key_to_cpu(c, key, slot);
		else {
			u64 gen = btrfs_node_ptr_generation(c, slot);

			if (gen < min_trans) {
				slot++;
				goto next;
			}
			btrfs_node_key_to_cpu(c, key, slot);
		}
		return 0;
	}
	return 1;
}
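
/*
 * Minimal usage sketch (illustrative only, not part of the original file):
 * pairing btrfs_find_next_key() with the prior locked search it requires.
 * The example_* helper name is hypothetical; only the btrfs calls are real.
 */
static int __maybe_unused example_find_next_key(struct btrfs_root *root,
						struct btrfs_key *key,
						u64 min_trans)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* keep_locks must be set on the search made before the call */
	path->keep_locks = 1;
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		goto out;

	/* 0: *key now holds the next key; 1: no higher keys in the tree */
	ret = btrfs_find_next_key(root, path, key, 0, min_trans);
out:
	btrfs_free_path(path);
	return ret;
}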

/*
 * search the tree again to find a leaf with greater keys
 * returns 0 if it found something or 1 if there are no greater leaves.
 * returns < 0 on io errors.
 */
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	return btrfs_next_old_leaf(root, path, 0);
}
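
/*
 * Usage sketch (illustrative, not called anywhere in this file): the common
 * pattern for iterating all items of a tree with btrfs_next_leaf(). The
 * all-zero start key and the example_* name are assumptions for the sketch.
 */
static int __maybe_unused example_iterate_items(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key = { 0 };
	struct btrfs_key found_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* start at the smallest possible key and walk forward */
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret)	/* 1: no more leaves, < 0: error */
				break;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		/* ... process the item at found_key here ... */
		path->slots[0]++;
	}
	if (ret > 0)	/* ran off the last leaf: not an error */
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}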

int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
			u64 time_seq)
{
	int slot;
	int level;
	struct extent_buffer *c;
	struct extent_buffer *next;
	struct btrfs_key key;
	u32 nritems;
	int ret;
	int old_spinning = path->leave_spinning;
	int next_rw_lock = 0;

	nritems = btrfs_header_nritems(path->nodes[0]);
	if (nritems == 0)
		return 1;

	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
again:
	level = 1;
	next = NULL;
	next_rw_lock = 0;
	btrfs_release_path(path);

	path->keep_locks = 1;
	path->leave_spinning = 1;

	if (time_seq)
		ret = btrfs_search_old_slot(root, &key, path, time_seq);
	else
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	path->keep_locks = 0;

	if (ret < 0)
		return ret;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * by releasing the path above we dropped all our locks. A balance
	 * could have added more items next to the key that used to be
	 * at the very end of the block. So, check again here and
	 * advance the path if there are now more items available.
	 */
	if (nritems > 0 && path->slots[0] < nritems - 1) {
		if (ret == 0)
			path->slots[0]++;
		ret = 0;
		goto done;
	}
	/*
	 * The check above misses one case:
	 * - after releasing the path above, someone has removed the item that
	 *   used to be at the very end of the block, and balance between
	 *   leaves gets another one with a bigger key.offset to replace it.
	 *
	 * This one should be returned as well, or we can get leaf corruption
	 * later (esp. in __btrfs_drop_extents()).
	 *
	 * A bit more explanation about this check: with ret > 0, the key
	 * isn't found, the path points to the slot where it should be
	 * inserted, so the path->slots[0] item must be the bigger one.
	 */
	if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
		ret = 0;
		goto done;
	}

	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level]) {
			ret = 1;
			goto done;
		}

		slot = path->slots[level] + 1;
		c = path->nodes[level];
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			if (level == BTRFS_MAX_LEVEL) {
				ret = 1;
				goto done;
			}
			continue;
		}

		if (next) {
			btrfs_tree_unlock_rw(next, next_rw_lock);
			free_extent_buffer(next);
		}

		next = c;
		next_rw_lock = path->locks[level];
		ret = read_block_for_search(root, path, &next, level,
					    slot, &key);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret && time_seq) {
				/*
				 * If we don't get the lock, we may be racing
				 * with push_leaf_left, holding that lock while
				 * itself waiting for the leaf we've currently
				 * locked. To solve this situation, we give up
				 * on our lock and cycle.
				 */
				free_extent_buffer(next);
				btrfs_release_path(path);
				cond_resched();
				goto again;
			}
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
		break;
	}
	path->slots[level] = slot;
	while (1) {
		level--;
		c = path->nodes[level];
		if (path->locks[level])
			btrfs_tree_unlock_rw(c, path->locks[level]);

		free_extent_buffer(c);
		path->nodes[level] = next;
		path->slots[level] = 0;
		if (!path->skip_locking)
			path->locks[level] = next_rw_lock;
		if (!level)
			break;

		ret = read_block_for_search(root, path, &next, level,
					    0, &key);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
	}
	ret = 0;
done:
	unlock_up(path, 0, 1, 0, NULL);
	path->leave_spinning = old_spinning;
	if (!old_spinning)
		btrfs_set_path_blocking(path);

	return ret;
}

/*
 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			btrfs_set_path_blocking(path);
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == type)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < type)
			break;
	}
	return 1;
}
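
/*
 * Usage sketch (illustrative; the helper name and the BTRFS_INODE_ITEM_KEY
 * choice are assumptions): find the previous inode item at or below
 * @objectid. Keying the search with (u8)-1/(u64)-1 lands just past the
 * target, so the previous item is the one wanted.
 */
static int __maybe_unused example_previous_item(struct btrfs_root *root,
						u64 objectid)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	/* 0: path now points at the item; 1: nothing was found */
	ret = btrfs_previous_item(root, path, objectid, BTRFS_INODE_ITEM_KEY);
out:
	btrfs_free_path(path);
	return ret;
}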

/*
 * search in extent tree to find a previous Metadata/Data extent item with
 * min objectid.
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_extent_item(struct btrfs_root *root,
			       struct btrfs_path *path, u64 min_objectid)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			btrfs_set_path_blocking(path);
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
		    found_key.type == BTRFS_METADATA_ITEM_KEY)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < BTRFS_EXTENT_ITEM_KEY)
			break;
	}
	return 1;
}
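
/*
 * Sketch (illustrative only; @extent_root and @bytenr are assumed
 * parameters): walk back from @bytenr to the Metadata/Data extent item
 * that precedes or covers it, as scrub-style callers do.
 */
static int __maybe_unused example_previous_extent_item(struct btrfs_root *extent_root,
						       u64 bytenr)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = bytenr;
	key.type = BTRFS_METADATA_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	/* 0: found a Metadata/Data extent item; 1: none before @bytenr */
	ret = btrfs_previous_extent_item(extent_root, path, bytenr);
out:
	btrfs_free_path(path);
	return ret;
}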