// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *ins_key, struct btrfs_path *path,
		      int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_fs_info *fs_info,
			  struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot);

struct btrfs_path *btrfs_alloc_path(void)
{
	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}

/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i] || !p->locks[i])
			continue;
		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
		if (p->locks[i] == BTRFS_READ_LOCK)
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		else if (p->locks[i] == BTRFS_WRITE_LOCK)
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
	}
}

/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held, int held_rw)
{
	int i;

	if (held) {
		btrfs_set_lock_blocking_rw(held, held_rw);
		if (held_rw == BTRFS_WRITE_LOCK)
			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
		else if (held_rw == BTRFS_READ_LOCK)
			held_rw = BTRFS_READ_LOCK_BLOCKING;
	}
	btrfs_set_path_blocking(p);

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i]) {
			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
				p->locks[i] = BTRFS_WRITE_LOCK;
			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
				p->locks[i] = BTRFS_READ_LOCK;
		}
	}

	if (held)
		btrfs_clear_lock_blocking_rw(held, held_rw);
}

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}
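
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * usual alloc/search/free pattern for a path. btrfs_free_path() also
 * releases locks and buffer references via btrfs_release_path(), so no
 * separate release call is needed on this exit path.
 */
static int __maybe_unused example_path_lifetime(struct btrfs_root *root,
						const struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	/* read-only search: no transaction, ins_len 0, cow 0 */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	/* ... on success, inspect path->nodes[0] / path->slots[0] here ... */
	btrfs_free_path(path);
	return ret;
}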

/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths where no locks or extent buffers are held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}

/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was COWed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}

/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
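
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * caller of btrfs_lock_root_node() owns both a tree lock and a reference
 * on the returned buffer and must drop both when done.
 */
static void __maybe_unused example_use_locked_root(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	eb = btrfs_lock_root_node(root);
	/* ... the root node is stable here and safe to modify ... */
	btrfs_tree_unlock(eb);		/* drop the lock taken above */
	free_extent_buffer(eb);		/* drop the reference taken above */
}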

/* loop around taking references on and read locking the root node of
 * the tree until you end up with a read lock on the root.  A read
 * locked buffer is returned, with a reference held.
 */
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/* cowonly roots (everything not a reference counted cow subvolume) just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
		return;

	spin_lock(&fs_info->trans_lock);
	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
		/* Want the extent tree to be the last on the list */
		if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
			list_move_tail(&root->dirty_list,
				       &fs_info->dirty_cowonly_roots);
		else
			list_move(&root->dirty_list,
				  &fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&fs_info->trans_lock);
}

/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this function returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root,
		    struct extent_buffer *buf,
		    struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
			&disk_key, level, buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer_fsid(cow, fs_info->fsid);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);

	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

enum mod_log_op {
	MOD_LOG_KEY_REPLACE,
	MOD_LOG_KEY_ADD,
	MOD_LOG_KEY_REMOVE,
	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
	MOD_LOG_MOVE_KEYS,
	MOD_LOG_ROOT_REPLACE,
};

struct tree_mod_root {
	u64 logical;
	u8 level;
};

struct tree_mod_elem {
	struct rb_node node;
	u64 logical;
	u64 seq;
	enum mod_log_op op;

	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
	int slot;

	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
	u64 generation;

	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* this is used for op == MOD_LOG_MOVE_KEYS */
	struct {
		int dst_slot;
		int nr_items;
	} move;

	/* this is used for op == MOD_LOG_ROOT_REPLACE */
	struct tree_mod_root old_root;
};

/*
 * Pull a new tree mod seq number for our operation.
 */
static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
{
	return atomic64_inc_return(&fs_info->tree_mod_seq);
}

/*
 * This adds a new blocker to the tree mod log's blocker list if the @elem
 * passed does not already have a sequence number set. So when a caller expects
 * to record tree modifications, it should ensure elem->seq is set to zero
 * before calling btrfs_get_tree_mod_seq.
 * Returns a fresh, unused tree log modification sequence number, even if no new
 * blocker was added.
 */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			   struct seq_list *elem)
{
	write_lock(&fs_info->tree_mod_log_lock);
	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!elem->seq) {
		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);
	write_unlock(&fs_info->tree_mod_log_lock);

	return elem->seq;
}

void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct seq_list *cur_elem;
	struct tree_mod_elem *tm;
	u64 min_seq = (u64)-1;
	u64 seq_putting = elem->seq;

	if (!seq_putting)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	list_del(&elem->list);
	elem->seq = 0;

	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
		if (cur_elem->seq < min_seq) {
			if (seq_putting > cur_elem->seq) {
				/*
				 * blocker with lower sequence number exists, we
				 * cannot remove anything from the log
				 */
				spin_unlock(&fs_info->tree_mod_seq_lock);
				return;
			}
			min_seq = cur_elem->seq;
		}
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	/*
	 * anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	write_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		next = rb_next(node);
		tm = rb_entry(node, struct tree_mod_elem, node);
		if (tm->seq > min_seq)
			continue;
		rb_erase(node, tm_root);
		kfree(tm);
	}
	write_unlock(&fs_info->tree_mod_log_lock);
}
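
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * reader that needs old tree state registers a blocker around its work.
 * While elem is on the blocker list, log entries newer than the oldest
 * registered blocker are preserved and can be used to rewind blocks.
 */
static void __maybe_unused example_tree_mod_seq_blocker(struct btrfs_fs_info *fs_info)
{
	struct seq_list elem = SEQ_LIST_INIT(elem);

	btrfs_get_tree_mod_seq(fs_info, &elem);
	/* ... rewind old tree versions using elem.seq as time_seq ... */
	btrfs_put_tree_mod_seq(fs_info, &elem);
}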

/*
 * key order of the log:
 *       node/leaf start address -> sequence
 *
 * The 'start address' is the logical address of the *new* root node
 * for root replace operations, or the logical address of the affected
 * block for all other operations.
 *
 * Note: must be called with write lock for fs_info::tree_mod_log_lock.
 */
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
{
	struct rb_root *tm_root;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct tree_mod_elem *cur;

	tm->seq = btrfs_inc_tree_mod_seq(fs_info);

	tm_root = &fs_info->tree_mod_log;
	new = &tm_root->rb_node;
	while (*new) {
		cur = rb_entry(*new, struct tree_mod_elem, node);
		parent = *new;
		if (cur->logical < tm->logical)
			new = &((*new)->rb_left);
		else if (cur->logical > tm->logical)
			new = &((*new)->rb_right);
		else if (cur->seq < tm->seq)
			new = &((*new)->rb_left);
		else if (cur->seq > tm->seq)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, tm_root);
	return 0;
}

/*
 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
 * returns zero with the tree_mod_log_lock acquired. The caller must hold
 * that lock until all tree mod log insertions are recorded in the rb tree
 * and then write unlock fs_info::tree_mod_log_lock.
 */
static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb)
{
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 1;
	if (eb && btrfs_header_level(eb) == 0)
		return 1;

	write_lock(&fs_info->tree_mod_log_lock);
	if (list_empty(&(fs_info)->tree_mod_seq_list)) {
		write_unlock(&fs_info->tree_mod_log_lock);
		return 1;
	}

	return 0;
}
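
/*
 * Usage sketch (illustrative only, hypothetical caller) of the locking
 * contract above: a 0 return from tree_mod_dont_log() leaves
 * tree_mod_log_lock write locked, so the caller inserts and then unlocks;
 * a 1 return means nothing needs recording and no lock is held.
 */
static int __maybe_unused example_log_one_elem(struct btrfs_fs_info *fs_info,
					       struct tree_mod_elem *tm)
{
	int ret;

	if (tree_mod_dont_log(fs_info, NULL)) {
		kfree(tm);	/* nothing to record, no lock held */
		return 0;
	}
	ret = __tree_mod_log_insert(fs_info, tm);
	write_unlock(&fs_info->tree_mod_log_lock);	/* taken by tree_mod_dont_log */
	if (ret)
		kfree(tm);
	return ret;
}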

/* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb)
{
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 0;
	if (eb && btrfs_header_level(eb) == 0)
		return 0;

	return 1;
}

static struct tree_mod_elem *
alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
		    enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm)
		return NULL;

	tm->logical = eb->start;
	if (op != MOD_LOG_KEY_ADD) {
		btrfs_node_key(eb, &tm->key, slot);
		tm->blockptr = btrfs_node_blockptr(eb, slot);
	}
	tm->op = op;
	tm->slot = slot;
	tm->generation = btrfs_node_ptr_generation(eb, slot);
	RB_CLEAR_NODE(&tm->node);

	return tm;
}

static noinline int tree_mod_log_insert_key(struct extent_buffer *eb, int slot,
		enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	if (!tree_mod_need_log(eb->fs_info, eb))
		return 0;

	tm = alloc_tree_mod_elem(eb, slot, op, flags);
	if (!tm)
		return -ENOMEM;

	if (tree_mod_dont_log(eb->fs_info, eb)) {
		kfree(tm);
		return 0;
	}

	ret = __tree_mod_log_insert(eb->fs_info, tm);
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	if (ret)
		kfree(tm);

	return ret;
}

static noinline int tree_mod_log_insert_move(struct extent_buffer *eb,
		int dst_slot, int src_slot, int nr_items)
{
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int ret = 0;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(eb->fs_info, eb))
		return 0;

	tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	tm = kzalloc(sizeof(*tm), GFP_NOFS);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->logical = eb->start;
	tm->slot = src_slot;
	tm->move.dst_slot = dst_slot;
	tm->move.nr_items = nr_items;
	tm->op = MOD_LOG_MOVE_KEYS;

	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
		    MOD_LOG_KEY_REMOVE_WHILE_MOVING, GFP_NOFS);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(eb->fs_info, eb))
		goto free_tms;
	locked = 1;

	/*
	 * When we overwrite something during the move, we log these removals.
	 * This can only happen when we move towards the beginning of the
	 * buffer, i.e. dst_slot < src_slot.
	 */
	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = __tree_mod_log_insert(eb->fs_info, tm_list[i]);
		if (ret)
			goto free_tms;
	}

	ret = __tree_mod_log_insert(eb->fs_info, tm);
	if (ret)
		goto free_tms;
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return 0;
free_tms:
	for (i = 0; i < nr_items; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &eb->fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		write_unlock(&eb->fs_info->tree_mod_log_lock);
	kfree(tm_list);
	kfree(tm);

	return ret;
}

static inline int
__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
		       struct tree_mod_elem **tm_list,
		       int nritems)
{
	int i, j;
	int ret;

	for (i = nritems - 1; i >= 0; i--) {
		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
		if (ret) {
			for (j = nritems - 1; j > i; j--)
				rb_erase(&tm_list[j]->node,
					 &fs_info->tree_mod_log);
			return ret;
		}
	}

	return 0;
}

static noinline int tree_mod_log_insert_root(struct extent_buffer *old_root,
			 struct extent_buffer *new_root, int log_removal)
{
	struct btrfs_fs_info *fs_info = old_root->fs_info;
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int ret = 0;
	int i;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (log_removal && btrfs_header_level(old_root) > 0) {
		nritems = btrfs_header_nritems(old_root);
		tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
				  GFP_NOFS);
		if (!tm_list) {
			ret = -ENOMEM;
			goto free_tms;
		}
		for (i = 0; i < nritems; i++) {
			tm_list[i] = alloc_tree_mod_elem(old_root, i,
			    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
			if (!tm_list[i]) {
				ret = -ENOMEM;
				goto free_tms;
			}
		}
	}

	tm = kzalloc(sizeof(*tm), GFP_NOFS);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->logical = new_root->start;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = MOD_LOG_ROOT_REPLACE;

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;

	if (tm_list)
		ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
	if (!ret)
		ret = __tree_mod_log_insert(fs_info, tm);

	write_unlock(&fs_info->tree_mod_log_lock);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return ret;

free_tms:
	if (tm_list) {
		for (i = 0; i < nritems; i++)
			kfree(tm_list[i]);
		kfree(tm_list);
	}
	kfree(tm);

	return ret;
}

static struct tree_mod_elem *
__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
		      int smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;

	read_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = rb_entry(node, struct tree_mod_elem, node);
		if (cur->logical < start) {
			node = node->rb_left;
		} else if (cur->logical > start) {
			node = node->rb_right;
		} else if (cur->seq < min_seq) {
			node = node->rb_left;
		} else if (!smallest) {
			/* we want the node with the highest seq */
			if (found)
				BUG_ON(found->seq > cur->seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->seq > min_seq) {
			/* we want the node with the smallest seq */
			if (found)
				BUG_ON(found->seq < cur->seq);
			found = cur;
			node = node->rb_right;
		} else {
			found = cur;
			break;
		}
	}
	read_unlock(&fs_info->tree_mod_log_lock);

	return found;
}

/*
 * this returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item). any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
			   u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 1);
}

/*
 * this returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item). any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 0);
}

static noinline int
tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     struct extent_buffer *src, unsigned long dst_offset,
		     unsigned long src_offset, int nr_items)
{
	int ret = 0;
	struct tree_mod_elem **tm_list = NULL;
	struct tree_mod_elem **tm_list_add, **tm_list_rem;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
		return 0;

	tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
			  GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	tm_list_add = tm_list;
	tm_list_rem = tm_list + nr_items;
	for (i = 0; i < nr_items; i++) {
		tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
		    MOD_LOG_KEY_REMOVE, GFP_NOFS);
		if (!tm_list_rem[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}

		tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
		    MOD_LOG_KEY_ADD, GFP_NOFS);
		if (!tm_list_add[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;
	locked = 1;

	for (i = 0; i < nr_items; i++) {
		ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
		if (ret)
			goto free_tms;
		ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
		if (ret)
			goto free_tms;
	}

	write_unlock(&fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nr_items * 2; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		write_unlock(&fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return ret;
}

static noinline int tree_mod_log_free_eb(struct extent_buffer *eb)
{
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int i;
	int ret = 0;

	if (btrfs_header_level(eb) == 0)
		return 0;

	if (!tree_mod_need_log(eb->fs_info, NULL))
		return 0;

	nritems = btrfs_header_nritems(eb);
	tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	for (i = 0; i < nritems; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i,
		    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(eb->fs_info, eb))
		goto free_tms;

	ret = __tree_mod_log_free_eb(eb->fs_info, tm_list, nritems);
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nritems; i++)
		kfree(tm_list[i]);
	kfree(tm_list);

	return ret;
}

/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;

	return 0;
}

static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is that some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_handle_fs_error(fs_info, ret, NULL);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			if (ret)
				return ret;

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				if (ret)
					return ret;
				ret = btrfs_inc_ref(trans, root, cow, 1);
				if (ret)
					return ret;
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
		}
		if (new_flags != 0) {
			int level = btrfs_header_level(buf);

			ret = btrfs_set_disk_extent_flags(trans, fs_info,
							  buf->start,
							  buf->len,
							  new_flags, level, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
			ret = btrfs_dec_ref(trans, root, buf, 1);
			if (ret)
				return ret;
		}
		clean_tree_block(fs_info, buf);
		*last_ref = 1;
	}
	return 0;
}

/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start = 0;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
		parent_start = parent->start;

	/*
	 * If we are COWing a node/leaf from the extent, chunk or device trees,
	 * make sure that we do not finish block group creation of pending block
	 * groups. We do this to avoid a deadlock.
	 * COWing can result in allocation of a new chunk, and flushing pending
	 * block groups (btrfs_create_pending_block_groups()) can be triggered
	 * when finishing allocation of a new chunk. Creation of a pending block
	 * group modifies the extent, chunk and device trees, therefore we could
	 * deadlock with ourselves since we are holding a lock on an extent
	 * buffer that btrfs_create_pending_block_groups() may try to COW later.
	 */
	if (root == fs_info->extent_root ||
	    root == fs_info->chunk_root ||
	    root == fs_info->dev_root)
		trans->can_flush_pending_bgs = false;

	cow = btrfs_alloc_tree_block(trans, root, parent_start,
			root->root_key.objectid, &disk_key, level,
			search_start, empty_size);
	trans->can_flush_pending_bgs = true;
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer_fsid(cow, fs_info->fsid);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
	}

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;

		extent_buffer_get(cow);
		ret = tree_mod_log_insert_root(root->node, cow, 1);
		BUG_ON(ret < 0);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		WARN_ON(trans->transid != btrfs_header_generation(parent));
		tree_mod_log_insert_key(parent, parent_slot,
					MOD_LOG_KEY_REPLACE, GFP_NOFS);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		if (last_ref) {
			ret = tree_mod_log_free_eb(buf);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
		}
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

/*
 * returns the logical address of the oldest predecessor of the given root.
 * entries older than time_seq are ignored.
 */
static struct tree_mod_elem *__tree_mod_log_oldest_root(
		struct extent_buffer *eb_root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct tree_mod_elem *found = NULL;
	u64 root_logical = eb_root->start;
	int looped = 0;

	if (!time_seq)
		return NULL;

	/*
	 * the very last operation that's logged for a root is the
	 * replacement operation (if it is replaced at all). this has
	 * the logical address of the *new* root, making it the very
	 * first operation that's logged for this root.
	 */
	while (1) {
		tm = tree_mod_log_search_oldest(eb_root->fs_info, root_logical,
						time_seq);
		if (!looped && !tm)
			return NULL;
		/*
		 * if there are no tree operations for the oldest root, we
		 * simply return it. this should only happen if that (old)
		 * root is at level 0.
		 */
		if (!tm)
			break;

		/*
		 * if there's an operation that's not a root replacement, we
		 * found the oldest version of our root. normally, we'll find a
		 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
		 */
		if (tm->op != MOD_LOG_ROOT_REPLACE)
			break;

		found = tm;
		root_logical = tm->old_root.logical;
		looped = 1;
	}

	/* if there's no old root to return, return what we found instead */
	if (!found)
		found = tm;

	return found;
}

/*
 * tm is a pointer to the first operation to rewind within eb. then, all
 * previous operations will be rewound (until we reach something older than
 * time_seq).
 */
static void
__tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
		      u64 time_seq, struct tree_mod_elem *first_tm)
{
	u32 n;
	struct rb_node *next;
	struct tree_mod_elem *tm = first_tm;
	unsigned long o_dst;
	unsigned long o_src;
	unsigned long p_size = sizeof(struct btrfs_key_ptr);

	n = btrfs_header_nritems(eb);
	read_lock(&fs_info->tree_mod_log_lock);
	while (tm && tm->seq >= time_seq) {
		/*
		 * all the operations are recorded with the operator used for
		 * the modification. as we're going backwards, we do the
		 * opposite of each operation here.
		 */
		switch (tm->op) {
		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
			BUG_ON(tm->slot < n);
			/* Fallthrough */
		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
		case MOD_LOG_KEY_REMOVE:
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			n++;
			break;
		case MOD_LOG_KEY_REPLACE:
			BUG_ON(tm->slot >= n);
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			break;
		case MOD_LOG_KEY_ADD:
			/* if a move operation is needed it's in the log */
			n--;
			break;
		case MOD_LOG_MOVE_KEYS:
			o_dst = btrfs_node_key_ptr_offset(tm->slot);
			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
			memmove_extent_buffer(eb, o_dst, o_src,
					      tm->move.nr_items * p_size);
			break;
		case MOD_LOG_ROOT_REPLACE:
			/*
			 * this operation is special. for roots, this must be
			 * handled explicitly before rewinding.
			 * for non-roots, this operation may exist if the node
			 * was a root: root A -> child B; then A gets empty and
			 * B is promoted to the new root. in the mod log, we'll
			 * have a root-replace operation for B, a tree block
			 * that is not a root. we simply ignore that operation.
			 */
			break;
		}
		next = rb_next(&tm->node);
		if (!next)
			break;
		tm = rb_entry(next, struct tree_mod_elem, node);
		if (tm->logical != first_tm->logical)
			break;
	}
	read_unlock(&fs_info->tree_mod_log_lock);
	btrfs_set_header_nritems(eb, n);
}

/*
 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
 * is returned. If rewind operations happen, a fresh buffer is returned. The
 * returned buffer is always read-locked. If the returned buffer is not the
 * input buffer, the lock on the input buffer is released and the input buffer
 * is freed (its refcount is decremented).
 */
static struct extent_buffer *
tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		    struct extent_buffer *eb, u64 time_seq)
{
	struct extent_buffer *eb_rewin;
	struct tree_mod_elem *tm;

	if (!time_seq)
		return eb;

	if (btrfs_header_level(eb) == 0)
		return eb;

	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
	if (!tm)
		return eb;

	btrfs_set_path_blocking(path);
	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);

	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		BUG_ON(tm->slot != 0);
		eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
		btrfs_set_header_bytenr(eb_rewin, eb->start);
		btrfs_set_header_backref_rev(eb_rewin,
					     btrfs_header_backref_rev(eb));
		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
	} else {
		eb_rewin = btrfs_clone_extent_buffer(eb);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
	}

	btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
	btrfs_tree_read_unlock_blocking(eb);
	free_extent_buffer(eb);

	extent_buffer_get(eb_rewin);
	btrfs_tree_read_lock(eb_rewin);
	__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
	WARN_ON(btrfs_header_nritems(eb_rewin) >
		BTRFS_NODEPTRS_PER_BLOCK(fs_info));

	return eb_rewin;
}

/*
 * get_old_root() rewinds the state of @root's root node to the given @time_seq
 * value. If there are no changes, the current root->root_node is returned. If
 * anything changed in between, there's a fresh buffer allocated on which the
 * rewind operations are done. In any case, the returned buffer is read locked.
 * Returns NULL on error (with no locks held).
 */
static inline struct extent_buffer *
get_old_root(struct btrfs_root *root, u64 time_seq)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct tree_mod_elem *tm;
	struct extent_buffer *eb = NULL;
	struct extent_buffer *eb_root;
	struct extent_buffer *old;
	struct tree_mod_root *old_root = NULL;
	u64 old_generation = 0;
	u64 logical;
	int level;

	eb_root = btrfs_read_lock_root_node(root);
	tm = __tree_mod_log_oldest_root(eb_root, time_seq);
	if (!tm)
		return eb_root;

	if (tm->op == MOD_LOG_ROOT_REPLACE) {
		old_root = &tm->old_root;
		old_generation = tm->generation;
		logical = old_root->logical;
		level = old_root->level;
	} else {
		logical = eb_root->start;
		level = btrfs_header_level(eb_root);
	}

	tm = tree_mod_log_search(fs_info, logical, time_seq);
	if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		old = read_tree_block(fs_info, logical, 0, level, NULL);
		if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
			if (!IS_ERR(old))
				free_extent_buffer(old);
			btrfs_warn(fs_info,
				   "failed to read tree block %llu from get_old_root",
				   logical);
		} else {
			eb = btrfs_clone_extent_buffer(old);
			free_extent_buffer(old);
		}
	} else if (old_root) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		eb = alloc_dummy_extent_buffer(fs_info, logical);
	} else {
		btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
		eb = btrfs_clone_extent_buffer(eb_root);
		btrfs_tree_read_unlock_blocking(eb_root);
		free_extent_buffer(eb_root);
	}

	if (!eb)
		return NULL;
	extent_buffer_get(eb);
	btrfs_tree_read_lock(eb);
	if (old_root) {
		btrfs_set_header_bytenr(eb, eb->start);
		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
		btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
		btrfs_set_header_level(eb, old_root->level);
		btrfs_set_header_generation(eb, old_generation);
	}
	if (tm)
		__tree_mod_log_rewind(fs_info, eb, time_seq, tm);
	else
		WARN_ON(btrfs_header_level(eb) != 0);
	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(fs_info));

	return eb;
}

int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	int level;
	struct extent_buffer *eb_root = btrfs_root_node(root);

	tm = __tree_mod_log_oldest_root(eb_root, time_seq);
	if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
		level = tm->old_root.level;
	} else {
		level = btrfs_header_level(eb_root);
	}
	free_extent_buffer(eb_root);

	return level;
}

static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	if (btrfs_is_testing(root->fs_info))
		return 0;

	/* Ensure we can see the FORCE_COW bit */
	smp_mb__before_atomic();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create snapshot during committing the transaction,
	 *    after we've finished copying src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
		return 0;
	return 1;
}

/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't COWed more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 search_start;
	int ret;

	if (trans->transaction != fs_info->running_transaction)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid,
		       fs_info->running_transaction->transid);

	if (trans->transid != fs_info->generation)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid, fs_info->generation);

	if (!should_cow_block(trans, root, buf)) {
		trans->dirty = true;
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)SZ_1G - 1);

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}

/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}
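
/*
 * Worked example (illustrative, not part of the original file): with a
 * 16KiB nodesize, block 0 and block 32768 are "close" because
 * 32768 - (0 + 16384) = 16384 < 32768, while block 0 and block 65536 are
 * not, because 65536 - 16384 = 49152 >= 32768.
 */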

/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(const struct btrfs_disk_key *disk,
		     const struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}

/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
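
/*
 * Worked example (illustrative only, not part of the original file):
 * keys sort by objectid, then type, then offset, so two keys for the
 * same inode compare by the type field once the objectids tie.
 */
static void __maybe_unused example_key_ordering(void)
{
	struct btrfs_key a = { .objectid = 256, .type = BTRFS_INODE_ITEM_KEY,
			       .offset = 0 };
	struct btrfs_key b = { .objectid = 256, .type = BTRFS_INODE_REF_KEY,
			       .offset = 0 };

	/* objectids tie and BTRFS_INODE_ITEM_KEY < BTRFS_INODE_REF_KEY */
	WARN_ON(btrfs_comp_cpu_keys(&a, &b) != -1);
}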

/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);

	WARN_ON(trans->transaction != fs_info->running_transaction);
	WARN_ON(trans->transid != fs_info->generation);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = fs_info->nodesize;
	end_slot = parent_nritems - 1;

	if (parent_nritems <= 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i <= end_slot; i++) {
		struct btrfs_key first_key;
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		btrfs_node_key_to_cpu(parent, &first_key, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = find_extent_buffer(fs_info, blocknr);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (!cur) {
				cur = read_tree_block(fs_info, blocknr, gen,
						      parent_level - 1,
						      &first_key);
				if (IS_ERR(cur)) {
					return PTR_ERR(cur);
				} else if (!extent_buffer_uptodate(cur)) {
					free_extent_buffer(cur);
					return -EIO;
				}
			} else if (!uptodate) {
				err = btrfs_read_buffer(cur, gen,
						parent_level - 1, &first_key);
				if (err) {
					free_extent_buffer(cur);
					return err;
				}
			}
		}
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}

/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p, int item_size,
				       const struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

	if (low > high) {
		btrfs_err(eb->fs_info,
		 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
			  __func__, low, high, eb->start,
			  btrfs_header_owner(eb), btrfs_header_level(eb));
		return -EINVAL;
	}

	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		if (!kaddr || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {

			err = map_private_extent_buffer(eb, offset,
						sizeof(struct btrfs_disk_key),
						&kaddr, &map_start, &map_len);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else if (err == 1) {
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			} else {
				return err;
			}

		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}

/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
		     int level, int *slot)
{
	if (level == 0)
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	else
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
}
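
/*
 * Usage sketch (illustrative only, hypothetical helper): on return 0 the
 * key was found at *slot; on return 1 it was not found and *slot is the
 * position where it would be inserted, which may equal nritems.
 */
static int __maybe_unused example_find_slot(struct extent_buffer *leaf,
					    const struct btrfs_key *key,
					    int *slot)
{
	int ret;

	ret = btrfs_bin_search(leaf, key, 0, slot);	/* level 0 == leaf */
	if (ret < 0)
		return ret;	/* read/mapping error */
	if (ret == 0)
		return 0;	/* exact match at *slot */
	return -ENOENT;		/* absent; *slot is the insert position */
}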

static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}

/* given a node and slot number, this reads the block it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 */
static noinline struct extent_buffer *
read_node_slot(struct btrfs_fs_info *fs_info, struct extent_buffer *parent,
	       int slot)
{
	int level = btrfs_header_level(parent);
	struct extent_buffer *eb;
	struct btrfs_key first_key;

	if (slot < 0 || slot >= btrfs_header_nritems(parent))
		return ERR_PTR(-ENOENT);

	BUG_ON(level == 0);

	btrfs_node_key_to_cpu(parent, &first_key, slot);
	eb = read_tree_block(fs_info, btrfs_node_blockptr(parent, slot),
			     btrfs_node_ptr_generation(parent, slot),
			     level - 1, &first_key);
	if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		eb = ERR_PTR(-EIO);
	}

	return eb;
}

/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
1820static noinline int balance_level(struct btrfs_trans_handle *trans,
1821 struct btrfs_root *root,
1822 struct btrfs_path *path, int level)
1823{
1824 struct btrfs_fs_info *fs_info = root->fs_info;
1825 struct extent_buffer *right = NULL;
1826 struct extent_buffer *mid;
1827 struct extent_buffer *left = NULL;
1828 struct extent_buffer *parent = NULL;
1829 int ret = 0;
1830 int wret;
1831 int pslot;
1832 int orig_slot = path->slots[level];
1833 u64 orig_ptr;
1834
1835 if (level == 0)
1836 return 0;
1837
1838 mid = path->nodes[level];
1839
1840 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1841 path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1842 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1843
1844 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1845
1846 if (level < BTRFS_MAX_LEVEL - 1) {
1847 parent = path->nodes[level + 1];
1848 pslot = path->slots[level + 1];
1849 }
1850
1851 /*
1852 * deal with the case where there is only one pointer in the root
1853 * by promoting the node below to a root
1854 */
1855 if (!parent) {
1856 struct extent_buffer *child;
1857
1858 if (btrfs_header_nritems(mid) != 1)
1859 return 0;
1860
1861 /* promote the child to a root */
1862 child = read_node_slot(fs_info, mid, 0);
1863 if (IS_ERR(child)) {
1864 ret = PTR_ERR(child);
1865 btrfs_handle_fs_error(fs_info, ret, NULL);
1866 goto enospc;
1867 }
1868
1869 btrfs_tree_lock(child);
1870 btrfs_set_lock_blocking(child);
1871 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1872 if (ret) {
1873 btrfs_tree_unlock(child);
1874 free_extent_buffer(child);
1875 goto enospc;
1876 }
1877
1878 ret = tree_mod_log_insert_root(root->node, child, 1);
1879 BUG_ON(ret < 0);
1880 rcu_assign_pointer(root->node, child);
1881
1882 add_root_to_dirty_list(root);
1883 btrfs_tree_unlock(child);
1884
1885 path->locks[level] = 0;
1886 path->nodes[level] = NULL;
1887 clean_tree_block(fs_info, mid);
1888 btrfs_tree_unlock(mid);
1889 /* once for the path */
1890 free_extent_buffer(mid);
1891
1892 root_sub_used(root, mid->len);
1893 btrfs_free_tree_block(trans, root, mid, 0, 1);
1894 /* once for the root ptr */
1895 free_extent_buffer_stale(mid);
1896 return 0;
1897 }
1898 if (btrfs_header_nritems(mid) >
1899 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
1900 return 0;
1901
1902 left = read_node_slot(fs_info, parent, pslot - 1);
1903 if (IS_ERR(left))
1904 left = NULL;
1905
1906 if (left) {
1907 btrfs_tree_lock(left);
1908 btrfs_set_lock_blocking(left);
1909 wret = btrfs_cow_block(trans, root, left,
1910 parent, pslot - 1, &left);
1911 if (wret) {
1912 ret = wret;
1913 goto enospc;
1914 }
1915 }
1916
1917 right = read_node_slot(fs_info, parent, pslot + 1);
1918 if (IS_ERR(right))
1919 right = NULL;
1920
1921 if (right) {
1922 btrfs_tree_lock(right);
1923 btrfs_set_lock_blocking(right);
1924 wret = btrfs_cow_block(trans, root, right,
1925 parent, pslot + 1, &right);
1926 if (wret) {
1927 ret = wret;
1928 goto enospc;
1929 }
1930 }
1931
1932 /* first, try to make some room in the middle buffer */
1933 if (left) {
1934 orig_slot += btrfs_header_nritems(left);
1935 wret = push_node_left(trans, fs_info, left, mid, 1);
1936 if (wret < 0)
1937 ret = wret;
1938 }
1939
1940 /*
1941 * then try to empty the right most buffer into the middle
1942 */
1943 if (right) {
1944 wret = push_node_left(trans, fs_info, mid, right, 1);
1945 if (wret < 0 && wret != -ENOSPC)
1946 ret = wret;
1947 if (btrfs_header_nritems(right) == 0) {
1948 clean_tree_block(fs_info, right);
1949 btrfs_tree_unlock(right);
1950 del_ptr(root, path, level + 1, pslot + 1);
1951 root_sub_used(root, right->len);
1952 btrfs_free_tree_block(trans, root, right, 0, 1);
1953 free_extent_buffer_stale(right);
1954 right = NULL;
1955 } else {
1956 struct btrfs_disk_key right_key;
1957 btrfs_node_key(right, &right_key, 0);
1958 ret = tree_mod_log_insert_key(parent, pslot + 1,
1959 MOD_LOG_KEY_REPLACE, GFP_NOFS);
1960 BUG_ON(ret < 0);
1961 btrfs_set_node_key(parent, &right_key, pslot + 1);
1962 btrfs_mark_buffer_dirty(parent);
1963 }
1964 }
1965 if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete. A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right.
		 */
1975 if (!left) {
1976 ret = -EROFS;
1977 btrfs_handle_fs_error(fs_info, ret, NULL);
1978 goto enospc;
1979 }
1980 wret = balance_node_right(trans, fs_info, mid, left);
1981 if (wret < 0) {
1982 ret = wret;
1983 goto enospc;
1984 }
1985 if (wret == 1) {
1986 wret = push_node_left(trans, fs_info, left, mid, 1);
1987 if (wret < 0)
1988 ret = wret;
1989 }
1990 BUG_ON(wret == 1);
1991 }
1992 if (btrfs_header_nritems(mid) == 0) {
1993 clean_tree_block(fs_info, mid);
1994 btrfs_tree_unlock(mid);
1995 del_ptr(root, path, level + 1, pslot);
1996 root_sub_used(root, mid->len);
1997 btrfs_free_tree_block(trans, root, mid, 0, 1);
1998 free_extent_buffer_stale(mid);
1999 mid = NULL;
2000 } else {
2001 /* update the parent key to reflect our changes */
2002 struct btrfs_disk_key mid_key;
2003 btrfs_node_key(mid, &mid_key, 0);
2004 ret = tree_mod_log_insert_key(parent, pslot,
2005 MOD_LOG_KEY_REPLACE, GFP_NOFS);
2006 BUG_ON(ret < 0);
2007 btrfs_set_node_key(parent, &mid_key, pslot);
2008 btrfs_mark_buffer_dirty(parent);
2009 }
2010
2011 /* update the path */
2012 if (left) {
2013 if (btrfs_header_nritems(left) > orig_slot) {
2014 extent_buffer_get(left);
2015 /* left was locked after cow */
2016 path->nodes[level] = left;
2017 path->slots[level + 1] -= 1;
2018 path->slots[level] = orig_slot;
2019 if (mid) {
2020 btrfs_tree_unlock(mid);
2021 free_extent_buffer(mid);
2022 }
2023 } else {
2024 orig_slot -= btrfs_header_nritems(left);
2025 path->slots[level] = orig_slot;
2026 }
2027 }
2028 /* double check we haven't messed things up */
2029 if (orig_ptr !=
2030 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
2031 BUG();
2032enospc:
2033 if (right) {
2034 btrfs_tree_unlock(right);
2035 free_extent_buffer(right);
2036 }
2037 if (left) {
2038 if (path->nodes[level] != left)
2039 btrfs_tree_unlock(left);
2040 free_extent_buffer(left);
2041 }
2042 return ret;
2043}
2044
/*
 * Node balancing for insertion. Here we only split or push nodes around
 * when they are completely full. This is also done top down, so we
 * have to be pessimistic.
 */
2049static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
2050 struct btrfs_root *root,
2051 struct btrfs_path *path, int level)
2052{
2053 struct btrfs_fs_info *fs_info = root->fs_info;
2054 struct extent_buffer *right = NULL;
2055 struct extent_buffer *mid;
2056 struct extent_buffer *left = NULL;
2057 struct extent_buffer *parent = NULL;
2058 int ret = 0;
2059 int wret;
2060 int pslot;
2061 int orig_slot = path->slots[level];
2062
2063 if (level == 0)
2064 return 1;
2065
2066 mid = path->nodes[level];
2067 WARN_ON(btrfs_header_generation(mid) != trans->transid);
2068
2069 if (level < BTRFS_MAX_LEVEL - 1) {
2070 parent = path->nodes[level + 1];
2071 pslot = path->slots[level + 1];
2072 }
2073
2074 if (!parent)
2075 return 1;
2076
2077 left = read_node_slot(fs_info, parent, pslot - 1);
2078 if (IS_ERR(left))
2079 left = NULL;
2080
2081 /* first, try to make some room in the middle buffer */
2082 if (left) {
2083 u32 left_nr;
2084
2085 btrfs_tree_lock(left);
2086 btrfs_set_lock_blocking(left);
2087
2088 left_nr = btrfs_header_nritems(left);
2089 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
2090 wret = 1;
2091 } else {
2092 ret = btrfs_cow_block(trans, root, left, parent,
2093 pslot - 1, &left);
2094 if (ret)
2095 wret = 1;
2096 else {
2097 wret = push_node_left(trans, fs_info,
2098 left, mid, 0);
2099 }
2100 }
2101 if (wret < 0)
2102 ret = wret;
2103 if (wret == 0) {
2104 struct btrfs_disk_key disk_key;
2105 orig_slot += left_nr;
2106 btrfs_node_key(mid, &disk_key, 0);
2107 ret = tree_mod_log_insert_key(parent, pslot,
2108 MOD_LOG_KEY_REPLACE, GFP_NOFS);
2109 BUG_ON(ret < 0);
2110 btrfs_set_node_key(parent, &disk_key, pslot);
2111 btrfs_mark_buffer_dirty(parent);
2112 if (btrfs_header_nritems(left) > orig_slot) {
2113 path->nodes[level] = left;
2114 path->slots[level + 1] -= 1;
2115 path->slots[level] = orig_slot;
2116 btrfs_tree_unlock(mid);
2117 free_extent_buffer(mid);
2118 } else {
2119 orig_slot -=
2120 btrfs_header_nritems(left);
2121 path->slots[level] = orig_slot;
2122 btrfs_tree_unlock(left);
2123 free_extent_buffer(left);
2124 }
2125 return 0;
2126 }
2127 btrfs_tree_unlock(left);
2128 free_extent_buffer(left);
2129 }
2130 right = read_node_slot(fs_info, parent, pslot + 1);
2131 if (IS_ERR(right))
2132 right = NULL;
2133
	/*
	 * then try to push some of our node's items into the right sibling
	 */
2137 if (right) {
2138 u32 right_nr;
2139
2140 btrfs_tree_lock(right);
2141 btrfs_set_lock_blocking(right);
2142
2143 right_nr = btrfs_header_nritems(right);
2144 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
2145 wret = 1;
2146 } else {
2147 ret = btrfs_cow_block(trans, root, right,
2148 parent, pslot + 1,
2149 &right);
2150 if (ret)
2151 wret = 1;
2152 else {
2153 wret = balance_node_right(trans, fs_info,
2154 right, mid);
2155 }
2156 }
2157 if (wret < 0)
2158 ret = wret;
2159 if (wret == 0) {
2160 struct btrfs_disk_key disk_key;
2161
2162 btrfs_node_key(right, &disk_key, 0);
2163 ret = tree_mod_log_insert_key(parent, pslot + 1,
2164 MOD_LOG_KEY_REPLACE, GFP_NOFS);
2165 BUG_ON(ret < 0);
2166 btrfs_set_node_key(parent, &disk_key, pslot + 1);
2167 btrfs_mark_buffer_dirty(parent);
2168
2169 if (btrfs_header_nritems(mid) <= orig_slot) {
2170 path->nodes[level] = right;
2171 path->slots[level + 1] += 1;
2172 path->slots[level] = orig_slot -
2173 btrfs_header_nritems(mid);
2174 btrfs_tree_unlock(mid);
2175 free_extent_buffer(mid);
2176 } else {
2177 btrfs_tree_unlock(right);
2178 free_extent_buffer(right);
2179 }
2180 return 0;
2181 }
2182 btrfs_tree_unlock(right);
2183 free_extent_buffer(right);
2184 }
2185 return 1;
2186}
2187
2188/*
2189 * readahead one full node of leaves, finding things that are close
2190 * to the block in 'slot', and triggering ra on them.
2191 */
2192static void reada_for_search(struct btrfs_fs_info *fs_info,
2193 struct btrfs_path *path,
2194 int level, int slot, u64 objectid)
2195{
2196 struct extent_buffer *node;
2197 struct btrfs_disk_key disk_key;
2198 u32 nritems;
2199 u64 search;
2200 u64 target;
2201 u64 nread = 0;
2202 struct extent_buffer *eb;
2203 u32 nr;
2204 u32 blocksize;
2205 u32 nscan = 0;
2206
2207 if (level != 1)
2208 return;
2209
2210 if (!path->nodes[level])
2211 return;
2212
2213 node = path->nodes[level];
2214
2215 search = btrfs_node_blockptr(node, slot);
2216 blocksize = fs_info->nodesize;
2217 eb = find_extent_buffer(fs_info, search);
2218 if (eb) {
2219 free_extent_buffer(eb);
2220 return;
2221 }
2222
2223 target = search;
2224
2225 nritems = btrfs_header_nritems(node);
2226 nr = slot;
2227
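	/*
	 * Walk outward from the target slot, issuing readahead for blocks
	 * that live within 64KiB of the target, and stop once 64KiB of
	 * readahead has been submitted or 32 slots have been scanned.
	 */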
2228 while (1) {
2229 if (path->reada == READA_BACK) {
2230 if (nr == 0)
2231 break;
2232 nr--;
2233 } else if (path->reada == READA_FORWARD) {
2234 nr++;
2235 if (nr >= nritems)
2236 break;
2237 }
2238 if (path->reada == READA_BACK && objectid) {
2239 btrfs_node_key(node, &disk_key, nr);
2240 if (btrfs_disk_key_objectid(&disk_key) != objectid)
2241 break;
2242 }
2243 search = btrfs_node_blockptr(node, nr);
2244 if ((search <= target && target - search <= 65536) ||
2245 (search > target && search - target <= 65536)) {
2246 readahead_tree_block(fs_info, search);
2247 nread += blocksize;
2248 }
2249 nscan++;
		if (nread > 65536 || nscan > 32)
2251 break;
2252 }
2253}
2254
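/*
 * Readahead the immediate siblings (slots - 1 and + 1 in the parent) of the
 * block at @level, so that a following balance of that level is likely to
 * find them already in cache.
 */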
2255static noinline void reada_for_balance(struct btrfs_fs_info *fs_info,
2256 struct btrfs_path *path, int level)
2257{
2258 int slot;
2259 int nritems;
2260 struct extent_buffer *parent;
2261 struct extent_buffer *eb;
2262 u64 gen;
2263 u64 block1 = 0;
2264 u64 block2 = 0;
2265
2266 parent = path->nodes[level + 1];
2267 if (!parent)
2268 return;
2269
2270 nritems = btrfs_header_nritems(parent);
2271 slot = path->slots[level + 1];
2272
2273 if (slot > 0) {
2274 block1 = btrfs_node_blockptr(parent, slot - 1);
2275 gen = btrfs_node_ptr_generation(parent, slot - 1);
2276 eb = find_extent_buffer(fs_info, block1);
		/*
		 * if we get -EAGAIN from btrfs_buffer_uptodate, we
		 * don't want to return -EAGAIN here. That would loop
		 * forever.
		 */
2282 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2283 block1 = 0;
2284 free_extent_buffer(eb);
2285 }
2286 if (slot + 1 < nritems) {
2287 block2 = btrfs_node_blockptr(parent, slot + 1);
2288 gen = btrfs_node_ptr_generation(parent, slot + 1);
2289 eb = find_extent_buffer(fs_info, block2);
2290 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2291 block2 = 0;
2292 free_extent_buffer(eb);
2293 }
2294
2295 if (block1)
2296 readahead_tree_block(fs_info, block1);
2297 if (block2)
2298 readahead_tree_block(fs_info, block2);
2299}
2300
2301
2302/*
2303 * when we walk down the tree, it is usually safe to unlock the higher layers
2304 * in the tree. The exceptions are when our path goes through slot 0, because
2305 * operations on the tree might require changing key pointers higher up in the
2306 * tree.
2307 *
2308 * callers might also have set path->keep_locks, which tells this code to keep
2309 * the lock if the path points to the last slot in the block. This is part of
2310 * walking through the tree, and selecting the next slot in the higher block.
2311 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock, so
 * if lowest_unlock is 1, level 0 won't be unlocked.
 */
2315static noinline void unlock_up(struct btrfs_path *path, int level,
2316 int lowest_unlock, int min_write_lock_level,
2317 int *write_lock_level)
2318{
2319 int i;
2320 int skip_level = level;
2321 int no_skips = 0;
2322 struct extent_buffer *t;
2323
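	/*
	 * skip_level tracks the highest level that must stay locked: a path
	 * through slot 0 (or, with keep_locks, through the last slot) at
	 * level i may force a key update at level i + 1, so that level and
	 * everything below it are kept locked.
	 */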
2324 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2325 if (!path->nodes[i])
2326 break;
2327 if (!path->locks[i])
2328 break;
2329 if (!no_skips && path->slots[i] == 0) {
2330 skip_level = i + 1;
2331 continue;
2332 }
2333 if (!no_skips && path->keep_locks) {
2334 u32 nritems;
2335 t = path->nodes[i];
2336 nritems = btrfs_header_nritems(t);
2337 if (nritems < 1 || path->slots[i] >= nritems - 1) {
2338 skip_level = i + 1;
2339 continue;
2340 }
2341 }
2342 if (skip_level < i && i >= lowest_unlock)
2343 no_skips = 1;
2344
2345 t = path->nodes[i];
2346 if (i >= lowest_unlock && i > skip_level) {
2347 btrfs_tree_unlock_rw(t, path->locks[i]);
2348 path->locks[i] = 0;
2349 if (write_lock_level &&
2350 i > min_write_lock_level &&
2351 i <= *write_lock_level) {
2352 *write_lock_level = i - 1;
2353 }
2354 }
2355 }
2356}
2357
2358/*
2359 * This releases any locks held in the path starting at level and
2360 * going all the way up to the root.
2361 *
2362 * btrfs_search_slot will keep the lock held on higher nodes in a few
2363 * corner cases, such as COW of the block at slot zero in the node. This
2364 * ignores those rules, and it should only be called when there are no
2365 * more updates to be done higher up in the tree.
2366 */
2367noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2368{
2369 int i;
2370
2371 if (path->keep_locks)
2372 return;
2373
2374 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2375 if (!path->nodes[i])
2376 continue;
2377 if (!path->locks[i])
2378 continue;
2379 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2380 path->locks[i] = 0;
2381 }
2382}
2383
2384/*
2385 * helper function for btrfs_search_slot. The goal is to find a block
2386 * in cache without setting the path to blocking. If we find the block
2387 * we return zero and the path is unchanged.
2388 *
2389 * If we can't find the block, we set the path blocking and do some
2390 * reada. -EAGAIN is returned and the search must be repeated.
2391 */
2392static int
2393read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
2394 struct extent_buffer **eb_ret, int level, int slot,
2395 const struct btrfs_key *key)
2396{
2397 struct btrfs_fs_info *fs_info = root->fs_info;
2398 u64 blocknr;
2399 u64 gen;
2400 struct extent_buffer *b = *eb_ret;
2401 struct extent_buffer *tmp;
2402 struct btrfs_key first_key;
2403 int ret;
2404 int parent_level;
2405
2406 blocknr = btrfs_node_blockptr(b, slot);
2407 gen = btrfs_node_ptr_generation(b, slot);
2408 parent_level = btrfs_header_level(b);
2409 btrfs_node_key_to_cpu(b, &first_key, slot);
2410
2411 tmp = find_extent_buffer(fs_info, blocknr);
2412 if (tmp) {
2413 /* first we do an atomic uptodate check */
2414 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2415 *eb_ret = tmp;
2416 return 0;
2417 }
2418
2419 /* the pages were up to date, but we failed
2420 * the generation number check. Do a full
2421 * read for the generation number that is correct.
2422 * We must do this without dropping locks so
2423 * we can trust our generation number
2424 */
2425 btrfs_set_path_blocking(p);
2426
2427 /* now we're allowed to do a blocking uptodate check */
2428 ret = btrfs_read_buffer(tmp, gen, parent_level - 1, &first_key);
2429 if (!ret) {
2430 *eb_ret = tmp;
2431 return 0;
2432 }
2433 free_extent_buffer(tmp);
2434 btrfs_release_path(p);
2435 return -EIO;
2436 }
2437
2438 /*
2439 * reduce lock contention at high levels
2440 * of the btree by dropping locks before
2441 * we read. Don't release the lock on the current
2442 * level because we need to walk this node to figure
2443 * out which blocks to read.
2444 */
2445 btrfs_unlock_up_safe(p, level + 1);
2446 btrfs_set_path_blocking(p);
2447
2448 if (p->reada != READA_NONE)
2449 reada_for_search(fs_info, p, level, slot, key->objectid);
2450
2451 ret = -EAGAIN;
2452 tmp = read_tree_block(fs_info, blocknr, gen, parent_level - 1,
2453 &first_key);
2454 if (!IS_ERR(tmp)) {
		/*
		 * If the read above didn't mark this buffer up to date,
		 * it will never end up being up to date. Set ret to -EIO now
		 * and give up so that our caller doesn't loop forever
		 * on our -EAGAINs.
		 */
2461 if (!extent_buffer_uptodate(tmp))
2462 ret = -EIO;
2463 free_extent_buffer(tmp);
2464 } else {
2465 ret = PTR_ERR(tmp);
2466 }
2467
2468 btrfs_release_path(p);
2469 return ret;
2470}
2471
2472/*
2473 * helper function for btrfs_search_slot. This does all of the checks
2474 * for node-level blocks and does any balancing required based on
2475 * the ins_len.
2476 *
2477 * If no extra work was required, zero is returned. If we had to
2478 * drop the path, -EAGAIN is returned and btrfs_search_slot must
2479 * start over
2480 */
2481static int
2482setup_nodes_for_search(struct btrfs_trans_handle *trans,
2483 struct btrfs_root *root, struct btrfs_path *p,
2484 struct extent_buffer *b, int level, int ins_len,
2485 int *write_lock_level)
2486{
2487 struct btrfs_fs_info *fs_info = root->fs_info;
2488 int ret;
2489
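	/*
	 * A node within three pointers of being full is split before we
	 * descend any further; one that is less than half full is balanced
	 * against its siblings instead.
	 */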
2490 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2491 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {
2492 int sret;
2493
2494 if (*write_lock_level < level + 1) {
2495 *write_lock_level = level + 1;
2496 btrfs_release_path(p);
2497 goto again;
2498 }
2499
2500 btrfs_set_path_blocking(p);
2501 reada_for_balance(fs_info, p, level);
2502 sret = split_node(trans, root, p, level);
2503 btrfs_clear_path_blocking(p, NULL, 0);
2504
2505 BUG_ON(sret > 0);
2506 if (sret) {
2507 ret = sret;
2508 goto done;
2509 }
2510 b = p->nodes[level];
2511 } else if (ins_len < 0 && btrfs_header_nritems(b) <
2512 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {
2513 int sret;
2514
2515 if (*write_lock_level < level + 1) {
2516 *write_lock_level = level + 1;
2517 btrfs_release_path(p);
2518 goto again;
2519 }
2520
2521 btrfs_set_path_blocking(p);
2522 reada_for_balance(fs_info, p, level);
2523 sret = balance_level(trans, root, p, level);
2524 btrfs_clear_path_blocking(p, NULL, 0);
2525
2526 if (sret) {
2527 ret = sret;
2528 goto done;
2529 }
2530 b = p->nodes[level];
2531 if (!b) {
2532 btrfs_release_path(p);
2533 goto again;
2534 }
2535 BUG_ON(btrfs_header_nritems(b) == 1);
2536 }
2537 return 0;
2538
2539again:
2540 ret = -EAGAIN;
2541done:
2542 return ret;
2543}
2544
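/*
 * With CONFIG_BTRFS_ASSERT enabled, verify that the key at slot 0 of @b
 * really equals @key; used when key_search() skips the binary search.
 */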
2545static void key_search_validate(struct extent_buffer *b,
2546 const struct btrfs_key *key,
2547 int level)
2548{
2549#ifdef CONFIG_BTRFS_ASSERT
2550 struct btrfs_disk_key disk_key;
2551
2552 btrfs_cpu_key_to_disk(&disk_key, key);
2553
2554 if (level == 0)
2555 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2556 offsetof(struct btrfs_leaf, items[0].key),
2557 sizeof(disk_key)));
2558 else
2559 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2560 offsetof(struct btrfs_node, ptrs[0].key),
2561 sizeof(disk_key)));
2562#endif
2563}
2564
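/*
 * Once a level has compared equal (*prev_cmp == 0), the btree invariant that
 * a node's first key matches the key in its parent pointer means the key sits
 * at slot 0 of every node further down, so the binary search can be skipped
 * for the rest of the descent.
 */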
2565static int key_search(struct extent_buffer *b, const struct btrfs_key *key,
2566 int level, int *prev_cmp, int *slot)
2567{
2568 if (*prev_cmp != 0) {
2569 *prev_cmp = btrfs_bin_search(b, key, level, slot);
2570 return *prev_cmp;
2571 }
2572
2573 key_search_validate(b, key, level);
2574 *slot = 0;
2575
2576 return 0;
2577}
2578
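/*
 * Search @fs_root for the key (@iobjectid, @key_type, @ioff), stepping to the
 * next leaf when the search lands past the last item. Returns 0 and fills in
 * @found_key if an item with matching objectid and type exists, 1 if not, and
 * < 0 on error.
 */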
2579int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
2580 u64 iobjectid, u64 ioff, u8 key_type,
2581 struct btrfs_key *found_key)
2582{
2583 int ret;
2584 struct btrfs_key key;
2585 struct extent_buffer *eb;
2586
2587 ASSERT(path);
2588 ASSERT(found_key);
2589
2590 key.type = key_type;
2591 key.objectid = iobjectid;
2592 key.offset = ioff;
2593
2594 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
2595 if (ret < 0)
2596 return ret;
2597
2598 eb = path->nodes[0];
2599 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
2600 ret = btrfs_next_leaf(fs_root, path);
2601 if (ret)
2602 return ret;
2603 eb = path->nodes[0];
2604 }
2605
2606 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
2607 if (found_key->type != key.type ||
2608 found_key->objectid != key.objectid)
2609 return 1;
2610
2611 return 0;
2612}
2613
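/*
 * Grab the root node of @root for a search: the commit root (never locked)
 * when p->search_commit_root is set, otherwise the current root, read locked
 * if write_lock_level allows it and write locked if not.
 */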
2614static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
2615 struct btrfs_path *p,
2616 int write_lock_level)
2617{
2618 struct btrfs_fs_info *fs_info = root->fs_info;
2619 struct extent_buffer *b;
2620 int root_lock;
2621 int level = 0;
2622
2623 /* We try very hard to do read locks on the root */
2624 root_lock = BTRFS_READ_LOCK;
2625
2626 if (p->search_commit_root) {
2627 /* The commit roots are read only so we always do read locks */
2628 if (p->need_commit_sem)
2629 down_read(&fs_info->commit_root_sem);
2630 b = root->commit_root;
2631 extent_buffer_get(b);
2632 level = btrfs_header_level(b);
2633 if (p->need_commit_sem)
2634 up_read(&fs_info->commit_root_sem);
2635 /*
2636 * Ensure that all callers have set skip_locking when
2637 * p->search_commit_root = 1.
2638 */
2639 ASSERT(p->skip_locking == 1);
2640
2641 goto out;
2642 }
2643
2644 if (p->skip_locking) {
2645 b = btrfs_root_node(root);
2646 level = btrfs_header_level(b);
2647 goto out;
2648 }
2649
2650 /*
2651 * If the level is set to maximum, we can skip trying to get the read
2652 * lock.
2653 */
2654 if (write_lock_level < BTRFS_MAX_LEVEL) {
2655 /*
2656 * We don't know the level of the root node until we actually
2657 * have it read locked
2658 */
2659 b = btrfs_read_lock_root_node(root);
2660 level = btrfs_header_level(b);
2661 if (level > write_lock_level)
2662 goto out;
2663
2664 /* Whoops, must trade for write lock */
2665 btrfs_tree_read_unlock(b);
2666 free_extent_buffer(b);
2667 }
2668
2669 b = btrfs_lock_root_node(root);
2670 root_lock = BTRFS_WRITE_LOCK;
2671
2672 /* The level might have changed, check again */
2673 level = btrfs_header_level(b);
2674
2675out:
2676 p->nodes[level] = b;
2677 if (!p->skip_locking)
2678 p->locks[level] = root_lock;
2679 /*
2680 * Callers are responsible for dropping b's references.
2681 */
2682 return b;
2683}
2684
2685
2686/*
2687 * btrfs_search_slot - look for a key in a tree and perform necessary
2688 * modifications to preserve tree invariants.
2689 *
2690 * @trans: Handle of transaction, used when modifying the tree
2691 * @p: Holds all btree nodes along the search path
2692 * @root: The root node of the tree
2693 * @key: The key we are looking for
2694 * @ins_len: Indicates purpose of search, for inserts it is 1, for
2695 * deletions it's -1. 0 for plain searches
 * @cow:	boolean indicating whether CoW operations should be performed.
 *		Must always be 1 when modifying the tree.
2698 *
2699 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
2700 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
2701 *
2702 * If @key is found, 0 is returned and you can find the item in the leaf level
2703 * of the path (level 0)
2704 *
2705 * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
2706 * points to the slot where it should be inserted
2707 *
2708 * If an error is encountered while searching the tree a negative error number
2709 * is returned
2710 */
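/*
 * A minimal usage sketch for a read-only lookup (error handling elided; the
 * inode number and key type are placeholders):
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct btrfs_key key = { .objectid = ino,
 *				 .type = BTRFS_INODE_ITEM_KEY,
 *				 .offset = 0 };
 *	int ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0)
 *		the item is at path->nodes[0], slot path->slots[0]
 *	btrfs_free_path(path);
 */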
2711int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2712 const struct btrfs_key *key, struct btrfs_path *p,
2713 int ins_len, int cow)
2714{
2715 struct btrfs_fs_info *fs_info = root->fs_info;
2716 struct extent_buffer *b;
2717 int slot;
2718 int ret;
2719 int err;
2720 int level;
2721 int lowest_unlock = 1;
2722 /* everything at write_lock_level or lower must be write locked */
2723 int write_lock_level = 0;
2724 u8 lowest_level = 0;
2725 int min_write_lock_level;
2726 int prev_cmp;
2727
2728 lowest_level = p->lowest_level;
2729 WARN_ON(lowest_level && ins_len > 0);
2730 WARN_ON(p->nodes[0] != NULL);
2731 BUG_ON(!cow && ins_len);
2732
2733 if (ins_len < 0) {
2734 lowest_unlock = 2;
2735
		/* when we are removing items, we might have to go up to level
		 * two as we update tree pointers. Make sure we keep write
		 * locks for those levels as well
		 */
2740 write_lock_level = 2;
2741 } else if (ins_len > 0) {
2742 /*
2743 * for inserting items, make sure we have a write lock on
2744 * level 1 so we can update keys
2745 */
2746 write_lock_level = 1;
2747 }
2748
2749 if (!cow)
2750 write_lock_level = -1;
2751
2752 if (cow && (p->keep_locks || p->lowest_level))
2753 write_lock_level = BTRFS_MAX_LEVEL;
2754
2755 min_write_lock_level = write_lock_level;
2756
2757again:
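	/* a fresh descent must redo the first comparison in key_search() */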
2758 prev_cmp = -1;
2759 b = btrfs_search_slot_get_root(root, p, write_lock_level);
2760
2761 while (b) {
2762 level = btrfs_header_level(b);
2763
2764 /*
2765 * setup the path here so we can release it under lock
2766 * contention with the cow code
2767 */
2768 if (cow) {
2769 bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
2770
2771 /*
2772 * if we don't really need to cow this block
2773 * then we don't want to set the path blocking,
2774 * so we test it here
2775 */
2776 if (!should_cow_block(trans, root, b)) {
2777 trans->dirty = true;
2778 goto cow_done;
2779 }
2780
2781 /*
2782 * must have write locks on this node and the
2783 * parent
2784 */
2785 if (level > write_lock_level ||
2786 (level + 1 > write_lock_level &&
2787 level + 1 < BTRFS_MAX_LEVEL &&
2788 p->nodes[level + 1])) {
2789 write_lock_level = level + 1;
2790 btrfs_release_path(p);
2791 goto again;
2792 }
2793
2794 btrfs_set_path_blocking(p);
2795 if (last_level)
2796 err = btrfs_cow_block(trans, root, b, NULL, 0,
2797 &b);
2798 else
2799 err = btrfs_cow_block(trans, root, b,
2800 p->nodes[level + 1],
2801 p->slots[level + 1], &b);
2802 if (err) {
2803 ret = err;
2804 goto done;
2805 }
2806 }
2807cow_done:
2808 p->nodes[level] = b;
2809 btrfs_clear_path_blocking(p, NULL, 0);
2810
		/*
		 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way for the items in b to change.
		 * It is safe to drop the lock on our parent before we
		 * go through the expensive btree search on b.
		 *
		 * If we're inserting or deleting (ins_len != 0), then we might
		 * be changing slot zero, which may require changing the parent.
		 * So, we can't drop the lock until after we know which slot
		 * we're operating on.
		 */
2822 if (!ins_len && !p->keep_locks) {
2823 int u = level + 1;
2824
2825 if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2826 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2827 p->locks[u] = 0;
2828 }
2829 }
2830
2831 ret = key_search(b, key, level, &prev_cmp, &slot);
2832 if (ret < 0)
2833 goto done;
2834
2835 if (level != 0) {
2836 int dec = 0;
2837 if (ret && slot > 0) {
2838 dec = 1;
2839 slot -= 1;
2840 }
2841 p->slots[level] = slot;
2842 err = setup_nodes_for_search(trans, root, p, b, level,
2843 ins_len, &write_lock_level);
2844 if (err == -EAGAIN)
2845 goto again;
2846 if (err) {
2847 ret = err;
2848 goto done;
2849 }
2850 b = p->nodes[level];
2851 slot = p->slots[level];
2852
2853 /*
2854 * slot 0 is special, if we change the key
2855 * we have to update the parent pointer
2856 * which means we must have a write lock
2857 * on the parent
2858 */
2859 if (slot == 0 && ins_len &&
2860 write_lock_level < level + 1) {
2861 write_lock_level = level + 1;
2862 btrfs_release_path(p);
2863 goto again;
2864 }
2865
2866 unlock_up(p, level, lowest_unlock,
2867 min_write_lock_level, &write_lock_level);
2868
2869 if (level == lowest_level) {
2870 if (dec)
2871 p->slots[level]++;
2872 goto done;
2873 }
2874
2875 err = read_block_for_search(root, p, &b, level,
2876 slot, key);
2877 if (err == -EAGAIN)
2878 goto again;
2879 if (err) {
2880 ret = err;
2881 goto done;
2882 }
2883
2884 if (!p->skip_locking) {
2885 level = btrfs_header_level(b);
2886 if (level <= write_lock_level) {
2887 err = btrfs_try_tree_write_lock(b);
2888 if (!err) {
2889 btrfs_set_path_blocking(p);
2890 btrfs_tree_lock(b);
2891 btrfs_clear_path_blocking(p, b,
2892 BTRFS_WRITE_LOCK);
2893 }
2894 p->locks[level] = BTRFS_WRITE_LOCK;
2895 } else {
2896 err = btrfs_tree_read_lock_atomic(b);
2897 if (!err) {
2898 btrfs_set_path_blocking(p);
2899 btrfs_tree_read_lock(b);
2900 btrfs_clear_path_blocking(p, b,
2901 BTRFS_READ_LOCK);
2902 }
2903 p->locks[level] = BTRFS_READ_LOCK;
2904 }
2905 p->nodes[level] = b;
2906 }
2907 } else {
2908 p->slots[level] = slot;
2909 if (ins_len > 0 &&
2910 btrfs_leaf_free_space(fs_info, b) < ins_len) {
2911 if (write_lock_level < 1) {
2912 write_lock_level = 1;
2913 btrfs_release_path(p);
2914 goto again;
2915 }
2916
2917 btrfs_set_path_blocking(p);
2918 err = split_leaf(trans, root, key,
2919 p, ins_len, ret == 0);
2920 btrfs_clear_path_blocking(p, NULL, 0);
2921
2922 BUG_ON(err > 0);
2923 if (err) {
2924 ret = err;
2925 goto done;
2926 }
2927 }
2928 if (!p->search_for_split)
2929 unlock_up(p, level, lowest_unlock,
2930 min_write_lock_level, &write_lock_level);
2931 goto done;
2932 }
2933 }
2934 ret = 1;
2935done:
2936 /*
2937 * we don't really know what they plan on doing with the path
2938 * from here on, so for now just mark it as blocking
2939 */
2940 if (!p->leave_spinning)
2941 btrfs_set_path_blocking(p);
2942 if (ret < 0 && !p->skip_release_on_error)
2943 btrfs_release_path(p);
2944 return ret;
2945}
2946
2947/*
2948 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2949 * current state of the tree together with the operations recorded in the tree
2950 * modification log to search for the key in a previous version of this tree, as
2951 * denoted by the time_seq parameter.
2952 *
2953 * Naturally, there is no support for insert, delete or cow operations.
2954 *
2955 * The resulting path and return value will be set up as if we called
2956 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2957 */
2958int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
2959 struct btrfs_path *p, u64 time_seq)
2960{
2961 struct btrfs_fs_info *fs_info = root->fs_info;
2962 struct extent_buffer *b;
2963 int slot;
2964 int ret;
2965 int err;
2966 int level;
2967 int lowest_unlock = 1;
2968 u8 lowest_level = 0;
2969 int prev_cmp = -1;
2970
2971 lowest_level = p->lowest_level;
2972 WARN_ON(p->nodes[0] != NULL);
2973
2974 if (p->search_commit_root) {
2975 BUG_ON(time_seq);
2976 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2977 }
2978
2979again:
2980 b = get_old_root(root, time_seq);
2981 level = btrfs_header_level(b);
2982 p->locks[level] = BTRFS_READ_LOCK;
2983
2984 while (b) {
2985 level = btrfs_header_level(b);
2986 p->nodes[level] = b;
2987 btrfs_clear_path_blocking(p, NULL, 0);
2988
		/*
		 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way for the items in b to change.
		 * It is safe to drop the lock on our parent before we
		 * go through the expensive btree search on b.
		 */
2995 btrfs_unlock_up_safe(p, level + 1);
2996
2997 /*
2998 * Since we can unwind ebs we want to do a real search every
2999 * time.
3000 */
3001 prev_cmp = -1;
3002 ret = key_search(b, key, level, &prev_cmp, &slot);
3003
3004 if (level != 0) {
3005 int dec = 0;
3006 if (ret && slot > 0) {
3007 dec = 1;
3008 slot -= 1;
3009 }
3010 p->slots[level] = slot;
3011 unlock_up(p, level, lowest_unlock, 0, NULL);
3012
3013 if (level == lowest_level) {
3014 if (dec)
3015 p->slots[level]++;
3016 goto done;
3017 }
3018
3019 err = read_block_for_search(root, p, &b, level,
3020 slot, key);
3021 if (err == -EAGAIN)
3022 goto again;
3023 if (err) {
3024 ret = err;
3025 goto done;
3026 }
3027
3028 level = btrfs_header_level(b);
3029 err = btrfs_tree_read_lock_atomic(b);
3030 if (!err) {
3031 btrfs_set_path_blocking(p);
3032 btrfs_tree_read_lock(b);
3033 btrfs_clear_path_blocking(p, b,
3034 BTRFS_READ_LOCK);
3035 }
3036 b = tree_mod_log_rewind(fs_info, p, b, time_seq);
3037 if (!b) {
3038 ret = -ENOMEM;
3039 goto done;
3040 }
3041 p->locks[level] = BTRFS_READ_LOCK;
3042 p->nodes[level] = b;
3043 } else {
3044 p->slots[level] = slot;
3045 unlock_up(p, level, lowest_unlock, 0, NULL);
3046 goto done;
3047 }
3048 }
3049 ret = 1;
3050done:
3051 if (!p->leave_spinning)
3052 btrfs_set_path_blocking(p);
3053 if (ret < 0)
3054 btrfs_release_path(p);
3055
3056 return ret;
3057}
3058
3059/*
3060 * helper to use instead of search slot if no exact match is needed but
3061 * instead the next or previous item should be returned.
3062 * When find_higher is true, the next higher item is returned, the next lower
3063 * otherwise.
3064 * When return_any and find_higher are both true, and no higher item is found,
3065 * return the next lower instead.
3066 * When return_any is true and find_higher is false, and no lower item is found,
3067 * return the next higher instead.
3068 * It returns 0 if any item is found, 1 if none is found (tree empty), and
3069 * < 0 on error
3070 */
3071int btrfs_search_slot_for_read(struct btrfs_root *root,
3072 const struct btrfs_key *key,
3073 struct btrfs_path *p, int find_higher,
3074 int return_any)
3075{
3076 int ret;
3077 struct extent_buffer *leaf;
3078
3079again:
3080 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
3081 if (ret <= 0)
3082 return ret;
3083 /*
3084 * a return value of 1 means the path is at the position where the
3085 * item should be inserted. Normally this is the next bigger item,
3086 * but in case the previous item is the last in a leaf, path points
3087 * to the first free slot in the previous leaf, i.e. at an invalid
3088 * item.
3089 */
3090 leaf = p->nodes[0];
3091
3092 if (find_higher) {
3093 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
3094 ret = btrfs_next_leaf(root, p);
3095 if (ret <= 0)
3096 return ret;
3097 if (!return_any)
3098 return 1;
3099 /*
3100 * no higher item found, return the next
3101 * lower instead
3102 */
3103 return_any = 0;
3104 find_higher = 0;
3105 btrfs_release_path(p);
3106 goto again;
3107 }
3108 } else {
3109 if (p->slots[0] == 0) {
3110 ret = btrfs_prev_leaf(root, p);
3111 if (ret < 0)
3112 return ret;
3113 if (!ret) {
3114 leaf = p->nodes[0];
3115 if (p->slots[0] == btrfs_header_nritems(leaf))
3116 p->slots[0]--;
3117 return 0;
3118 }
3119 if (!return_any)
3120 return 1;
3121 /*
3122 * no lower item found, return the next
3123 * higher instead
3124 */
3125 return_any = 0;
3126 find_higher = 1;
3127 btrfs_release_path(p);
3128 goto again;
3129 } else {
3130 --p->slots[0];
3131 }
3132 }
3133 return 0;
3134}
3135
/*
 * adjust the pointers going up the tree, starting at level
 * making sure the right key of each node points to 'key'.
 * This is used after shifting pointers to the left, so it stops
 * fixing up pointers when a given leaf/node is not in slot 0 of the
 * higher levels
 */
3144static void fixup_low_keys(struct btrfs_path *path,
3145 struct btrfs_disk_key *key, int level)
3146{
3147 int i;
3148 struct extent_buffer *t;
3149 int ret;
3150
3151 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
3152 int tslot = path->slots[i];
3153
3154 if (!path->nodes[i])
3155 break;
3156 t = path->nodes[i];
3157 ret = tree_mod_log_insert_key(t, tslot, MOD_LOG_KEY_REPLACE,
3158 GFP_ATOMIC);
3159 BUG_ON(ret < 0);
3160 btrfs_set_node_key(t, key, tslot);
3161 btrfs_mark_buffer_dirty(path->nodes[i]);
3162 if (tslot != 0)
3163 break;
3164 }
3165}
3166
3167/*
3168 * update item key.
3169 *
3170 * This function isn't completely safe. It's the caller's responsibility
3171 * that the new key won't break the order
3172 */
3173void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
3174 struct btrfs_path *path,
3175 const struct btrfs_key *new_key)
3176{
3177 struct btrfs_disk_key disk_key;
3178 struct extent_buffer *eb;
3179 int slot;
3180
3181 eb = path->nodes[0];
3182 slot = path->slots[0];
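	/* the new key must still sort strictly between its neighbours */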
3183 if (slot > 0) {
3184 btrfs_item_key(eb, &disk_key, slot - 1);
3185 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
3186 }
3187 if (slot < btrfs_header_nritems(eb) - 1) {
3188 btrfs_item_key(eb, &disk_key, slot + 1);
3189 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
3190 }
3191
3192 btrfs_cpu_key_to_disk(&disk_key, new_key);
3193 btrfs_set_item_key(eb, &disk_key, slot);
3194 btrfs_mark_buffer_dirty(eb);
3195 if (slot == 0)
3196 fixup_low_keys(path, &disk_key, 1);
3197}
3198
3199/*
3200 * try to push data from one node into the next node left in the
3201 * tree.
3202 *
3203 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
3204 * error, and > 0 if there was no room in the left hand block.
3205 */
3206static int push_node_left(struct btrfs_trans_handle *trans,
3207 struct btrfs_fs_info *fs_info,
3208 struct extent_buffer *dst,
3209 struct extent_buffer *src, int empty)
3210{
3211 int push_items = 0;
3212 int src_nritems;
3213 int dst_nritems;
3214 int ret = 0;
3215
3216 src_nritems = btrfs_header_nritems(src);
3217 dst_nritems = btrfs_header_nritems(dst);
3218 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
3219 WARN_ON(btrfs_header_generation(src) != trans->transid);
3220 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3221
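	/*
	 * with eight or fewer pointers in the source it is not worth pushing
	 * unless we are allowed to empty it completely
	 */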
3222 if (!empty && src_nritems <= 8)
3223 return 1;
3224
3225 if (push_items <= 0)
3226 return 1;
3227
3228 if (empty) {
3229 push_items = min(src_nritems, push_items);
3230 if (push_items < src_nritems) {
3231 /* leave at least 8 pointers in the node if
3232 * we aren't going to empty it
3233 */
3234 if (src_nritems - push_items < 8) {
3235 if (push_items <= 8)
3236 return 1;
3237 push_items -= 8;
3238 }
3239 }
3240 } else
3241 push_items = min(src_nritems - 8, push_items);
3242
3243 ret = tree_mod_log_eb_copy(fs_info, dst, src, dst_nritems, 0,
3244 push_items);
3245 if (ret) {
3246 btrfs_abort_transaction(trans, ret);
3247 return ret;
3248 }
3249 copy_extent_buffer(dst, src,
3250 btrfs_node_key_ptr_offset(dst_nritems),
3251 btrfs_node_key_ptr_offset(0),
3252 push_items * sizeof(struct btrfs_key_ptr));
3253
3254 if (push_items < src_nritems) {
3255 /*
3256 * Don't call tree_mod_log_insert_move here, key removal was
3257 * already fully logged by tree_mod_log_eb_copy above.
3258 */
3259 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3260 btrfs_node_key_ptr_offset(push_items),
3261 (src_nritems - push_items) *
3262 sizeof(struct btrfs_key_ptr));
3263 }
3264 btrfs_set_header_nritems(src, src_nritems - push_items);
3265 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3266 btrfs_mark_buffer_dirty(src);
3267 btrfs_mark_buffer_dirty(dst);
3268
3269 return ret;
3270}
3271
3272/*
3273 * try to push data from one node into the next node right in the
3274 * tree.
3275 *
3276 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3277 * error, and > 0 if there was no room in the right hand block.
3278 *
3279 * this will only push up to 1/2 the contents of the left node over
3280 */
3281static int balance_node_right(struct btrfs_trans_handle *trans,
3282 struct btrfs_fs_info *fs_info,
3283 struct extent_buffer *dst,
3284 struct extent_buffer *src)
3285{
3286 int push_items = 0;
3287 int max_push;
3288 int src_nritems;
3289 int dst_nritems;
3290 int ret = 0;
3291
3292 WARN_ON(btrfs_header_generation(src) != trans->transid);
3293 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3294
3295 src_nritems = btrfs_header_nritems(src);
3296 dst_nritems = btrfs_header_nritems(dst);
3297 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
3298 if (push_items <= 0)
3299 return 1;
3300
3301 if (src_nritems < 4)
3302 return 1;
3303
3304 max_push = src_nritems / 2 + 1;
3305 /* don't try to empty the node */
3306 if (max_push >= src_nritems)
3307 return 1;
3308
3309 if (max_push < push_items)
3310 push_items = max_push;
3311
3312 ret = tree_mod_log_insert_move(dst, push_items, 0, dst_nritems);
3313 BUG_ON(ret < 0);
3314 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3315 btrfs_node_key_ptr_offset(0),
3316 (dst_nritems) *
3317 sizeof(struct btrfs_key_ptr));
3318
3319 ret = tree_mod_log_eb_copy(fs_info, dst, src, 0,
3320 src_nritems - push_items, push_items);
3321 if (ret) {
3322 btrfs_abort_transaction(trans, ret);
3323 return ret;
3324 }
3325 copy_extent_buffer(dst, src,
3326 btrfs_node_key_ptr_offset(0),
3327 btrfs_node_key_ptr_offset(src_nritems - push_items),
3328 push_items * sizeof(struct btrfs_key_ptr));
3329
3330 btrfs_set_header_nritems(src, src_nritems - push_items);
3331 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3332
3333 btrfs_mark_buffer_dirty(src);
3334 btrfs_mark_buffer_dirty(dst);
3335
3336 return ret;
3337}
3338
3339/*
3340 * helper function to insert a new root level in the tree.
3341 * A new node is allocated, and a single item is inserted to
3342 * point to the existing root
3343 *
3344 * returns zero on success or < 0 on failure.
3345 */
3346static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3347 struct btrfs_root *root,
3348 struct btrfs_path *path, int level)
3349{
3350 struct btrfs_fs_info *fs_info = root->fs_info;
3351 u64 lower_gen;
3352 struct extent_buffer *lower;
3353 struct extent_buffer *c;
3354 struct extent_buffer *old;
3355 struct btrfs_disk_key lower_key;
3356 int ret;
3357
3358 BUG_ON(path->nodes[level]);
3359 BUG_ON(path->nodes[level-1] != root->node);
3360
3361 lower = path->nodes[level-1];
3362 if (level == 1)
3363 btrfs_item_key(lower, &lower_key, 0);
3364 else
3365 btrfs_node_key(lower, &lower_key, 0);
3366
3367 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3368 &lower_key, level, root->node->start, 0);
3369 if (IS_ERR(c))
3370 return PTR_ERR(c);
3371
3372 root_add_used(root, fs_info->nodesize);
3373
3374 btrfs_set_header_nritems(c, 1);
3375 btrfs_set_node_key(c, &lower_key, 0);
3376 btrfs_set_node_blockptr(c, 0, lower->start);
3377 lower_gen = btrfs_header_generation(lower);
3378 WARN_ON(lower_gen != trans->transid);
3379
3380 btrfs_set_node_ptr_generation(c, 0, lower_gen);
3381
3382 btrfs_mark_buffer_dirty(c);
3383
3384 old = root->node;
3385 ret = tree_mod_log_insert_root(root->node, c, 0);
3386 BUG_ON(ret < 0);
3387 rcu_assign_pointer(root->node, c);
3388
3389 /* the super has an extra ref to root->node */
3390 free_extent_buffer(old);
3391
3392 add_root_to_dirty_list(root);
3393 extent_buffer_get(c);
3394 path->nodes[level] = c;
3395 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
3396 path->slots[level] = 0;
3397 return 0;
3398}
3399
3400/*
3401 * worker function to insert a single pointer in a node.
3402 * the node should have enough room for the pointer already
3403 *
3404 * slot and level indicate where you want the key to go, and
3405 * blocknr is the block the key points to.
3406 */
3407static void insert_ptr(struct btrfs_trans_handle *trans,
3408 struct btrfs_fs_info *fs_info, struct btrfs_path *path,
3409 struct btrfs_disk_key *key, u64 bytenr,
3410 int slot, int level)
3411{
3412 struct extent_buffer *lower;
3413 int nritems;
3414 int ret;
3415
3416 BUG_ON(!path->nodes[level]);
3417 btrfs_assert_tree_locked(path->nodes[level]);
3418 lower = path->nodes[level];
3419 nritems = btrfs_header_nritems(lower);
3420 BUG_ON(slot > nritems);
3421 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(fs_info));
3422 if (slot != nritems) {
3423 if (level) {
3424 ret = tree_mod_log_insert_move(lower, slot + 1, slot,
3425 nritems - slot);
3426 BUG_ON(ret < 0);
3427 }
3428 memmove_extent_buffer(lower,
3429 btrfs_node_key_ptr_offset(slot + 1),
3430 btrfs_node_key_ptr_offset(slot),
3431 (nritems - slot) * sizeof(struct btrfs_key_ptr));
3432 }
3433 if (level) {
3434 ret = tree_mod_log_insert_key(lower, slot, MOD_LOG_KEY_ADD,
3435 GFP_NOFS);
3436 BUG_ON(ret < 0);
3437 }
3438 btrfs_set_node_key(lower, key, slot);
3439 btrfs_set_node_blockptr(lower, slot, bytenr);
3440 WARN_ON(trans->transid == 0);
3441 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3442 btrfs_set_header_nritems(lower, nritems + 1);
3443 btrfs_mark_buffer_dirty(lower);
3444}
3445
3446/*
3447 * split the node at the specified level in path in two.
3448 * The path is corrected to point to the appropriate node after the split
3449 *
3450 * Before splitting this tries to make some room in the node by pushing
3451 * left and right, if either one works, it returns right away.
3452 *
3453 * returns 0 on success and < 0 on failure
3454 */
3455static noinline int split_node(struct btrfs_trans_handle *trans,
3456 struct btrfs_root *root,
3457 struct btrfs_path *path, int level)
3458{
3459 struct btrfs_fs_info *fs_info = root->fs_info;
3460 struct extent_buffer *c;
3461 struct extent_buffer *split;
3462 struct btrfs_disk_key disk_key;
3463 int mid;
3464 int ret;
3465 u32 c_nritems;
3466
3467 c = path->nodes[level];
3468 WARN_ON(btrfs_header_generation(c) != trans->transid);
3469 if (c == root->node) {
		/*
		 * trying to split the root, let's make a new one
		 *
		 * tree mod log: We don't log removal of the old root in
		 * insert_new_root, because that root buffer will be kept as a
		 * normal node. We are going to log removal of half of the
		 * elements below with tree_mod_log_eb_copy. We're holding a
		 * tree lock on the buffer, which is why we cannot race with
		 * other tree_mod_log users.
		 */
3480 ret = insert_new_root(trans, root, path, level + 1);
3481 if (ret)
3482 return ret;
3483 } else {
3484 ret = push_nodes_for_insert(trans, root, path, level);
3485 c = path->nodes[level];
3486 if (!ret && btrfs_header_nritems(c) <
3487 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3)
3488 return 0;
3489 if (ret < 0)
3490 return ret;
3491 }
3492
3493 c_nritems = btrfs_header_nritems(c);
3494 mid = (c_nritems + 1) / 2;
3495 btrfs_node_key(c, &disk_key, mid);
3496
3497 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3498 &disk_key, level, c->start, 0);
3499 if (IS_ERR(split))
3500 return PTR_ERR(split);
3501
3502 root_add_used(root, fs_info->nodesize);
3503 ASSERT(btrfs_header_level(c) == level);
3504
3505 ret = tree_mod_log_eb_copy(fs_info, split, c, 0, mid, c_nritems - mid);
3506 if (ret) {
3507 btrfs_abort_transaction(trans, ret);
3508 return ret;
3509 }
3510 copy_extent_buffer(split, c,
3511 btrfs_node_key_ptr_offset(0),
3512 btrfs_node_key_ptr_offset(mid),
3513 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3514 btrfs_set_header_nritems(split, c_nritems - mid);
3515 btrfs_set_header_nritems(c, mid);
3516 ret = 0;
3517
3518 btrfs_mark_buffer_dirty(c);
3519 btrfs_mark_buffer_dirty(split);
3520
3521 insert_ptr(trans, fs_info, path, &disk_key, split->start,
3522 path->slots[level + 1] + 1, level + 1);
3523
3524 if (path->slots[level] >= mid) {
3525 path->slots[level] -= mid;
3526 btrfs_tree_unlock(c);
3527 free_extent_buffer(c);
3528 path->nodes[level] = split;
3529 path->slots[level + 1] += 1;
3530 } else {
3531 btrfs_tree_unlock(split);
3532 free_extent_buffer(split);
3533 }
3534 return ret;
3535}
3536
3537/*
3538 * how many bytes are required to store the items in a leaf. start
3539 * and nr indicate which items in the leaf to check. This totals up the
3540 * space used both by the item structs and the item data
3541 */
3542static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3543{
3544 struct btrfs_item *start_item;
3545 struct btrfs_item *end_item;
3546 struct btrfs_map_token token;
3547 int data_len;
3548 int nritems = btrfs_header_nritems(l);
3549 int end = min(nritems, start + nr) - 1;
3550
3551 if (!nr)
3552 return 0;
3553 btrfs_init_map_token(&token);
3554 start_item = btrfs_item_nr(start);
3555 end_item = btrfs_item_nr(end);
3556 data_len = btrfs_token_item_offset(l, start_item, &token) +
3557 btrfs_token_item_size(l, start_item, &token);
3558 data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3559 data_len += sizeof(struct btrfs_item) * nr;
3560 WARN_ON(data_len < 0);
3561 return data_len;
3562}
3563
3564/*
3565 * The space between the end of the leaf items and
3566 * the start of the leaf data. IOW, how much room
3567 * the leaf has left for both items and data
3568 */
3569noinline int btrfs_leaf_free_space(struct btrfs_fs_info *fs_info,
3570 struct extent_buffer *leaf)
3571{
3572 int nritems = btrfs_header_nritems(leaf);
3573 int ret;
3574
3575 ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
3576 if (ret < 0) {
3577 btrfs_crit(fs_info,
3578 "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3579 ret,
3580 (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
3581 leaf_space_used(leaf, 0, nritems), nritems);
3582 }
3583 return ret;
3584}
3585
3586/*
3587 * min slot controls the lowest index we're willing to push to the
3588 * right. We'll push up to and including min_slot, but no lower
3589 */
3590static noinline int __push_leaf_right(struct btrfs_fs_info *fs_info,
3591 struct btrfs_path *path,
3592 int data_size, int empty,
3593 struct extent_buffer *right,
3594 int free_space, u32 left_nritems,
3595 u32 min_slot)
3596{
3597 struct extent_buffer *left = path->nodes[0];
3598 struct extent_buffer *upper = path->nodes[1];
3599 struct btrfs_map_token token;
3600 struct btrfs_disk_key disk_key;
3601 int slot;
3602 u32 i;
3603 int push_space = 0;
3604 int push_items = 0;
3605 struct btrfs_item *item;
3606 u32 nr;
3607 u32 right_nritems;
3608 u32 data_end;
3609 u32 this_item_size;
3610
3611 btrfs_init_map_token(&token);
3612
3613 if (empty)
3614 nr = 0;
3615 else
3616 nr = max_t(u32, 1, min_slot);
3617
3618 if (path->slots[0] >= left_nritems)
3619 push_space += data_size;
3620
3621 slot = path->slots[1];
3622 i = left_nritems - 1;
3623 while (i >= nr) {
3624 item = btrfs_item_nr(i);
3625
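		/*
		 * once something has been pushed, stop at the caller's
		 * insertion slot unless the destination still has plenty
		 * of room for the pending insert
		 */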
3626 if (!empty && push_items > 0) {
3627 if (path->slots[0] > i)
3628 break;
3629 if (path->slots[0] == i) {
3630 int space = btrfs_leaf_free_space(fs_info, left);
3631 if (space + push_space * 2 > free_space)
3632 break;
3633 }
3634 }
3635
3636 if (path->slots[0] == i)
3637 push_space += data_size;
3638
3639 this_item_size = btrfs_item_size(left, item);
3640 if (this_item_size + sizeof(*item) + push_space > free_space)
3641 break;
3642
3643 push_items++;
3644 push_space += this_item_size + sizeof(*item);
3645 if (i == 0)
3646 break;
3647 i--;
3648 }
3649
3650 if (push_items == 0)
3651 goto out_unlock;
3652
3653 WARN_ON(!empty && push_items == left_nritems);
3654
3655 /* push left to right */
3656 right_nritems = btrfs_header_nritems(right);
3657
3658 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3659 push_space -= leaf_data_end(fs_info, left);
3660
3661 /* make room in the right data area */
3662 data_end = leaf_data_end(fs_info, right);
3663 memmove_extent_buffer(right,
3664 BTRFS_LEAF_DATA_OFFSET + data_end - push_space,
3665 BTRFS_LEAF_DATA_OFFSET + data_end,
3666 BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
3667
3668 /* copy from the left data area */
3669 copy_extent_buffer(right, left, BTRFS_LEAF_DATA_OFFSET +
3670 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3671 BTRFS_LEAF_DATA_OFFSET + leaf_data_end(fs_info, left),
3672 push_space);
3673
3674 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3675 btrfs_item_nr_offset(0),
3676 right_nritems * sizeof(struct btrfs_item));
3677
3678 /* copy the items from left to right */
3679 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3680 btrfs_item_nr_offset(left_nritems - push_items),
3681 push_items * sizeof(struct btrfs_item));
3682
3683 /* update the item pointers */
3684 right_nritems += push_items;
3685 btrfs_set_header_nritems(right, right_nritems);
3686 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3687 for (i = 0; i < right_nritems; i++) {
3688 item = btrfs_item_nr(i);
3689 push_space -= btrfs_token_item_size(right, item, &token);
3690 btrfs_set_token_item_offset(right, item, push_space, &token);
3691 }
3692
3693 left_nritems -= push_items;
3694 btrfs_set_header_nritems(left, left_nritems);
3695
3696 if (left_nritems)
3697 btrfs_mark_buffer_dirty(left);
3698 else
3699 clean_tree_block(fs_info, left);
3700
3701 btrfs_mark_buffer_dirty(right);
3702
3703 btrfs_item_key(right, &disk_key, 0);
3704 btrfs_set_node_key(upper, &disk_key, slot + 1);
3705 btrfs_mark_buffer_dirty(upper);
3706
3707 /* then fixup the leaf pointer in the path */
3708 if (path->slots[0] >= left_nritems) {
3709 path->slots[0] -= left_nritems;
3710 if (btrfs_header_nritems(path->nodes[0]) == 0)
3711 clean_tree_block(fs_info, path->nodes[0]);
3712 btrfs_tree_unlock(path->nodes[0]);
3713 free_extent_buffer(path->nodes[0]);
3714 path->nodes[0] = right;
3715 path->slots[1] += 1;
3716 } else {
3717 btrfs_tree_unlock(right);
3718 free_extent_buffer(right);
3719 }
3720 return 0;
3721
3722out_unlock:
3723 btrfs_tree_unlock(right);
3724 free_extent_buffer(right);
3725 return 1;
3726}
3727
3728/*
3729 * push some data in the path leaf to the right, trying to free up at
3730 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3731 *
3732 * returns 1 if the push failed because the other node didn't have enough
3733 * room, 0 if everything worked out and < 0 if there were major errors.
3734 *
3735 * this will push starting from min_slot to the end of the leaf. It won't
3736 * push any slot lower than min_slot
3737 */
3738static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3739 *root, struct btrfs_path *path,
3740 int min_data_size, int data_size,
3741 int empty, u32 min_slot)
3742{
3743 struct btrfs_fs_info *fs_info = root->fs_info;
3744 struct extent_buffer *left = path->nodes[0];
3745 struct extent_buffer *right;
3746 struct extent_buffer *upper;
3747 int slot;
3748 int free_space;
3749 u32 left_nritems;
3750 int ret;
3751
3752 if (!path->nodes[1])
3753 return 1;
3754
3755 slot = path->slots[1];
3756 upper = path->nodes[1];
3757 if (slot >= btrfs_header_nritems(upper) - 1)
3758 return 1;
3759
3760 btrfs_assert_tree_locked(path->nodes[1]);
3761
3762 right = read_node_slot(fs_info, upper, slot + 1);
3763 /*
3764 * slot + 1 is not valid or we fail to read the right node,
3765 * no big deal, just return.
3766 */
3767 if (IS_ERR(right))
3768 return 1;
3769
3770 btrfs_tree_lock(right);
3771 btrfs_set_lock_blocking(right);
3772
3773 free_space = btrfs_leaf_free_space(fs_info, right);
3774 if (free_space < data_size)
3775 goto out_unlock;
3776
3777 /* cow and double check */
3778 ret = btrfs_cow_block(trans, root, right, upper,
3779 slot + 1, &right);
3780 if (ret)
3781 goto out_unlock;
3782
3783 free_space = btrfs_leaf_free_space(fs_info, right);
3784 if (free_space < data_size)
3785 goto out_unlock;
3786
3787 left_nritems = btrfs_header_nritems(left);
3788 if (left_nritems == 0)
3789 goto out_unlock;
3790
3791 if (path->slots[0] == left_nritems && !empty) {
		/* Key greater than all keys in the leaf, right neighbor has
		 * enough room for it and we're not emptying our leaf to delete
		 * it, therefore use right neighbor to insert the new item and
		 * no need to touch/dirty our left leaf. */
3796 btrfs_tree_unlock(left);
3797 free_extent_buffer(left);
3798 path->nodes[0] = right;
3799 path->slots[0] = 0;
3800 path->slots[1]++;
3801 return 0;
3802 }
3803
3804 return __push_leaf_right(fs_info, path, min_data_size, empty,
3805 right, free_space, left_nritems, min_slot);
3806out_unlock:
3807 btrfs_tree_unlock(right);
3808 free_extent_buffer(right);
3809 return 1;
3810}
3811
3812/*
3813 * push some data in the path leaf to the left, trying to free up at
3814 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3815 *
3816 * max_slot can put a limit on how far into the leaf we'll push items. The
3817 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
3818 * items
3819 */
3820static noinline int __push_leaf_left(struct btrfs_fs_info *fs_info,
3821 struct btrfs_path *path, int data_size,
3822 int empty, struct extent_buffer *left,
3823 int free_space, u32 right_nritems,
3824 u32 max_slot)
3825{
3826 struct btrfs_disk_key disk_key;
3827 struct extent_buffer *right = path->nodes[0];
3828 int i;
3829 int push_space = 0;
3830 int push_items = 0;
3831 struct btrfs_item *item;
3832 u32 old_left_nritems;
3833 u32 nr;
3834 int ret = 0;
3835 u32 this_item_size;
3836 u32 old_left_item_size;
3837 struct btrfs_map_token token;
3838
3839 btrfs_init_map_token(&token);
3840
3841 if (empty)
3842 nr = min(right_nritems, max_slot);
3843 else
3844 nr = min(right_nritems - 1, max_slot);
3845
3846 for (i = 0; i < nr; i++) {
3847 item = btrfs_item_nr(i);
3848
3849 if (!empty && push_items > 0) {
3850 if (path->slots[0] < i)
3851 break;
3852 if (path->slots[0] == i) {
3853 int space = btrfs_leaf_free_space(fs_info, right);
3854 if (space + push_space * 2 > free_space)
3855 break;
3856 }
3857 }
3858
3859 if (path->slots[0] == i)
3860 push_space += data_size;
3861
3862 this_item_size = btrfs_item_size(right, item);
3863 if (this_item_size + sizeof(*item) + push_space > free_space)
3864 break;
3865
3866 push_items++;
3867 push_space += this_item_size + sizeof(*item);
3868 }
3869
3870 if (push_items == 0) {
3871 ret = 1;
3872 goto out;
3873 }
3874 WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3875
3876 /* push data from right to left */
3877 copy_extent_buffer(left, right,
3878 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3879 btrfs_item_nr_offset(0),
3880 push_items * sizeof(struct btrfs_item));
3881
3882 push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
3883 btrfs_item_offset_nr(right, push_items - 1);
3884
3885 copy_extent_buffer(left, right, BTRFS_LEAF_DATA_OFFSET +
3886 leaf_data_end(fs_info, left) - push_space,
3887 BTRFS_LEAF_DATA_OFFSET +
3888 btrfs_item_offset_nr(right, push_items - 1),
3889 push_space);
3890 old_left_nritems = btrfs_header_nritems(left);
3891 BUG_ON(old_left_nritems <= 0);
3892
3893 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3894 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3895 u32 ioff;
3896
3897 item = btrfs_item_nr(i);
3898
3899 ioff = btrfs_token_item_offset(left, item, &token);
3900 btrfs_set_token_item_offset(left, item,
3901 ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size),
3902 &token);
3903 }
3904 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3905
3906 /* fixup right node */
3907 if (push_items > right_nritems)
3908 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3909 right_nritems);
3910
3911 if (push_items < right_nritems) {
3912 push_space = btrfs_item_offset_nr(right, push_items - 1) -
3913 leaf_data_end(fs_info, right);
3914 memmove_extent_buffer(right, BTRFS_LEAF_DATA_OFFSET +
3915 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3916 BTRFS_LEAF_DATA_OFFSET +
3917 leaf_data_end(fs_info, right), push_space);
3918
3919 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3920 btrfs_item_nr_offset(push_items),
3921 (btrfs_header_nritems(right) - push_items) *
3922 sizeof(struct btrfs_item));
3923 }
3924 right_nritems -= push_items;
3925 btrfs_set_header_nritems(right, right_nritems);
3926 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3927 for (i = 0; i < right_nritems; i++) {
3928 item = btrfs_item_nr(i);
3929
3930 push_space = push_space - btrfs_token_item_size(right,
3931 item, &token);
3932 btrfs_set_token_item_offset(right, item, push_space, &token);
3933 }
3934
3935 btrfs_mark_buffer_dirty(left);
3936 if (right_nritems)
3937 btrfs_mark_buffer_dirty(right);
3938 else
3939 clean_tree_block(fs_info, right);
3940
3941 btrfs_item_key(right, &disk_key, 0);
3942 fixup_low_keys(path, &disk_key, 1);
3943
3944 /* then fixup the leaf pointer in the path */
3945 if (path->slots[0] < push_items) {
3946 path->slots[0] += old_left_nritems;
3947 btrfs_tree_unlock(path->nodes[0]);
3948 free_extent_buffer(path->nodes[0]);
3949 path->nodes[0] = left;
3950 path->slots[1] -= 1;
3951 } else {
3952 btrfs_tree_unlock(left);
3953 free_extent_buffer(left);
3954 path->slots[0] -= push_items;
3955 }
3956 BUG_ON(path->slots[0] < 0);
3957 return ret;
3958out:
3959 btrfs_tree_unlock(left);
3960 free_extent_buffer(left);
3961 return ret;
3962}
3963
3964/*
3965 * push some data in the path leaf to the left, trying to free up at
3966 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3967 *
3968 * max_slot can put a limit on how far into the leaf we'll push items. The
3969 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3970 * items
3971 */
3972static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3973 *root, struct btrfs_path *path, int min_data_size,
3974 int data_size, int empty, u32 max_slot)
3975{
3976 struct btrfs_fs_info *fs_info = root->fs_info;
3977 struct extent_buffer *right = path->nodes[0];
3978 struct extent_buffer *left;
3979 int slot;
3980 int free_space;
3981 u32 right_nritems;
3982 int ret = 0;
3983
3984 slot = path->slots[1];
3985 if (slot == 0)
3986 return 1;
3987 if (!path->nodes[1])
3988 return 1;
3989
3990 right_nritems = btrfs_header_nritems(right);
3991 if (right_nritems == 0)
3992 return 1;
3993
3994 btrfs_assert_tree_locked(path->nodes[1]);
3995
3996 left = read_node_slot(fs_info, path->nodes[1], slot - 1);
3997 /*
3998	 * If slot - 1 is not valid or we fail to read the left node,
3999	 * it's no big deal, just return.
4000 */
4001 if (IS_ERR(left))
4002 return 1;
4003
4004 btrfs_tree_lock(left);
4005 btrfs_set_lock_blocking(left);
4006
4007 free_space = btrfs_leaf_free_space(fs_info, left);
4008 if (free_space < data_size) {
4009 ret = 1;
4010 goto out;
4011 }
4012
4013 /* cow and double check */
4014 ret = btrfs_cow_block(trans, root, left,
4015 path->nodes[1], slot - 1, &left);
4016 if (ret) {
4017 /* we hit -ENOSPC, but it isn't fatal here */
4018 if (ret == -ENOSPC)
4019 ret = 1;
4020 goto out;
4021 }
4022
4023 free_space = btrfs_leaf_free_space(fs_info, left);
4024 if (free_space < data_size) {
4025 ret = 1;
4026 goto out;
4027 }
4028
4029 return __push_leaf_left(fs_info, path, min_data_size,
4030 empty, left, free_space, right_nritems,
4031 max_slot);
4032out:
4033 btrfs_tree_unlock(left);
4034 free_extent_buffer(left);
4035 return ret;
4036}
4037
4038/*
4039 * helper for split_leaf(): copy items [mid, nritems) and their data
4040 * from leaf 'l' into the new leaf 'right', then fix up the path.
4041 */
4042static noinline void copy_for_split(struct btrfs_trans_handle *trans,
4043 struct btrfs_fs_info *fs_info,
4044 struct btrfs_path *path,
4045 struct extent_buffer *l,
4046 struct extent_buffer *right,
4047 int slot, int mid, int nritems)
4048{
4049 int data_copy_size;
4050 int rt_data_off;
4051 int i;
4052 struct btrfs_disk_key disk_key;
4053 struct btrfs_map_token token;
4054
4055 btrfs_init_map_token(&token);
4056
4057 nritems = nritems - mid;
4058 btrfs_set_header_nritems(right, nritems);
4059 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(fs_info, l);
4060
4061 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
4062 btrfs_item_nr_offset(mid),
4063 nritems * sizeof(struct btrfs_item));
4064
4065 copy_extent_buffer(right, l,
4066 BTRFS_LEAF_DATA_OFFSET + BTRFS_LEAF_DATA_SIZE(fs_info) -
4067 data_copy_size, BTRFS_LEAF_DATA_OFFSET +
4068 leaf_data_end(fs_info, l), data_copy_size);
4069
4070 rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid);
4071
4072 for (i = 0; i < nritems; i++) {
4073 struct btrfs_item *item = btrfs_item_nr(i);
4074 u32 ioff;
4075
4076 ioff = btrfs_token_item_offset(right, item, &token);
4077 btrfs_set_token_item_offset(right, item,
4078 ioff + rt_data_off, &token);
4079 }
4080
4081 btrfs_set_header_nritems(l, mid);
4082 btrfs_item_key(right, &disk_key, 0);
4083 insert_ptr(trans, fs_info, path, &disk_key, right->start,
4084 path->slots[1] + 1, 1);
4085
4086 btrfs_mark_buffer_dirty(right);
4087 btrfs_mark_buffer_dirty(l);
4088 BUG_ON(path->slots[0] != slot);
4089
4090 if (mid <= slot) {
4091 btrfs_tree_unlock(path->nodes[0]);
4092 free_extent_buffer(path->nodes[0]);
4093 path->nodes[0] = right;
4094 path->slots[0] -= mid;
4095 path->slots[1] += 1;
4096 } else {
4097 btrfs_tree_unlock(right);
4098 free_extent_buffer(right);
4099 }
4100
4101 BUG_ON(path->slots[0] < 0);
4102}
4103
4104/*
4105 * double splits happen when we need to insert a big item in the middle
4106 * of a leaf. A double split can leave us with 3 mostly empty leaves:
4107 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
4108 * A B C
4109 *
4110 * We avoid this by trying to push the items on either side of our target
4111 * into the adjacent leaves. If all goes well we can avoid the double split
4112 * completely.
4113 */
4114static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
4115 struct btrfs_root *root,
4116 struct btrfs_path *path,
4117 int data_size)
4118{
4119 struct btrfs_fs_info *fs_info = root->fs_info;
4120 int ret;
4121 int progress = 0;
4122 int slot;
4123 u32 nritems;
4124 int space_needed = data_size;
4125
4126 slot = path->slots[0];
4127 if (slot < btrfs_header_nritems(path->nodes[0]))
4128 space_needed -= btrfs_leaf_free_space(fs_info, path->nodes[0]);
4129
4130 /*
4131 * try to push all the items after our slot into the
4132 * right leaf
4133 */
4134 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
4135 if (ret < 0)
4136 return ret;
4137
4138 if (ret == 0)
4139 progress++;
4140
4141 nritems = btrfs_header_nritems(path->nodes[0]);
4142 /*
4143 * our goal is to get our slot at the start or end of a leaf. If
4144	 * we've done so, we're done
4145 */
4146 if (path->slots[0] == 0 || path->slots[0] == nritems)
4147 return 0;
4148
4149 if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= data_size)
4150 return 0;
4151
4152	/* try to push all the items before our slot into the left leaf */
4153 slot = path->slots[0];
4154 space_needed = data_size;
4155 if (slot > 0)
4156 space_needed -= btrfs_leaf_free_space(fs_info, path->nodes[0]);
4157 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
4158 if (ret < 0)
4159 return ret;
4160
4161 if (ret == 0)
4162 progress++;
4163
4164 if (progress)
4165 return 0;
4166 return 1;
4167}
4168
4169/*
4170 * split the path's leaf in two, making sure there is at least data_size
4171 * available for the resulting leaf level of the path.
4172 *
4173 * returns 0 if all went well and < 0 on failure.
4174 */
4175static noinline int split_leaf(struct btrfs_trans_handle *trans,
4176 struct btrfs_root *root,
4177 const struct btrfs_key *ins_key,
4178 struct btrfs_path *path, int data_size,
4179 int extend)
4180{
4181 struct btrfs_disk_key disk_key;
4182 struct extent_buffer *l;
4183 u32 nritems;
4184 int mid;
4185 int slot;
4186 struct extent_buffer *right;
4187 struct btrfs_fs_info *fs_info = root->fs_info;
4188 int ret = 0;
4189 int wret;
4190 int split;
4191 int num_doubles = 0;
4192 int tried_avoid_double = 0;
4193
4194 l = path->nodes[0];
4195 slot = path->slots[0];
4196 if (extend && data_size + btrfs_item_size_nr(l, slot) +
4197 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
4198 return -EOVERFLOW;
4199
4200 /* first try to make some room by pushing left and right */
4201 if (data_size && path->nodes[1]) {
4202 int space_needed = data_size;
4203
4204 if (slot < btrfs_header_nritems(l))
4205 space_needed -= btrfs_leaf_free_space(fs_info, l);
4206
4207 wret = push_leaf_right(trans, root, path, space_needed,
4208 space_needed, 0, 0);
4209 if (wret < 0)
4210 return wret;
4211 if (wret) {
4212 space_needed = data_size;
4213 if (slot > 0)
4214 space_needed -= btrfs_leaf_free_space(fs_info,
4215 l);
4216 wret = push_leaf_left(trans, root, path, space_needed,
4217 space_needed, 0, (u32)-1);
4218 if (wret < 0)
4219 return wret;
4220 }
4221 l = path->nodes[0];
4222
4223 /* did the pushes work? */
4224 if (btrfs_leaf_free_space(fs_info, l) >= data_size)
4225 return 0;
4226 }
4227
4228 if (!path->nodes[1]) {
4229 ret = insert_new_root(trans, root, path, 1);
4230 if (ret)
4231 return ret;
4232 }
4233again:
4234 split = 1;
4235 l = path->nodes[0];
4236 slot = path->slots[0];
4237 nritems = btrfs_header_nritems(l);
4238 mid = (nritems + 1) / 2;
4239
4240 if (mid <= slot) {
4241 if (nritems == 1 ||
4242 leaf_space_used(l, mid, nritems - mid) + data_size >
4243 BTRFS_LEAF_DATA_SIZE(fs_info)) {
4244 if (slot >= nritems) {
4245 split = 0;
4246 } else {
4247 mid = slot;
4248 if (mid != nritems &&
4249 leaf_space_used(l, mid, nritems - mid) +
4250 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
4251 if (data_size && !tried_avoid_double)
4252 goto push_for_double;
4253 split = 2;
4254 }
4255 }
4256 }
4257 } else {
4258 if (leaf_space_used(l, 0, mid) + data_size >
4259 BTRFS_LEAF_DATA_SIZE(fs_info)) {
4260 if (!extend && data_size && slot == 0) {
4261 split = 0;
4262 } else if ((extend || !data_size) && slot == 0) {
4263 mid = 1;
4264 } else {
4265 mid = slot;
4266 if (mid != nritems &&
4267 leaf_space_used(l, mid, nritems - mid) +
4268 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
4269 if (data_size && !tried_avoid_double)
4270 goto push_for_double;
4271 split = 2;
4272 }
4273 }
4274 }
4275 }
4276
4277 if (split == 0)
4278 btrfs_cpu_key_to_disk(&disk_key, ins_key);
4279 else
4280 btrfs_item_key(l, &disk_key, mid);
4281
4282 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
4283 &disk_key, 0, l->start, 0);
4284 if (IS_ERR(right))
4285 return PTR_ERR(right);
4286
4287 root_add_used(root, fs_info->nodesize);
4288
4289 if (split == 0) {
4290 if (mid <= slot) {
4291 btrfs_set_header_nritems(right, 0);
4292 insert_ptr(trans, fs_info, path, &disk_key,
4293 right->start, path->slots[1] + 1, 1);
4294 btrfs_tree_unlock(path->nodes[0]);
4295 free_extent_buffer(path->nodes[0]);
4296 path->nodes[0] = right;
4297 path->slots[0] = 0;
4298 path->slots[1] += 1;
4299 } else {
4300 btrfs_set_header_nritems(right, 0);
4301 insert_ptr(trans, fs_info, path, &disk_key,
4302 right->start, path->slots[1], 1);
4303 btrfs_tree_unlock(path->nodes[0]);
4304 free_extent_buffer(path->nodes[0]);
4305 path->nodes[0] = right;
4306 path->slots[0] = 0;
4307 if (path->slots[1] == 0)
4308 fixup_low_keys(path, &disk_key, 1);
4309 }
4310 /*
4311		 * We created a new leaf 'right' for the required ins_len and
4312		 * btrfs_mark_buffer_dirty() will be called on it after the
4313		 * caller copies the new item's content into 'right'.
4314 */
4315 return ret;
4316 }
4317
4318 copy_for_split(trans, fs_info, path, l, right, slot, mid, nritems);
4319
4320 if (split == 2) {
4321 BUG_ON(num_doubles != 0);
4322 num_doubles++;
4323 goto again;
4324 }
4325
4326 return 0;
4327
4328push_for_double:
4329 push_for_double_split(trans, root, path, data_size);
4330 tried_avoid_double = 1;
4331 if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= data_size)
4332 return 0;
4333 goto again;
4334}
4335
4336static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4337 struct btrfs_root *root,
4338 struct btrfs_path *path, int ins_len)
4339{
4340 struct btrfs_fs_info *fs_info = root->fs_info;
4341 struct btrfs_key key;
4342 struct extent_buffer *leaf;
4343 struct btrfs_file_extent_item *fi;
4344 u64 extent_len = 0;
4345 u32 item_size;
4346 int ret;
4347
4348 leaf = path->nodes[0];
4349 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4350
4351 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4352 key.type != BTRFS_EXTENT_CSUM_KEY);
4353
4354 if (btrfs_leaf_free_space(fs_info, leaf) >= ins_len)
4355 return 0;
4356
4357 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4358 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4359 fi = btrfs_item_ptr(leaf, path->slots[0],
4360 struct btrfs_file_extent_item);
4361 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4362 }
4363 btrfs_release_path(path);
4364
4365 path->keep_locks = 1;
4366 path->search_for_split = 1;
4367 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4368 path->search_for_split = 0;
4369 if (ret > 0)
4370 ret = -EAGAIN;
4371 if (ret < 0)
4372 goto err;
4373
4374 ret = -EAGAIN;
4375 leaf = path->nodes[0];
4376 /* if our item isn't there, return now */
4377 if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4378 goto err;
4379
4380 /* the leaf has changed, it now has room. return now */
4381 if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= ins_len)
4382 goto err;
4383
4384 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4385 fi = btrfs_item_ptr(leaf, path->slots[0],
4386 struct btrfs_file_extent_item);
4387 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4388 goto err;
4389 }
4390
4391 btrfs_set_path_blocking(path);
4392 ret = split_leaf(trans, root, &key, path, ins_len, 1);
4393 if (ret)
4394 goto err;
4395
4396 path->keep_locks = 0;
4397 btrfs_unlock_up_safe(path, 1);
4398 return 0;
4399err:
4400 path->keep_locks = 0;
4401 return ret;
4402}
4403
4404static noinline int split_item(struct btrfs_fs_info *fs_info,
4405 struct btrfs_path *path,
4406 const struct btrfs_key *new_key,
4407 unsigned long split_offset)
4408{
4409 struct extent_buffer *leaf;
4410 struct btrfs_item *item;
4411 struct btrfs_item *new_item;
4412 int slot;
4413 char *buf;
4414 u32 nritems;
4415 u32 item_size;
4416 u32 orig_offset;
4417 struct btrfs_disk_key disk_key;
4418
4419 leaf = path->nodes[0];
4420 BUG_ON(btrfs_leaf_free_space(fs_info, leaf) < sizeof(struct btrfs_item));
4421
4422 btrfs_set_path_blocking(path);
4423
4424 item = btrfs_item_nr(path->slots[0]);
4425 orig_offset = btrfs_item_offset(leaf, item);
4426 item_size = btrfs_item_size(leaf, item);
4427
4428 buf = kmalloc(item_size, GFP_NOFS);
4429 if (!buf)
4430 return -ENOMEM;
4431
4432 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4433 path->slots[0]), item_size);
4434
4435 slot = path->slots[0] + 1;
4436 nritems = btrfs_header_nritems(leaf);
4437 if (slot != nritems) {
4438 /* shift the items */
4439 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4440 btrfs_item_nr_offset(slot),
4441 (nritems - slot) * sizeof(struct btrfs_item));
4442 }
4443
4444 btrfs_cpu_key_to_disk(&disk_key, new_key);
4445 btrfs_set_item_key(leaf, &disk_key, slot);
4446
4447 new_item = btrfs_item_nr(slot);
4448
4449 btrfs_set_item_offset(leaf, new_item, orig_offset);
4450 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4451
4452 btrfs_set_item_offset(leaf, item,
4453 orig_offset + item_size - split_offset);
4454 btrfs_set_item_size(leaf, item, split_offset);
4455
4456 btrfs_set_header_nritems(leaf, nritems + 1);
4457
4458 /* write the data for the start of the original item */
4459 write_extent_buffer(leaf, buf,
4460 btrfs_item_ptr_offset(leaf, path->slots[0]),
4461 split_offset);
4462
4463 /* write the data for the new item */
4464 write_extent_buffer(leaf, buf + split_offset,
4465 btrfs_item_ptr_offset(leaf, slot),
4466 item_size - split_offset);
4467 btrfs_mark_buffer_dirty(leaf);
4468
4469 BUG_ON(btrfs_leaf_free_space(fs_info, leaf) < 0);
4470 kfree(buf);
4471 return 0;
4472}
4473
4474/*
4475 * This function splits a single item into two items,
4476 * giving 'new_key' to the new item and splitting the
4477 * old one at split_offset (from the start of the item).
4478 *
4479 * The path may be released by this operation. After
4480 * the split, the path is pointing to the old item. The
4481 * new item is going to be in the same node as the old one.
4482 *
4483 * Note: the item being split must be small enough to live alone on
4484 * a tree block with room for one extra struct btrfs_item.
4485 *
4486 * This allows us to split the item in place, keeping a lock on the
4487 * leaf the entire time.
4488 */
4489int btrfs_split_item(struct btrfs_trans_handle *trans,
4490 struct btrfs_root *root,
4491 struct btrfs_path *path,
4492 const struct btrfs_key *new_key,
4493 unsigned long split_offset)
4494{
4495 int ret;
4496 ret = setup_leaf_for_split(trans, root, path,
4497 sizeof(struct btrfs_item));
4498 if (ret)
4499 return ret;
4500
4501 ret = split_item(root->fs_info, path, new_key, split_offset);
4502 return ret;
4503}
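
/*
 * Illustrative sketch only, not part of btrfs: look up 'key' and split
 * that item in place at 'split_offset' bytes, giving 'new_key' to the
 * new second half. Assumes 'key' refers to an EXTENT_DATA or
 * EXTENT_CSUM item (see the BUG_ON in setup_leaf_for_split above).
 * example_split_at() and its parameters are hypothetical names.
 */
static int __maybe_unused example_split_at(struct btrfs_trans_handle *trans,
 struct btrfs_root *root,
 const struct btrfs_key *key,
 const struct btrfs_key *new_key,
 unsigned long split_offset)
{
 struct btrfs_path *path;
 int ret;

 path = btrfs_alloc_path();
 if (!path)
 return -ENOMEM;
 /* cow = 1, we are about to modify the leaf */
 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
 if (ret > 0)
 ret = -ENOENT;
 if (!ret)
 /* on success the path still points at the old front half */
 ret = btrfs_split_item(trans, root, path, new_key,
 split_offset);
 btrfs_free_path(path);
 return ret;
}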
4504
4505/*
4506 * This function duplicates an item, giving 'new_key' to the new item.
4507 * It guarantees both items live in the same tree leaf and the new item
4508 * is contiguous with the original item.
4509 *
4510 * This allows us to split a file extent in place, keeping a lock on the
4511 * leaf the entire time.
4512 */
4513int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4514 struct btrfs_root *root,
4515 struct btrfs_path *path,
4516 const struct btrfs_key *new_key)
4517{
4518 struct extent_buffer *leaf;
4519 int ret;
4520 u32 item_size;
4521
4522 leaf = path->nodes[0];
4523 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4524 ret = setup_leaf_for_split(trans, root, path,
4525 item_size + sizeof(struct btrfs_item));
4526 if (ret)
4527 return ret;
4528
4529 path->slots[0]++;
4530 setup_items_for_insert(root, path, new_key, &item_size,
4531 item_size, item_size +
4532 sizeof(struct btrfs_item), 1);
4533 leaf = path->nodes[0];
4534 memcpy_extent_buffer(leaf,
4535 btrfs_item_ptr_offset(leaf, path->slots[0]),
4536 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4537 item_size);
4538 return 0;
4539}
4540
4541/*
4542 * make the item pointed to by the path smaller. new_size indicates
4543 * how small to make it, and from_end tells us if we just chop bytes
4544 * off the end of the item or if we shift the item to chop bytes off
4545 * the front.
4546 */
4547void btrfs_truncate_item(struct btrfs_fs_info *fs_info,
4548 struct btrfs_path *path, u32 new_size, int from_end)
4549{
4550 int slot;
4551 struct extent_buffer *leaf;
4552 struct btrfs_item *item;
4553 u32 nritems;
4554 unsigned int data_end;
4555 unsigned int old_data_start;
4556 unsigned int old_size;
4557 unsigned int size_diff;
4558 int i;
4559 struct btrfs_map_token token;
4560
4561 btrfs_init_map_token(&token);
4562
4563 leaf = path->nodes[0];
4564 slot = path->slots[0];
4565
4566 old_size = btrfs_item_size_nr(leaf, slot);
4567 if (old_size == new_size)
4568 return;
4569
4570 nritems = btrfs_header_nritems(leaf);
4571 data_end = leaf_data_end(fs_info, leaf);
4572
4573 old_data_start = btrfs_item_offset_nr(leaf, slot);
4574
4575 size_diff = old_size - new_size;
4576
4577 BUG_ON(slot < 0);
4578 BUG_ON(slot >= nritems);
4579
4580 /*
4581 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4582 */
4583 /* first correct the data pointers */
4584 for (i = slot; i < nritems; i++) {
4585 u32 ioff;
4586 item = btrfs_item_nr(i);
4587
4588 ioff = btrfs_token_item_offset(leaf, item, &token);
4589 btrfs_set_token_item_offset(leaf, item,
4590 ioff + size_diff, &token);
4591 }
4592
4593 /* shift the data */
4594 if (from_end) {
4595 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4596 data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
4597 data_end, old_data_start + new_size - data_end);
4598 } else {
4599 struct btrfs_disk_key disk_key;
4600 u64 offset;
4601
4602 btrfs_item_key(leaf, &disk_key, slot);
4603
4604 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4605 unsigned long ptr;
4606 struct btrfs_file_extent_item *fi;
4607
4608 fi = btrfs_item_ptr(leaf, slot,
4609 struct btrfs_file_extent_item);
4610 fi = (struct btrfs_file_extent_item *)(
4611 (unsigned long)fi - size_diff);
4612
4613 if (btrfs_file_extent_type(leaf, fi) ==
4614 BTRFS_FILE_EXTENT_INLINE) {
4615 ptr = btrfs_item_ptr_offset(leaf, slot);
4616 memmove_extent_buffer(leaf, ptr,
4617 (unsigned long)fi,
4618 BTRFS_FILE_EXTENT_INLINE_DATA_START);
4619 }
4620 }
4621
4622 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4623 data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
4624 data_end, old_data_start - data_end);
4625
4626 offset = btrfs_disk_key_offset(&disk_key);
4627 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4628 btrfs_set_item_key(leaf, &disk_key, slot);
4629 if (slot == 0)
4630 fixup_low_keys(path, &disk_key, 1);
4631 }
4632
4633 item = btrfs_item_nr(slot);
4634 btrfs_set_item_size(leaf, item, new_size);
4635 btrfs_mark_buffer_dirty(leaf);
4636
4637 if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
4638 btrfs_print_leaf(leaf);
4639 BUG();
4640 }
4641}
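
/*
 * Illustrative sketch only, not part of btrfs: shrink the item at
 * 'key' to 'new_size' bytes by chopping from the end (from_end = 1).
 * example_shrink_item() and its parameters are hypothetical names.
 */
static int __maybe_unused example_shrink_item(struct btrfs_trans_handle *trans,
 struct btrfs_root *root,
 const struct btrfs_key *key,
 u32 new_size)
{
 struct btrfs_path *path;
 int ret;

 path = btrfs_alloc_path();
 if (!path)
 return -ENOMEM;
 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
 if (ret > 0)
 ret = -ENOENT;
 /* btrfs_truncate_item() only makes the item smaller */
 if (!ret && new_size < btrfs_item_size_nr(path->nodes[0],
 path->slots[0]))
 btrfs_truncate_item(root->fs_info, path, new_size, 1);
 btrfs_free_path(path);
 return ret;
}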
4642
4643/*
4644 * make the item pointed to by the path bigger, data_size is the added size.
4645 */
4646void btrfs_extend_item(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
4647 u32 data_size)
4648{
4649 int slot;
4650 struct extent_buffer *leaf;
4651 struct btrfs_item *item;
4652 u32 nritems;
4653 unsigned int data_end;
4654 unsigned int old_data;
4655 unsigned int old_size;
4656 int i;
4657 struct btrfs_map_token token;
4658
4659 btrfs_init_map_token(&token);
4660
4661 leaf = path->nodes[0];
4662
4663 nritems = btrfs_header_nritems(leaf);
4664 data_end = leaf_data_end(fs_info, leaf);
4665
4666 if (btrfs_leaf_free_space(fs_info, leaf) < data_size) {
4667 btrfs_print_leaf(leaf);
4668 BUG();
4669 }
4670 slot = path->slots[0];
4671 old_data = btrfs_item_end_nr(leaf, slot);
4672
4673 BUG_ON(slot < 0);
4674 if (slot >= nritems) {
4675 btrfs_print_leaf(leaf);
4676 btrfs_crit(fs_info, "slot %d too large, nritems %d",
4677 slot, nritems);
4678 BUG_ON(1);
4679 }
4680
4681 /*
4682 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4683 */
4684 /* first correct the data pointers */
4685 for (i = slot; i < nritems; i++) {
4686 u32 ioff;
4687 item = btrfs_item_nr(i);
4688
4689 ioff = btrfs_token_item_offset(leaf, item, &token);
4690 btrfs_set_token_item_offset(leaf, item,
4691 ioff - data_size, &token);
4692 }
4693
4694 /* shift the data */
4695 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4696 data_end - data_size, BTRFS_LEAF_DATA_OFFSET +
4697 data_end, old_data - data_end);
4698
4699 data_end = old_data;
4700 old_size = btrfs_item_size_nr(leaf, slot);
4701 item = btrfs_item_nr(slot);
4702 btrfs_set_item_size(leaf, item, old_size + data_size);
4703 btrfs_mark_buffer_dirty(leaf);
4704
4705 if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
4706 btrfs_print_leaf(leaf);
4707 BUG();
4708 }
4709}
4710
4711/*
4712 * this is a helper for btrfs_insert_empty_items, the main goal here is
4713 * to save stack depth by doing the bulk of the work in a function
4714 * that doesn't call btrfs_search_slot
4715 */
4716void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4717 const struct btrfs_key *cpu_key, u32 *data_size,
4718 u32 total_data, u32 total_size, int nr)
4719{
4720 struct btrfs_fs_info *fs_info = root->fs_info;
4721 struct btrfs_item *item;
4722 int i;
4723 u32 nritems;
4724 unsigned int data_end;
4725 struct btrfs_disk_key disk_key;
4726 struct extent_buffer *leaf;
4727 int slot;
4728 struct btrfs_map_token token;
4729
4730 if (path->slots[0] == 0) {
4731 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4732 fixup_low_keys(path, &disk_key, 1);
4733 }
4734 btrfs_unlock_up_safe(path, 1);
4735
4736 btrfs_init_map_token(&token);
4737
4738 leaf = path->nodes[0];
4739 slot = path->slots[0];
4740
4741 nritems = btrfs_header_nritems(leaf);
4742 data_end = leaf_data_end(fs_info, leaf);
4743
4744 if (btrfs_leaf_free_space(fs_info, leaf) < total_size) {
4745 btrfs_print_leaf(leaf);
4746 btrfs_crit(fs_info, "not enough freespace need %u have %d",
4747 total_size, btrfs_leaf_free_space(fs_info, leaf));
4748 BUG();
4749 }
4750
4751 if (slot != nritems) {
4752 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4753
4754 if (old_data < data_end) {
4755 btrfs_print_leaf(leaf);
4756 btrfs_crit(fs_info, "slot %d old_data %d data_end %d",
4757 slot, old_data, data_end);
4758 BUG_ON(1);
4759 }
4760 /*
4761 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4762 */
4763 /* first correct the data pointers */
4764 for (i = slot; i < nritems; i++) {
4765 u32 ioff;
4766
4767 item = btrfs_item_nr(i);
4768 ioff = btrfs_token_item_offset(leaf, item, &token);
4769 btrfs_set_token_item_offset(leaf, item,
4770 ioff - total_data, &token);
4771 }
4772 /* shift the items */
4773 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4774 btrfs_item_nr_offset(slot),
4775 (nritems - slot) * sizeof(struct btrfs_item));
4776
4777 /* shift the data */
4778 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4779 data_end - total_data, BTRFS_LEAF_DATA_OFFSET +
4780 data_end, old_data - data_end);
4781 data_end = old_data;
4782 }
4783
4784 /* setup the item for the new data */
4785 for (i = 0; i < nr; i++) {
4786 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4787 btrfs_set_item_key(leaf, &disk_key, slot + i);
4788 item = btrfs_item_nr(slot + i);
4789 btrfs_set_token_item_offset(leaf, item,
4790 data_end - data_size[i], &token);
4791 data_end -= data_size[i];
4792 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4793 }
4794
4795 btrfs_set_header_nritems(leaf, nritems + nr);
4796 btrfs_mark_buffer_dirty(leaf);
4797
4798 if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
4799 btrfs_print_leaf(leaf);
4800 BUG();
4801 }
4802}
4803
4804/*
4805 * Given a key and some data, insert items into the tree.
4806 * This does all the path init required, making room in the tree if needed.
4807 */
4808int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4809 struct btrfs_root *root,
4810 struct btrfs_path *path,
4811 const struct btrfs_key *cpu_key, u32 *data_size,
4812 int nr)
4813{
4814 int ret = 0;
4815 int slot;
4816 int i;
4817 u32 total_size = 0;
4818 u32 total_data = 0;
4819
4820 for (i = 0; i < nr; i++)
4821 total_data += data_size[i];
4822
4823 total_size = total_data + (nr * sizeof(struct btrfs_item));
4824 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4825 if (ret == 0)
4826 return -EEXIST;
4827 if (ret < 0)
4828 return ret;
4829
4830 slot = path->slots[0];
4831 BUG_ON(slot < 0);
4832
4833 setup_items_for_insert(root, path, cpu_key, data_size,
4834 total_data, total_size, nr);
4835 return 0;
4836}
4837
4838/*
4839 * Given a key and some data, insert an item into the tree.
4840 * This does all the path init required, making room in the tree if needed.
4841 */
4842int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4843 const struct btrfs_key *cpu_key, void *data,
4844 u32 data_size)
4845{
4846 int ret = 0;
4847 struct btrfs_path *path;
4848 struct extent_buffer *leaf;
4849 unsigned long ptr;
4850
4851 path = btrfs_alloc_path();
4852 if (!path)
4853 return -ENOMEM;
4854 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4855 if (!ret) {
4856 leaf = path->nodes[0];
4857 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4858 write_extent_buffer(leaf, data, ptr, data_size);
4859 btrfs_mark_buffer_dirty(leaf);
4860 }
4861 btrfs_free_path(path);
4862 return ret;
4863}
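
/*
 * Illustrative sketch only, not part of btrfs: insert two items with a
 * single leaf operation via btrfs_insert_empty_items(), then copy the
 * payloads in. Assumes the two keys are consecutive in key order;
 * example_insert_pair() and its parameters are hypothetical names.
 */
static int __maybe_unused example_insert_pair(struct btrfs_trans_handle *trans,
 struct btrfs_root *root,
 const struct btrfs_key *keys,
 void *data[2], u32 sizes[2])
{
 struct btrfs_path *path;
 struct extent_buffer *leaf;
 int i;
 int ret;

 path = btrfs_alloc_path();
 if (!path)
 return -ENOMEM;
 /* reserves room for both items and positions path at the first */
 ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
 if (!ret) {
 leaf = path->nodes[0];
 for (i = 0; i < 2; i++)
 write_extent_buffer(leaf, data[i],
 btrfs_item_ptr_offset(leaf, path->slots[0] + i),
 sizes[i]);
 btrfs_mark_buffer_dirty(leaf);
 }
 btrfs_free_path(path);
 return ret;
}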
4864
4865/*
4866 * delete the pointer from a given node.
4867 *
4868 * the tree should have been previously balanced so the deletion does not
4869 * empty a node.
4870 */
4871static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4872 int level, int slot)
4873{
4874 struct extent_buffer *parent = path->nodes[level];
4875 u32 nritems;
4876 int ret;
4877
4878 nritems = btrfs_header_nritems(parent);
4879 if (slot != nritems - 1) {
4880 if (level) {
4881 ret = tree_mod_log_insert_move(parent, slot, slot + 1,
4882 nritems - slot - 1);
4883 BUG_ON(ret < 0);
4884 }
4885 memmove_extent_buffer(parent,
4886 btrfs_node_key_ptr_offset(slot),
4887 btrfs_node_key_ptr_offset(slot + 1),
4888 sizeof(struct btrfs_key_ptr) *
4889 (nritems - slot - 1));
4890 } else if (level) {
4891 ret = tree_mod_log_insert_key(parent, slot, MOD_LOG_KEY_REMOVE,
4892 GFP_NOFS);
4893 BUG_ON(ret < 0);
4894 }
4895
4896 nritems--;
4897 btrfs_set_header_nritems(parent, nritems);
4898 if (nritems == 0 && parent == root->node) {
4899 BUG_ON(btrfs_header_level(root->node) != 1);
4900 /* just turn the root into a leaf and break */
4901 btrfs_set_header_level(root->node, 0);
4902 } else if (slot == 0) {
4903 struct btrfs_disk_key disk_key;
4904
4905 btrfs_node_key(parent, &disk_key, 0);
4906 fixup_low_keys(path, &disk_key, level + 1);
4907 }
4908 btrfs_mark_buffer_dirty(parent);
4909}
4910
4911/*
4912 * a helper function to delete the leaf pointed to by path->slots[1] and
4913 * path->nodes[1].
4914 *
4915 * This deletes the pointer in path->nodes[1] and frees the leaf
4916 * block extent.
4917 *
4918 * The path must have already been set up for deleting the leaf, including
4919 * all the proper balancing. path->nodes[1] must be locked.
4920 */
4921static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4922 struct btrfs_root *root,
4923 struct btrfs_path *path,
4924 struct extent_buffer *leaf)
4925{
4926 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4927 del_ptr(root, path, 1, path->slots[1]);
4928
4929 /*
4930 * btrfs_free_extent is expensive, we want to make sure we
4931 * aren't holding any locks when we call it
4932 */
4933 btrfs_unlock_up_safe(path, 0);
4934
4935 root_sub_used(root, leaf->len);
4936
4937 extent_buffer_get(leaf);
4938 btrfs_free_tree_block(trans, root, leaf, 0, 1);
4939 free_extent_buffer_stale(leaf);
4940}
4941/*
4942 * delete the item at the leaf level in path. If that empties
4943 * the leaf, remove it from the tree
4944 */
4945int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4946 struct btrfs_path *path, int slot, int nr)
4947{
4948 struct btrfs_fs_info *fs_info = root->fs_info;
4949 struct extent_buffer *leaf;
4950 struct btrfs_item *item;
4951 u32 last_off;
4952 u32 dsize = 0;
4953 int ret = 0;
4954 int wret;
4955 int i;
4956 u32 nritems;
4957 struct btrfs_map_token token;
4958
4959 btrfs_init_map_token(&token);
4960
4961 leaf = path->nodes[0];
4962 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4963
4964 for (i = 0; i < nr; i++)
4965 dsize += btrfs_item_size_nr(leaf, slot + i);
4966
4967 nritems = btrfs_header_nritems(leaf);
4968
4969 if (slot + nr != nritems) {
4970 int data_end = leaf_data_end(fs_info, leaf);
4971
4972 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4973 data_end + dsize,
4974 BTRFS_LEAF_DATA_OFFSET + data_end,
4975 last_off - data_end);
4976
4977 for (i = slot + nr; i < nritems; i++) {
4978 u32 ioff;
4979
4980 item = btrfs_item_nr(i);
4981 ioff = btrfs_token_item_offset(leaf, item, &token);
4982 btrfs_set_token_item_offset(leaf, item,
4983 ioff + dsize, &token);
4984 }
4985
4986 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4987 btrfs_item_nr_offset(slot + nr),
4988 sizeof(struct btrfs_item) *
4989 (nritems - slot - nr));
4990 }
4991 btrfs_set_header_nritems(leaf, nritems - nr);
4992 nritems -= nr;
4993
4994 /* delete the leaf if we've emptied it */
4995 if (nritems == 0) {
4996 if (leaf == root->node) {
4997 btrfs_set_header_level(leaf, 0);
4998 } else {
4999 btrfs_set_path_blocking(path);
5000 clean_tree_block(fs_info, leaf);
5001 btrfs_del_leaf(trans, root, path, leaf);
5002 }
5003 } else {
5004 int used = leaf_space_used(leaf, 0, nritems);
5005 if (slot == 0) {
5006 struct btrfs_disk_key disk_key;
5007
5008 btrfs_item_key(leaf, &disk_key, 0);
5009 fixup_low_keys(path, &disk_key, 1);
5010 }
5011
5012 /* delete the leaf if it is mostly empty */
5013 if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
5014 /* push_leaf_left fixes the path.
5015 * make sure the path still points to our leaf
5016 * for possible call to del_ptr below
5017 */
5018 slot = path->slots[1];
5019 extent_buffer_get(leaf);
5020
5021 btrfs_set_path_blocking(path);
5022 wret = push_leaf_left(trans, root, path, 1, 1,
5023 1, (u32)-1);
5024 if (wret < 0 && wret != -ENOSPC)
5025 ret = wret;
5026
5027 if (path->nodes[0] == leaf &&
5028 btrfs_header_nritems(leaf)) {
5029 wret = push_leaf_right(trans, root, path, 1,
5030 1, 1, 0);
5031 if (wret < 0 && wret != -ENOSPC)
5032 ret = wret;
5033 }
5034
5035 if (btrfs_header_nritems(leaf) == 0) {
5036 path->slots[1] = slot;
5037 btrfs_del_leaf(trans, root, path, leaf);
5038 free_extent_buffer(leaf);
5039 ret = 0;
5040 } else {
5041 /* if we're still in the path, make sure
5042 * we're dirty. Otherwise, one of the
5043 * push_leaf functions must have already
5044 * dirtied this buffer
5045 */
5046 if (path->nodes[0] == leaf)
5047 btrfs_mark_buffer_dirty(leaf);
5048 free_extent_buffer(leaf);
5049 }
5050 } else {
5051 btrfs_mark_buffer_dirty(leaf);
5052 }
5053 }
5054 return ret;
5055}
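
/*
 * Illustrative sketch only, not part of btrfs: delete the single item
 * at 'key'. Passing ins_len == -1 to btrfs_search_slot() asks it to
 * pre-balance the path for a deletion. example_del_item() is a
 * hypothetical name.
 */
static int __maybe_unused example_del_item(struct btrfs_trans_handle *trans,
 struct btrfs_root *root,
 const struct btrfs_key *key)
{
 struct btrfs_path *path;
 int ret;

 path = btrfs_alloc_path();
 if (!path)
 return -ENOMEM;
 ret = btrfs_search_slot(trans, root, key, path, -1, 1);
 if (ret > 0)
 ret = -ENOENT;
 if (!ret)
 ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
 btrfs_free_path(path);
 return ret;
}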
5056
5057/*
5058 * search the tree again to find a leaf with lesser keys
5059 * returns 0 if it found something or 1 if there are no lesser leaves.
5060 * returns < 0 on io errors.
5061 *
5062 * This may release the path, and so you may lose any locks held at the
5063 * time you call it.
5064 */
5065int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
5066{
5067 struct btrfs_key key;
5068 struct btrfs_disk_key found_key;
5069 int ret;
5070
5071 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
5072
5073 if (key.offset > 0) {
5074 key.offset--;
5075 } else if (key.type > 0) {
5076 key.type--;
5077 key.offset = (u64)-1;
5078 } else if (key.objectid > 0) {
5079 key.objectid--;
5080 key.type = (u8)-1;
5081 key.offset = (u64)-1;
5082 } else {
5083 return 1;
5084 }
5085
5086 btrfs_release_path(path);
5087 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5088 if (ret < 0)
5089 return ret;
5090 btrfs_item_key(path->nodes[0], &found_key, 0);
5091 ret = comp_keys(&found_key, &key);
5092 /*
5093 * We might have had an item with the previous key in the tree right
5094 * before we released our path. And after we released our path, that
5095 * item might have been pushed to the first slot (0) of the leaf we
5096 * were holding due to a tree balance. Alternatively, an item with the
5097 * previous key can exist as the only element of a leaf (big fat item).
5098 * Therefore account for these 2 cases, so that our callers (like
5099 * btrfs_previous_item) don't miss an existing item with a key matching
5100 * the previous key we computed above.
5101 */
5102 if (ret <= 0)
5103 return 0;
5104 return 1;
5105}
5106
5107/*
5108 * A helper function to walk down the tree starting at min_key, and looking
5109 * for nodes or leaves that have a minimum transaction id.
5110 * This is used by the btree defrag code and tree logging.
5111 *
5112 * This does not cow, but it does stuff the starting key it finds back
5113 * into min_key, so you can call btrfs_search_slot with cow=1 on the
5114 * key and get a writable path.
5115 *
5116 * This honors path->lowest_level to prevent descent past a given level
5117 * of the tree.
5118 *
5119 * min_trans indicates the oldest transaction that you are interested
5120 * in walking through. Any nodes or leaves older than min_trans are
5121 * skipped over (without reading them).
5122 *
5123 * returns zero if something useful was found, < 0 on error and 1 if there
5124 * was nothing in the tree that matched the search criteria.
5125 */
5126int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
5127 struct btrfs_path *path,
5128 u64 min_trans)
5129{
5130 struct btrfs_fs_info *fs_info = root->fs_info;
5131 struct extent_buffer *cur;
5132 struct btrfs_key found_key;
5133 int slot;
5134 int sret;
5135 u32 nritems;
5136 int level;
5137 int ret = 1;
5138 int keep_locks = path->keep_locks;
5139
5140 path->keep_locks = 1;
5141again:
5142 cur = btrfs_read_lock_root_node(root);
5143 level = btrfs_header_level(cur);
5144 WARN_ON(path->nodes[level]);
5145 path->nodes[level] = cur;
5146 path->locks[level] = BTRFS_READ_LOCK;
5147
5148 if (btrfs_header_generation(cur) < min_trans) {
5149 ret = 1;
5150 goto out;
5151 }
5152 while (1) {
5153 nritems = btrfs_header_nritems(cur);
5154 level = btrfs_header_level(cur);
5155 sret = btrfs_bin_search(cur, min_key, level, &slot);
5156
5157 /* at the lowest level, we're done, setup the path and exit */
5158 if (level == path->lowest_level) {
5159 if (slot >= nritems)
5160 goto find_next_key;
5161 ret = 0;
5162 path->slots[level] = slot;
5163 btrfs_item_key_to_cpu(cur, &found_key, slot);
5164 goto out;
5165 }
5166 if (sret && slot > 0)
5167 slot--;
5168 /*
5169		 * check this node pointer against the min_trans parameter.
5170		 * If it is too old, skip to the next one.
5171 */
5172 while (slot < nritems) {
5173 u64 gen;
5174
5175 gen = btrfs_node_ptr_generation(cur, slot);
5176 if (gen < min_trans) {
5177 slot++;
5178 continue;
5179 }
5180 break;
5181 }
5182find_next_key:
5183 /*
5184 * we didn't find a candidate key in this node, walk forward
5185 * and find another one
5186 */
5187 if (slot >= nritems) {
5188 path->slots[level] = slot;
5189 btrfs_set_path_blocking(path);
5190 sret = btrfs_find_next_key(root, path, min_key, level,
5191 min_trans);
5192 if (sret == 0) {
5193 btrfs_release_path(path);
5194 goto again;
5195 } else {
5196 goto out;
5197 }
5198 }
5199 /* save our key for returning back */
5200 btrfs_node_key_to_cpu(cur, &found_key, slot);
5201 path->slots[level] = slot;
5202 if (level == path->lowest_level) {
5203 ret = 0;
5204 goto out;
5205 }
5206 btrfs_set_path_blocking(path);
5207 cur = read_node_slot(fs_info, cur, slot);
5208 if (IS_ERR(cur)) {
5209 ret = PTR_ERR(cur);
5210 goto out;
5211 }
5212
5213 btrfs_tree_read_lock(cur);
5214
5215 path->locks[level - 1] = BTRFS_READ_LOCK;
5216 path->nodes[level - 1] = cur;
5217 unlock_up(path, level, 1, 0, NULL);
5218 btrfs_clear_path_blocking(path, NULL, 0);
5219 }
5220out:
5221 path->keep_locks = keep_locks;
5222 if (ret == 0) {
5223 btrfs_unlock_up_safe(path, path->lowest_level + 1);
5224 btrfs_set_path_blocking(path);
5225 memcpy(min_key, &found_key, sizeof(found_key));
5226 }
5227 return ret;
5228}
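
/*
 * Illustrative sketch only, not part of btrfs: visit the key of every
 * item reachable through nodes younger than 'min_trans', restarting
 * the search just past each hit. The visit() callback and
 * example_walk_newer() are hypothetical names.
 */
static int __maybe_unused example_walk_newer(struct btrfs_root *root,
 u64 min_trans,
 void (*visit)(const struct btrfs_key *key))
{
 struct btrfs_path *path;
 struct btrfs_key min_key = { 0 }; /* smallest possible key */
 int ret;

 path = btrfs_alloc_path();
 if (!path)
 return -ENOMEM;
 while (1) {
 ret = btrfs_search_forward(root, &min_key, path, min_trans);
 if (ret) /* 1: nothing left, < 0: error */
 break;
 visit(&min_key);
 btrfs_release_path(path);
 /* step just past the key we were handed back */
 if (min_key.offset < (u64)-1) {
 min_key.offset++;
 } else if (min_key.type < (u8)-1) {
 min_key.type++;
 min_key.offset = 0;
 } else {
 min_key.objectid++;
 min_key.type = 0;
 min_key.offset = 0;
 }
 }
 btrfs_free_path(path);
 return ret < 0 ? ret : 0;
}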
5229
5230static int tree_move_down(struct btrfs_fs_info *fs_info,
5231 struct btrfs_path *path,
5232 int *level)
5233{
5234 struct extent_buffer *eb;
5235
5236 BUG_ON(*level == 0);
5237 eb = read_node_slot(fs_info, path->nodes[*level], path->slots[*level]);
5238 if (IS_ERR(eb))
5239 return PTR_ERR(eb);
5240
5241 path->nodes[*level - 1] = eb;
5242 path->slots[*level - 1] = 0;
5243 (*level)--;
5244 return 0;
5245}
5246
5247static int tree_move_next_or_upnext(struct btrfs_path *path,
5248 int *level, int root_level)
5249{
5250 int ret = 0;
5251 int nritems;
5252 nritems = btrfs_header_nritems(path->nodes[*level]);
5253
5254 path->slots[*level]++;
5255
5256 while (path->slots[*level] >= nritems) {
5257 if (*level == root_level)
5258 return -1;
5259
5260 /* move upnext */
5261 path->slots[*level] = 0;
5262 free_extent_buffer(path->nodes[*level]);
5263 path->nodes[*level] = NULL;
5264 (*level)++;
5265 path->slots[*level]++;
5266
5267 nritems = btrfs_header_nritems(path->nodes[*level]);
5268 ret = 1;
5269 }
5270 return ret;
5271}
5272
5273/*
5274 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
5275 * or down.
5276 */
5277static int tree_advance(struct btrfs_fs_info *fs_info,
5278 struct btrfs_path *path,
5279 int *level, int root_level,
5280 int allow_down,
5281 struct btrfs_key *key)
5282{
5283 int ret;
5284
5285 if (*level == 0 || !allow_down) {
5286 ret = tree_move_next_or_upnext(path, level, root_level);
5287 } else {
5288 ret = tree_move_down(fs_info, path, level);
5289 }
5290 if (ret >= 0) {
5291 if (*level == 0)
5292 btrfs_item_key_to_cpu(path->nodes[*level], key,
5293 path->slots[*level]);
5294 else
5295 btrfs_node_key_to_cpu(path->nodes[*level], key,
5296 path->slots[*level]);
5297 }
5298 return ret;
5299}
5300
5301static int tree_compare_item(struct btrfs_path *left_path,
5302 struct btrfs_path *right_path,
5303 char *tmp_buf)
5304{
5305 int cmp;
5306 int len1, len2;
5307 unsigned long off1, off2;
5308
5309 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5310 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5311 if (len1 != len2)
5312 return 1;
5313
5314 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5315 off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5316 right_path->slots[0]);
5317
5318 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5319
5320 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5321 if (cmp)
5322 return 1;
5323 return 0;
5324}
5325
5326#define ADVANCE 1
5327#define ADVANCE_ONLY_NEXT -1
5328
5329/*
5330 * This function compares two trees and calls the provided callback for
5331 * every changed/new/deleted item it finds.
5332 * If shared tree blocks are encountered, whole subtrees are skipped, making
5333 * the compare pretty fast on snapshotted subvolumes.
5334 *
5335 * This currently works on commit roots only. As commit roots are read only,
5336 * we don't do any locking. The commit roots are protected with transactions.
5337 * Transactions are ended and rejoined when a commit is tried in between.
5338 *
5339 * This function checks for modifications done to the trees while comparing.
5340 * If it detects a change, it aborts immediately.
5341 */
5342int btrfs_compare_trees(struct btrfs_root *left_root,
5343 struct btrfs_root *right_root,
5344 btrfs_changed_cb_t changed_cb, void *ctx)
5345{
5346 struct btrfs_fs_info *fs_info = left_root->fs_info;
5347 int ret;
5348 int cmp;
5349 struct btrfs_path *left_path = NULL;
5350 struct btrfs_path *right_path = NULL;
5351 struct btrfs_key left_key;
5352 struct btrfs_key right_key;
5353 char *tmp_buf = NULL;
5354 int left_root_level;
5355 int right_root_level;
5356 int left_level;
5357 int right_level;
5358 int left_end_reached;
5359 int right_end_reached;
5360 int advance_left;
5361 int advance_right;
5362 u64 left_blockptr;
5363 u64 right_blockptr;
5364 u64 left_gen;
5365 u64 right_gen;
5366
5367 left_path = btrfs_alloc_path();
5368 if (!left_path) {
5369 ret = -ENOMEM;
5370 goto out;
5371 }
5372 right_path = btrfs_alloc_path();
5373 if (!right_path) {
5374 ret = -ENOMEM;
5375 goto out;
5376 }
5377
5378 tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
5379 if (!tmp_buf) {
5380 ret = -ENOMEM;
5381 goto out;
5382 }
5383
5384 left_path->search_commit_root = 1;
5385 left_path->skip_locking = 1;
5386 right_path->search_commit_root = 1;
5387 right_path->skip_locking = 1;
5388
5389 /*
5390 * Strategy: Go to the first items of both trees. Then do
5391 *
5392 * If both trees are at level 0
5393 * Compare keys of current items
5394 * If left < right treat left item as new, advance left tree
5395 * and repeat
5396 * If left > right treat right item as deleted, advance right tree
5397 * and repeat
5398 * If left == right do deep compare of items, treat as changed if
5399 * needed, advance both trees and repeat
5400 * If both trees are at the same level but not at level 0
5401	 *   Compare keys of current nodes/leaves
5402 * If left < right advance left tree and repeat
5403 * If left > right advance right tree and repeat
5404	 *   If left == right compare blockptrs of the next nodes/leaves
5405 * If they match advance both trees but stay at the same level
5406 * and repeat
5407 * If they don't match advance both trees while allowing to go
5408 * deeper and repeat
5409 * If tree levels are different
5410 * Advance the tree that needs it and repeat
5411 *
5412 * Advancing a tree means:
5413 * If we are at level 0, try to go to the next slot. If that's not
5414	 * possible, go one level up and repeat. Stop when we find a level
5415	 * where we can go to the next slot. We may at this point be on a
5416 * node or a leaf.
5417 *
5418 * If we are not at level 0 and not on shared tree blocks, go one
5419 * level deeper.
5420 *
5421 * If we are not at level 0 and on shared tree blocks, go one slot to
5422 * the right if possible or go up and right.
5423 */
5424
5425 down_read(&fs_info->commit_root_sem);
5426 left_level = btrfs_header_level(left_root->commit_root);
5427 left_root_level = left_level;
5428 left_path->nodes[left_level] =
5429 btrfs_clone_extent_buffer(left_root->commit_root);
5430 if (!left_path->nodes[left_level]) {
5431 up_read(&fs_info->commit_root_sem);
5432 ret = -ENOMEM;
5433 goto out;
5434 }
5435 extent_buffer_get(left_path->nodes[left_level]);
5436
5437 right_level = btrfs_header_level(right_root->commit_root);
5438 right_root_level = right_level;
5439 right_path->nodes[right_level] =
5440 btrfs_clone_extent_buffer(right_root->commit_root);
5441 if (!right_path->nodes[right_level]) {
5442 up_read(&fs_info->commit_root_sem);
5443 ret = -ENOMEM;
5444 goto out;
5445 }
5446 extent_buffer_get(right_path->nodes[right_level]);
5447 up_read(&fs_info->commit_root_sem);
5448
5449 if (left_level == 0)
5450 btrfs_item_key_to_cpu(left_path->nodes[left_level],
5451 &left_key, left_path->slots[left_level]);
5452 else
5453 btrfs_node_key_to_cpu(left_path->nodes[left_level],
5454 &left_key, left_path->slots[left_level]);
5455 if (right_level == 0)
5456 btrfs_item_key_to_cpu(right_path->nodes[right_level],
5457 &right_key, right_path->slots[right_level]);
5458 else
5459 btrfs_node_key_to_cpu(right_path->nodes[right_level],
5460 &right_key, right_path->slots[right_level]);
5461
5462 left_end_reached = right_end_reached = 0;
5463 advance_left = advance_right = 0;
5464
5465 while (1) {
5466 if (advance_left && !left_end_reached) {
5467 ret = tree_advance(fs_info, left_path, &left_level,
5468 left_root_level,
5469 advance_left != ADVANCE_ONLY_NEXT,
5470 &left_key);
5471 if (ret == -1)
5472 left_end_reached = ADVANCE;
5473 else if (ret < 0)
5474 goto out;
5475 advance_left = 0;
5476 }
5477 if (advance_right && !right_end_reached) {
5478 ret = tree_advance(fs_info, right_path, &right_level,
5479 right_root_level,
5480 advance_right != ADVANCE_ONLY_NEXT,
5481 &right_key);
5482 if (ret == -1)
5483 right_end_reached = ADVANCE;
5484 else if (ret < 0)
5485 goto out;
5486 advance_right = 0;
5487 }
5488
5489 if (left_end_reached && right_end_reached) {
5490 ret = 0;
5491 goto out;
5492 } else if (left_end_reached) {
5493 if (right_level == 0) {
5494 ret = changed_cb(left_path, right_path,
5495 &right_key,
5496 BTRFS_COMPARE_TREE_DELETED,
5497 ctx);
5498 if (ret < 0)
5499 goto out;
5500 }
5501 advance_right = ADVANCE;
5502 continue;
5503 } else if (right_end_reached) {
5504 if (left_level == 0) {
5505 ret = changed_cb(left_path, right_path,
5506 &left_key,
5507 BTRFS_COMPARE_TREE_NEW,
5508 ctx);
5509 if (ret < 0)
5510 goto out;
5511 }
5512 advance_left = ADVANCE;
5513 continue;
5514 }
5515
5516 if (left_level == 0 && right_level == 0) {
5517 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5518 if (cmp < 0) {
5519 ret = changed_cb(left_path, right_path,
5520 &left_key,
5521 BTRFS_COMPARE_TREE_NEW,
5522 ctx);
5523 if (ret < 0)
5524 goto out;
5525 advance_left = ADVANCE;
5526 } else if (cmp > 0) {
5527 ret = changed_cb(left_path, right_path,
5528 &right_key,
5529 BTRFS_COMPARE_TREE_DELETED,
5530 ctx);
5531 if (ret < 0)
5532 goto out;
5533 advance_right = ADVANCE;
5534 } else {
5535 enum btrfs_compare_tree_result result;
5536
5537 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5538 ret = tree_compare_item(left_path, right_path,
5539 tmp_buf);
5540 if (ret)
5541 result = BTRFS_COMPARE_TREE_CHANGED;
5542 else
5543 result = BTRFS_COMPARE_TREE_SAME;
5544 ret = changed_cb(left_path, right_path,
5545 &left_key, result, ctx);
5546 if (ret < 0)
5547 goto out;
5548 advance_left = ADVANCE;
5549 advance_right = ADVANCE;
5550 }
5551 } else if (left_level == right_level) {
5552 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5553 if (cmp < 0) {
5554 advance_left = ADVANCE;
5555 } else if (cmp > 0) {
5556 advance_right = ADVANCE;
5557 } else {
5558 left_blockptr = btrfs_node_blockptr(
5559 left_path->nodes[left_level],
5560 left_path->slots[left_level]);
5561 right_blockptr = btrfs_node_blockptr(
5562 right_path->nodes[right_level],
5563 right_path->slots[right_level]);
5564 left_gen = btrfs_node_ptr_generation(
5565 left_path->nodes[left_level],
5566 left_path->slots[left_level]);
5567 right_gen = btrfs_node_ptr_generation(
5568 right_path->nodes[right_level],
5569 right_path->slots[right_level]);
5570 if (left_blockptr == right_blockptr &&
5571 left_gen == right_gen) {
5572 /*
5573 * As we're on a shared block, don't
5574 * allow to go deeper.
5575 */
5576 advance_left = ADVANCE_ONLY_NEXT;
5577 advance_right = ADVANCE_ONLY_NEXT;
5578 } else {
5579 advance_left = ADVANCE;
5580 advance_right = ADVANCE;
5581 }
5582 }
5583 } else if (left_level < right_level) {
5584 advance_right = ADVANCE;
5585 } else {
5586 advance_left = ADVANCE;
5587 }
5588 }
5589
5590out:
5591 btrfs_free_path(left_path);
5592 btrfs_free_path(right_path);
5593 kvfree(tmp_buf);
5594 return ret;
5595}
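
/*
 * Illustrative sketch only, not part of btrfs: a minimal
 * btrfs_changed_cb_t that counts the differences between two commit
 * roots. The signature mirrors the changed_cb() invocations above;
 * struct example_diff_stats and example_changed_cb() are hypothetical.
 */
struct example_diff_stats {
 u64 nr_new;
 u64 nr_deleted;
 u64 nr_changed;
 u64 nr_same;
};

static int __maybe_unused example_changed_cb(struct btrfs_path *left_path,
 struct btrfs_path *right_path,
 struct btrfs_key *key,
 enum btrfs_compare_tree_result result,
 void *ctx)
{
 struct example_diff_stats *stats = ctx;

 switch (result) {
 case BTRFS_COMPARE_TREE_NEW:
 stats->nr_new++;
 break;
 case BTRFS_COMPARE_TREE_DELETED:
 stats->nr_deleted++;
 break;
 case BTRFS_COMPARE_TREE_CHANGED:
 stats->nr_changed++;
 break;
 default: /* BTRFS_COMPARE_TREE_SAME */
 stats->nr_same++;
 break;
 }
 /* a negative return aborts the whole compare */
 return 0;
}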
5596
5597/*
5598 * this is similar to btrfs_next_leaf, but does not try to preserve
5599 * and fixup the path. It looks for and returns the next key in the
5600 * tree based on the current path and the min_trans parameters.
5601 *
5602 * 0 is returned if another key is found, < 0 if there are any errors
5603 * and 1 is returned if there are no higher keys in the tree
5604 *
5605 * path->keep_locks should be set to 1 on the search made before
5606 * calling this function.
5607 */
5608int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5609 struct btrfs_key *key, int level, u64 min_trans)
5610{
5611 int slot;
5612 struct extent_buffer *c;
5613
5614 WARN_ON(!path->keep_locks);
5615 while (level < BTRFS_MAX_LEVEL) {
5616 if (!path->nodes[level])
5617 return 1;
5618
5619 slot = path->slots[level] + 1;
5620 c = path->nodes[level];
5621next:
5622 if (slot >= btrfs_header_nritems(c)) {
5623 int ret;
5624 int orig_lowest;
5625 struct btrfs_key cur_key;
5626 if (level + 1 >= BTRFS_MAX_LEVEL ||
5627 !path->nodes[level + 1])
5628 return 1;
5629
5630 if (path->locks[level + 1]) {
5631 level++;
5632 continue;
5633 }
5634
5635 slot = btrfs_header_nritems(c) - 1;
5636 if (level == 0)
5637 btrfs_item_key_to_cpu(c, &cur_key, slot);
5638 else
5639 btrfs_node_key_to_cpu(c, &cur_key, slot);
5640
5641 orig_lowest = path->lowest_level;
5642 btrfs_release_path(path);
5643 path->lowest_level = level;
5644 ret = btrfs_search_slot(NULL, root, &cur_key, path,
5645 0, 0);
5646 path->lowest_level = orig_lowest;
5647 if (ret < 0)
5648 return ret;
5649
5650 c = path->nodes[level];
5651 slot = path->slots[level];
5652 if (ret == 0)
5653 slot++;
5654 goto next;
5655 }
5656
5657 if (level == 0)
5658 btrfs_item_key_to_cpu(c, key, slot);
5659 else {
5660 u64 gen = btrfs_node_ptr_generation(c, slot);
5661
5662 if (gen < min_trans) {
5663 slot++;
5664 goto next;
5665 }
5666 btrfs_node_key_to_cpu(c, key, slot);
5667 }
5668 return 0;
5669 }
5670 return 1;
5671}
5672
5673/*
5674 * search the tree again to find a leaf with greater keys
5675 * returns 0 if it found something or 1 if there are no greater leaves.
5676 * returns < 0 on io errors.
5677 */
5678int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5679{
5680 return btrfs_next_old_leaf(root, path, 0);
5681}
5682
5683int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5684 u64 time_seq)
5685{
5686 int slot;
5687 int level;
5688 struct extent_buffer *c;
5689 struct extent_buffer *next;
5690 struct btrfs_key key;
5691 u32 nritems;
5692 int ret;
5693 int old_spinning = path->leave_spinning;
5694 int next_rw_lock = 0;
5695
5696 nritems = btrfs_header_nritems(path->nodes[0]);
5697 if (nritems == 0)
5698 return 1;
5699
5700 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5701again:
5702 level = 1;
5703 next = NULL;
5704 next_rw_lock = 0;
5705 btrfs_release_path(path);
5706
5707 path->keep_locks = 1;
5708 path->leave_spinning = 1;
5709
5710 if (time_seq)
5711 ret = btrfs_search_old_slot(root, &key, path, time_seq);
5712 else
5713 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5714 path->keep_locks = 0;
5715
5716 if (ret < 0)
5717 return ret;
5718
5719 nritems = btrfs_header_nritems(path->nodes[0]);
5720 /*
5721 * by releasing the path above we dropped all our locks. A balance
5722 * could have added more items next to the key that used to be
5723 * at the very end of the block. So, check again here and
5724 * advance the path if there are now more items available.
5725 */
5726 if (nritems > 0 && path->slots[0] < nritems - 1) {
5727 if (ret == 0)
5728 path->slots[0]++;
5729 ret = 0;
5730 goto done;
5731 }
5732 /*
5733 * So the above check misses one case:
5734 * - after releasing the path above, someone has removed the item that
5735	 *   used to be at the very end of the block, and balance between leaves
5736	 *   gets another one with a bigger key.offset to replace it.
5737	 *
5738	 * This one should be returned as well, or we can get leaf corruption
5739	 * later (esp. in __btrfs_drop_extents()).
5740	 *
5741	 * A bit more explanation about this check:
5742	 * with ret > 0, the key isn't found, the path points to the slot
5743	 * where it should be inserted, so the path->slots[0] item must be the
5744	 * bigger one.
5745 */
5746 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
5747 ret = 0;
5748 goto done;
5749 }
5750
5751 while (level < BTRFS_MAX_LEVEL) {
5752 if (!path->nodes[level]) {
5753 ret = 1;
5754 goto done;
5755 }
5756
5757 slot = path->slots[level] + 1;
5758 c = path->nodes[level];
5759 if (slot >= btrfs_header_nritems(c)) {
5760 level++;
5761 if (level == BTRFS_MAX_LEVEL) {
5762 ret = 1;
5763 goto done;
5764 }
5765 continue;
5766 }
5767
5768 if (next) {
5769 btrfs_tree_unlock_rw(next, next_rw_lock);
5770 free_extent_buffer(next);
5771 }
5772
5773 next = c;
5774 next_rw_lock = path->locks[level];
5775 ret = read_block_for_search(root, path, &next, level,
5776 slot, &key);
5777 if (ret == -EAGAIN)
5778 goto again;
5779
5780 if (ret < 0) {
5781 btrfs_release_path(path);
5782 goto done;
5783 }
5784
5785 if (!path->skip_locking) {
5786 ret = btrfs_try_tree_read_lock(next);
5787 if (!ret && time_seq) {
5788 /*
5789				 * If we don't get the lock, we may be racing
5790				 * with push_leaf_left, which holds that lock
5791				 * while itself waiting for the leaf we've
5792				 * currently locked. To solve this situation,
5793				 * we give up our lock and cycle.
5794 */
5795 free_extent_buffer(next);
5796 btrfs_release_path(path);
5797 cond_resched();
5798 goto again;
5799 }
5800 if (!ret) {
5801 btrfs_set_path_blocking(path);
5802 btrfs_tree_read_lock(next);
5803 btrfs_clear_path_blocking(path, next,
5804 BTRFS_READ_LOCK);
5805 }
5806 next_rw_lock = BTRFS_READ_LOCK;
5807 }
5808 break;
5809 }
5810 path->slots[level] = slot;
5811 while (1) {
5812 level--;
5813 c = path->nodes[level];
5814 if (path->locks[level])
5815 btrfs_tree_unlock_rw(c, path->locks[level]);
5816
5817 free_extent_buffer(c);
5818 path->nodes[level] = next;
5819 path->slots[level] = 0;
5820 if (!path->skip_locking)
5821 path->locks[level] = next_rw_lock;
5822 if (!level)
5823 break;
5824
5825 ret = read_block_for_search(root, path, &next, level,
5826 0, &key);
5827 if (ret == -EAGAIN)
5828 goto again;
5829
5830 if (ret < 0) {
5831 btrfs_release_path(path);
5832 goto done;
5833 }
5834
5835 if (!path->skip_locking) {
5836 ret = btrfs_try_tree_read_lock(next);
5837 if (!ret) {
5838 btrfs_set_path_blocking(path);
5839 btrfs_tree_read_lock(next);
5840 btrfs_clear_path_blocking(path, next,
5841 BTRFS_READ_LOCK);
5842 }
5843 next_rw_lock = BTRFS_READ_LOCK;
5844 }
5845 }
5846 ret = 0;
5847done:
5848 unlock_up(path, 0, 1, 0, NULL);
5849 path->leave_spinning = old_spinning;
5850 if (!old_spinning)
5851 btrfs_set_path_blocking(path);
5852
5853 return ret;
5854}
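
/*
 * Illustrative sketch only, not part of btrfs: the usual pattern for
 * walking every item in a tree, crossing leaf boundaries with
 * btrfs_next_leaf(). The visit() callback and example_iterate() are
 * hypothetical names.
 */
static int __maybe_unused example_iterate(struct btrfs_root *root,
 void (*visit)(struct extent_buffer *leaf, int slot,
 const struct btrfs_key *key))
{
 struct btrfs_path *path;
 struct btrfs_key key = { 0 }; /* start from the smallest key */
 int ret;

 path = btrfs_alloc_path();
 if (!path)
 return -ENOMEM;
 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 if (ret < 0)
 goto out;
 while (1) {
 struct extent_buffer *leaf = path->nodes[0];

 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
 ret = btrfs_next_leaf(root, path);
 if (ret < 0)
 goto out;
 if (ret > 0) /* no more leaves */
 break;
 continue;
 }
 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 visit(leaf, path->slots[0], &key);
 path->slots[0]++;
 }
 ret = 0;
out:
 btrfs_free_path(path);
 return ret;
}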
5855
5856/*
5857 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5858 * searching until it gets past min_objectid or finds an item of 'type'
5859 *
5860 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5861 */
5862int btrfs_previous_item(struct btrfs_root *root,
5863 struct btrfs_path *path, u64 min_objectid,
5864 int type)
5865{
5866 struct btrfs_key found_key;
5867 struct extent_buffer *leaf;
5868 u32 nritems;
5869 int ret;
5870
5871 while (1) {
5872 if (path->slots[0] == 0) {
5873 btrfs_set_path_blocking(path);
5874 ret = btrfs_prev_leaf(root, path);
5875 if (ret != 0)
5876 return ret;
5877 } else {
5878 path->slots[0]--;
5879 }
5880 leaf = path->nodes[0];
5881 nritems = btrfs_header_nritems(leaf);
5882 if (nritems == 0)
5883 return 1;
5884 if (path->slots[0] == nritems)
5885 path->slots[0]--;
5886
5887 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5888 if (found_key.objectid < min_objectid)
5889 break;
5890 if (found_key.type == type)
5891 return 0;
5892 if (found_key.objectid == min_objectid &&
5893 found_key.type < type)
5894 break;
5895 }
5896 return 1;
5897}
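
/*
 * Illustrative sketch only, not part of btrfs: position 'path' on the
 * item of 'type' that precedes 'key' within the same objectid, if one
 * exists. example_find_prev() is a hypothetical name.
 */
static int __maybe_unused example_find_prev(struct btrfs_root *root,
 struct btrfs_path *path,
 const struct btrfs_key *key,
 int type)
{
 int ret;

 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
 if (ret < 0)
 return ret;
 /* walk backwards until we find 'type' or pass key->objectid */
 return btrfs_previous_item(root, path, key->objectid, type);
}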
5898
5899/*
5900 * search in the extent tree to find a previous Metadata/Data extent item
5901 * with min objectid.
5902 *
5903 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5904 */
5905int btrfs_previous_extent_item(struct btrfs_root *root,
5906 struct btrfs_path *path, u64 min_objectid)
5907{
5908 struct btrfs_key found_key;
5909 struct extent_buffer *leaf;
5910 u32 nritems;
5911 int ret;
5912
5913 while (1) {
5914 if (path->slots[0] == 0) {
5915 btrfs_set_path_blocking(path);
5916 ret = btrfs_prev_leaf(root, path);
5917 if (ret != 0)
5918 return ret;
5919 } else {
5920 path->slots[0]--;
5921 }
5922 leaf = path->nodes[0];
5923 nritems = btrfs_header_nritems(leaf);
5924 if (nritems == 0)
5925 return 1;
5926 if (path->slots[0] == nritems)
5927 path->slots[0]--;
5928
5929 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5930 if (found_key.objectid < min_objectid)
5931 break;
5932 if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5933 found_key.type == BTRFS_METADATA_ITEM_KEY)
5934 return 0;
5935 if (found_key.objectid == min_objectid &&
5936 found_key.type < BTRFS_EXTENT_ITEM_KEY)
5937 break;
5938 }
5939 return 1;
5940}