// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->num_bytes < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->num_bytes;
}
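
/*
 * Note on entry_end(): the sum is clamped to (u64)-1 on overflow instead of
 * wrapping.  A short worked example with illustrative values (not from the
 * source): file_offset = (u64)-2 and num_bytes = 8 would wrap around to 6,
 * so the clamp returns (u64)-1 and keeps the rb-tree comparisons ordered.
 */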

/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}
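
/*
 * Both helpers above treat an ordered extent as the half-open byte range
 * [file_offset, file_offset + num_bytes).  A small worked example with
 * made-up numbers: for an entry with file_offset=4096 and num_bytes=4096,
 * offset_in_entry() is true for offsets 4096..8191 and false at 8192, and
 * range_overlaps(entry, 8192, 4096) is false because the ranges only touch.
 */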

/*
 * look for the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/*
 * Allocate and add a new ordered_extent into the per-inode tree.
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
				      u64 disk_bytenr, u64 num_bytes,
				      u64 disk_num_bytes, int type, int dio,
				      int compress_type)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;
	int ret;

	if (type == BTRFS_ORDERED_NOCOW || type == BTRFS_ORDERED_PREALLOC) {
		/* For nocow write, we can release the qgroup rsv right now */
		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
		if (ret < 0)
			return ret;
		ret = 0;
	} else {
		/*
		 * The ordered extent has reserved qgroup space, release now
		 * and pass the reserved number for qgroup_record to free.
		 */
		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
		if (ret < 0)
			return ret;
	}
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->disk_bytenr = disk_bytenr;
	entry->num_bytes = num_bytes;
	entry->disk_num_bytes = disk_num_bytes;
	entry->bytes_left = num_bytes;
	entry->inode = igrab(&inode->vfs_inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	entry->qgroup_rsv = ret;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio) {
		percpu_counter_add_batch(&fs_info->dio_bytes, num_bytes,
					 fs_info->delalloc_batch);
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);
	}

	/* one ref for the tree */
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
				"inconsistency in ordered tree at offset %llu",
				file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);

	return 0;
}
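
/*
 * A rough sketch of the lifetime set up above (hypothetical caller; the real
 * call sites live in the write paths, not in this file):
 *
 *	ret = btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
 *				       num_bytes, disk_num_bytes,
 *				       type /" one of the BTRFS_ORDERED_* bits "/);
 *	if (ret)
 *		return ret;
 *	// submit the write bio; completion accounting through the
 *	// btrfs_dec_test_*_ordered_pending() helpers eventually leads to
 *	// btrfs_remove_ordered_extent() + btrfs_put_ordered_extent(),
 *	// dropping the tree's reference taken here.
 */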

int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
			     u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes,
			     int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct btrfs_inode *inode, u64 file_offset,
				 u64 disk_bytenr, u64 num_bytes,
				 u64 disk_num_bytes, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct btrfs_inode *inode, u64 file_offset,
				      u64 disk_bytenr, u64 num_bytes,
				      u64 disk_num_bytes, int type,
				      int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes, type, 0,
					  compress_type);
}
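
/*
 * The three wrappers above differ only in the dio and compress_type
 * arguments they forward.  Roughly (illustrative, matching the calls above):
 *
 *	buffered write    -> btrfs_add_ordered_extent()
 *	direct IO write   -> btrfs_add_ordered_extent_dio()
 *	compressed write  -> btrfs_add_ordered_extent_compress()
 */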

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(entry->inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct btrfs_inode *inode,
					 struct btrfs_ordered_extent **cached,
					 u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size,
		      entry->file_offset + entry->num_bytes);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu",
			   dec_start, dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		btrfs_crit(fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}
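
/*
 * A minimal usage sketch for the walk-forward behaviour (hypothetical
 * completion handler, assuming 'start' and 'len' describe the finished IO):
 *
 *	u64 cur = start, last = start;
 *
 *	while (cur < start + len) {
 *		struct btrfs_ordered_extent *oe = NULL;
 *
 *		if (btrfs_dec_test_first_ordered_pending(inode, &oe, &cur,
 *					start + len - cur, uptodate))
 *			// 'oe' is fully complete, queue its finish work
 *		if (cur == last)	// nothing found, stop
 *			break;
 *		last = cur;		// 'cur' advanced past the range
 *	}
 */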

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		btrfs_crit(inode->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(list_empty(&entry->log_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kvfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}
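
/*
 * Reference-counting sketch (summarising the code above, not a new API): the
 * tree holds one reference from __btrfs_add_ordered_extent(), and every
 * lookup helper that returns an entry takes its own reference, so each
 * successful btrfs_lookup_ordered_extent()/..._range() call must be paired
 * with one btrfs_put_ordered_extent().
 */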

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = btrfs_inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;
	bool pending;

	/* This is paired with btrfs_add_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root)
		btrfs_delalloc_release_metadata(btrfs_inode, entry->num_bytes,
						false);

	if (test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		percpu_counter_add_batch(&fs_info->dio_bytes, -entry->num_bytes,
					 fs_info->delalloc_batch);

	tree = &btrfs_inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
	spin_unlock_irq(&tree->lock);

	/*
	 * The current running transaction is waiting on us, we need to let it
	 * know that we're complete and wake it up.
	 */
	if (pending) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality, it should be set,
		 * but if it isn't we don't want to deref/assert under the spin
		 * lock, so be nice and check if trans is set, but ASSERT() so
		 * if it isn't set a developer will notice.
		 */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ASSERT(trans);
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(btrfs_inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered, 1);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->disk_bytenr ||
		    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			      const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	struct list_head splice;
	u64 done;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX) {
			nr -= done;
		}
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}
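
/*
 * Usage sketch (hedged, in the style of the sync paths that call this): to
 * flush every ordered extent in the filesystem regardless of disk location,
 * pass an unlimited count and the full byte range:
 *
 *	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
 */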

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry, int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->num_bytes - 1;
	struct btrfs_inode *inode = BTRFS_I(entry->inode);

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately.  Wait first
	 * for any ordered extents that haven't completed yet.  This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->num_bytes <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(ordered, 1);
		end = ordered->file_offset;
		/*
		 * If the ordered extent had an error save the error but don't
		 * exit without waiting first for all other ordered extents in
		 * the range to complete.
		 */
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}
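
/*
 * A minimal usage sketch (hypothetical caller, in the style of the fsync and
 * truncate paths that use this helper): flush and wait on a whole file
 * before an operation that must see stable data on disk:
 *
 *	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
 *	if (ret)
 *		return ret;	// writeback or ordered IO failed
 */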

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		refcount_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}
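
/*
 * Usage sketch (hypothetical, patterned after the DIO behaviour described
 * above): check a byte range for any in-flight ordered IO before touching it:
 *
 *	ordered = btrfs_lookup_ordered_range(inode, pos, count);
 *	if (ordered) {
 *		btrfs_put_ordered_extent(ordered);
 *		// back off: the range still has ordered IO pending
 *	}
 */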

/*
 * Adds all ordered extents to the given list.  The list ends up sorted by the
 * file_offset of the ordered extents.
 */
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
					   struct list_head *list)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *n;

	ASSERT(inode_is_locked(&inode->vfs_inode));

	spin_lock_irq(&tree->lock);
	for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
		struct btrfs_ordered_extent *ordered;

		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);

		if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;

		ASSERT(list_empty(&ordered->log_list));
		list_add_tail(&ordered->log_list, list);
		refcount_inc(&ordered->refs);
	}
	spin_unlock_irq(&tree->lock);
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct btrfs_inode *inode, u64 offset,
			   u64 disk_bytenr, u8 *sum, int len)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = btrfs_inode_sectorsize(inode);
	const u8 blocksize_bits = inode->vfs_inode.i_sb->s_blocksize_bits;
	const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int index = 0;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 0;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr &&
		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
			i = (disk_bytenr - ordered_sum->bytenr) >> blocksize_bits;
			num_sectors = ordered_sum->len >> blocksize_bits;
			num_sectors = min_t(int, len - index, num_sectors - i);
			memcpy(sum + index, ordered_sum->sums + i * csum_size,
			       num_sectors * csum_size);

			index += (int)num_sectors * csum_size;
			if (index == len)
				goto out;
			disk_bytenr += num_sectors * sectorsize;
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return index;
}
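
/*
 * Worked example for the index arithmetic above (made-up numbers): with a
 * 4KiB sector size (blocksize_bits == 12) and a 4-byte crc32c csum_size, a
 * sum entry covering bytenr=1M, len=16K holds 4 checksums; a lookup at
 * disk_bytenr = 1M + 8K computes i = 8K >> 12 = 2 and copies checksums
 * starting at byte offset 2 * 4 within ordered_sum->sums.
 */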

/*
 * btrfs_lock_and_flush_ordered_range - Lock the passed range and ensure all
 * pending ordered extents in it are run to completion.
 *
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 * locked range. It's the caller's responsibility to free the cached state.
 *
 * This function always returns with the given range locked, ensuring after it's
 * called no ordered extent can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;

	while (1) {
		lock_extent_bits(&inode->io_tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/*
			 * If no external cached_state has been passed then
			 * decrement the extra ref taken for cachedp since we
			 * aren't exposing it outside of this function
			 */
			if (!cached_state)
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent_cached(&inode->io_tree, start, end, cachedp);
		btrfs_start_ordered_extent(ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}
}
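
/*
 * Minimal usage sketch (hypothetical caller, assuming 'start' and 'end' are
 * an inclusive byte range within the inode):
 *
 *	struct extent_state *cached = NULL;
 *
 *	btrfs_lock_and_flush_ordered_range(inode, start, end, &cached);
 *	// range is locked and has no pending ordered extents
 *	unlock_extent_cached(&inode->io_tree, start, end, &cached);
 */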

int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}