// SPDX-License-Identifier: GPL-2.0
/*
 * fs/ext4/extents_status.c
 *
 * Written by Yongqiang Yang <xiaoqiangnk@gmail.com>
 * Modified by
 *	Allison Henderson <achender@linux.vnet.ibm.com>
 *	Hugh Dickins <hughd@google.com>
 *	Zheng Liu <wenqing.lz@taobao.com>
 *
 * Ext4 extents status tree core functions.
 */
#include <linux/list_sort.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "ext4.h"

#include <trace/events/ext4.h>

/*
 * According to previous discussion at the Ext4 Developer Workshop, we
 * will introduce a new structure called io tree to track all extent
 * status in order to solve some problems that we have met
 * (e.g. reservation space warning), and to provide extent-level locking.
 * The delayed extent tree is the first step towards this goal.  It was
 * originally built by Yongqiang Yang.  At that time it was called the
 * delay extent tree, whose only goal was to track delayed extents in
 * memory to simplify the implementation of fiemap and bigalloc, and to
 * introduce lseek SEEK_DATA/SEEK_HOLE support.  That is why it is still
 * called the delay extent tree in the first commit.  But to better
 * reflect what it does, it has been renamed to the extent status tree.
 *
 * Step1:
 * Currently the first step has been done.  All delayed extents are
 * tracked in the tree.  The tree maintains a delayed extent from the
 * time a delayed allocation is issued until the delayed extent is
 * written out or invalidated.  Therefore the implementations of fiemap
 * and bigalloc are simplified, and SEEK_DATA/SEEK_HOLE are introduced.
 *
 * The following comment describes the implementation of the extent
 * status tree and future work.
 *
 * Step2:
 * In this step all extent status is tracked by the extent status tree.
 * Thus, we can first try to look up a block mapping in this tree before
 * searching the extent tree.  Hence, the single extent cache can be
 * removed because the extent status tree can do a better job.  Extents
 * in the status tree are loaded on-demand.  Therefore, the extent
 * status tree may not contain all of the extents in a file.  Meanwhile
 * we define a shrinker to reclaim memory from the extent status tree
 * because a fragmented extent tree will make the status tree cost too
 * much memory.  Written/unwritten/hole extents in the tree will be
 * reclaimed by this shrinker when we are under high memory pressure.
 * Delayed extents will not be reclaimed because fiemap, bigalloc, and
 * seek_data/hole need them.
 */

/*
 * Extent status tree implementation for ext4.
 *
 *
 * ==========================================================================
 * The extent status tree tracks all extent status.
 *
 * 1. Why do we need to implement the extent status tree?
 *
 * Without the extent status tree, ext4 identifies a delayed extent by
 * looking up the page cache, which has several deficiencies - complicated,
 * buggy, and inefficient code.
 *
 * FIEMAP, SEEK_HOLE/DATA, bigalloc, and writeout all need to know if a
 * block or a range of blocks belongs to a delayed extent.
 *
 * Let us have a look at how they do this without the extent status tree.
 * -- FIEMAP
 *	FIEMAP looks up the page cache to distinguish delayed allocations
 *	from holes.
 *
 * -- SEEK_HOLE/DATA
 *	SEEK_HOLE/DATA has the same problem as FIEMAP.
 *
 * -- bigalloc
 *	bigalloc looks up the page cache to figure out if a block is
 *	already under delayed allocation or not in order to determine
 *	whether quota reservation is needed for the cluster.
 *
 * -- writeout
 *	Writeout looks up the whole page cache to see if a buffer is
 *	mapped.  If there are not very many delayed buffers, this is
 *	time consuming.
 *
 * With the extent status tree implementation, FIEMAP, SEEK_HOLE/DATA,
 * bigalloc and writeout can figure out if a block or a range of blocks
 * is under delayed allocation (i.e. belongs to a delayed extent) by
 * searching the extent status tree.
 *
 *
 * ==========================================================================
 * 2. Ext4 extent status tree implementation
 *
 * -- extent
 *	An extent is a range of blocks which are contiguous logically and
 *	physically.  Unlike an extent in the extent tree, this extent is an
 *	in-memory struct; there is no corresponding on-disk data.  There is
 *	no limit on the length of an extent, so an extent can contain as
 *	many blocks as are contiguous logically and physically.
 *
 * -- extent status tree
 *	Every inode has an extent status tree and all allocated blocks are
 *	added to the tree with different statuses.  The extents in the tree
 *	are ordered by logical block number.
 *
 * -- operations on an extent status tree
 *	There are three important operations on an extent status tree: find
 *	the next extent, add an extent (a range of blocks), and remove an
 *	extent.
 *
 * -- race on an extent status tree
 *	The extent status tree is protected by inode->i_es_lock.
 *
 * -- memory consumption
 *	A fragmented extent tree will make the extent status tree cost too
 *	much memory.  Hence, we will reclaim written/unwritten/hole extents
 *	from the tree under heavy memory pressure.
 *
 *
 * ==========================================================================
 * 3. Performance analysis
 *
 * -- overhead
 *	1. There is a cached extent for write access, so if writes are not
 *	   very random, adding space operations are in O(1) time.
 *
 * -- gain
 *	2. Code is much simpler, more readable, more maintainable and more
 *	   efficient.
 *
 *
 * ==========================================================================
 * 4. TODO list
 *
 * -- Refactor delayed space reservation
 *
 * -- Extent-level locking
 */
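
/*
 * Illustrative sketch (not copied from an in-tree caller): mapping code
 * can consult the status tree before touching the on-disk extent tree.
 * The inode, map, and status handling below are hypothetical and only
 * show the intended lookup-first flow:
 *
 *	struct extent_status es;
 *
 *	if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es) &&
 *	    !ext4_es_is_delayed(&es) && !ext4_es_is_hole(&es))
 *		map->m_pblk = ext4_es_pblock(&es) +
 *			      map->m_lblk - es.es_lblk;
 */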

static struct kmem_cache *ext4_es_cachep;
static struct kmem_cache *ext4_pending_cachep;

static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end, int *reserved);
static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
		       struct ext4_inode_info *locked_ei);
static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
			     ext4_lblk_t len);

int __init ext4_init_es(void)
{
	ext4_es_cachep = kmem_cache_create("ext4_extent_status",
					   sizeof(struct extent_status),
					   0, (SLAB_RECLAIM_ACCOUNT), NULL);
	if (ext4_es_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_es(void)
{
	kmem_cache_destroy(ext4_es_cachep);
}

void ext4_es_init_tree(struct ext4_es_tree *tree)
{
	tree->root = RB_ROOT;
	tree->cache_es = NULL;
}

#ifdef ES_DEBUG__
static void ext4_es_print_tree(struct inode *inode)
{
	struct ext4_es_tree *tree;
	struct rb_node *node;

	printk(KERN_DEBUG "status extents for inode %lu:", inode->i_ino);
	tree = &EXT4_I(inode)->i_es_tree;
	node = rb_first(&tree->root);
	while (node) {
		struct extent_status *es;

		es = rb_entry(node, struct extent_status, rb_node);
		printk(KERN_DEBUG " [%u/%u) %llu %x",
		       es->es_lblk, es->es_len,
		       ext4_es_pblock(es), ext4_es_status(es));
		node = rb_next(node);
	}
	printk(KERN_DEBUG "\n");
}
#else
#define ext4_es_print_tree(inode)
#endif

static inline ext4_lblk_t ext4_es_end(struct extent_status *es)
{
	BUG_ON(es->es_lblk + es->es_len < es->es_lblk);
	return es->es_lblk + es->es_len - 1;
}

/*
 * Search through the tree for an extent with a given offset.  If it
 * can't be found, try to find the next extent.
 */
static struct extent_status *__es_tree_search(struct rb_root *root,
					      ext4_lblk_t lblk)
{
	struct rb_node *node = root->rb_node;
	struct extent_status *es = NULL;

	while (node) {
		es = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es))
			node = node->rb_right;
		else
			return es;
	}

	if (es && lblk < es->es_lblk)
		return es;

	if (es && lblk > ext4_es_end(es)) {
		node = rb_next(&es->rb_node);
		return node ? rb_entry(node, struct extent_status, rb_node) :
			      NULL;
	}

	return NULL;
}
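
/*
 * For example, if the tree holds [0/5) and [10/5), searching for block 12
 * lands inside [10/5) and returns it, while searching for block 7 (a gap
 * between the two extents) returns the following extent [10/5) instead.
 */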

/*
 * ext4_es_find_extent_range - find extent with specified status within block
 *                             range or next extent following block range in
 *                             extents status tree
 *
 * @inode - file containing the range
 * @matching_fn - pointer to function that matches extents with desired status
 * @lblk - logical block defining start of range
 * @end - logical block defining end of range
 * @es - extent found, if any
 *
 * Find the first extent within the block range specified by @lblk and @end
 * in the extents status tree that satisfies @matching_fn.  If a match
 * is found, it's returned in @es.  If not, and a matching extent is found
 * beyond the block range, it's returned in @es.  If no match is found, an
 * extent is returned in @es whose es_lblk, es_len, and es_pblk components
 * are 0.
 */
static void __es_find_extent_range(struct inode *inode,
				   int (*matching_fn)(struct extent_status *es),
				   ext4_lblk_t lblk, ext4_lblk_t end,
				   struct extent_status *es)
{
	struct ext4_es_tree *tree = NULL;
	struct extent_status *es1 = NULL;
	struct rb_node *node;

	WARN_ON(es == NULL);
	WARN_ON(end < lblk);

	tree = &EXT4_I(inode)->i_es_tree;

	/* see if the extent has been cached */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	if (tree->cache_es) {
		es1 = tree->cache_es;
		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
			es_debug("%u cached by [%u/%u) %llu %x\n",
				 lblk, es1->es_lblk, es1->es_len,
				 ext4_es_pblock(es1), ext4_es_status(es1));
			goto out;
		}
	}

	es1 = __es_tree_search(&tree->root, lblk);

out:
	if (es1 && !matching_fn(es1)) {
		while ((node = rb_next(&es1->rb_node)) != NULL) {
			es1 = rb_entry(node, struct extent_status, rb_node);
			if (es1->es_lblk > end) {
				es1 = NULL;
				break;
			}
			if (matching_fn(es1))
				break;
		}
	}

	if (es1 && matching_fn(es1)) {
		tree->cache_es = es1;
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
	}
}

/*
 * Locking for __es_find_extent_range() for external use
 */
void ext4_es_find_extent_range(struct inode *inode,
			       int (*matching_fn)(struct extent_status *es),
			       ext4_lblk_t lblk, ext4_lblk_t end,
			       struct extent_status *es)
{
	trace_ext4_es_find_extent_range_enter(inode, lblk);

	read_lock(&EXT4_I(inode)->i_es_lock);
	__es_find_extent_range(inode, matching_fn, lblk, end, es);
	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_find_extent_range_exit(inode, es);
}
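
/*
 * Usage sketch: callers pass a predicate with the matching_fn signature.
 * The wrapper below is hypothetical; in-tree callers wrap the
 * ext4_es_is_*() helpers in the same way where they are needed:
 *
 *	static int es_is_delayed(struct extent_status *es)
 *	{
 *		return ext4_es_is_delayed(es);
 *	}
 *
 *	ext4_es_find_extent_range(inode, &es_is_delayed, lblk, end, &es);
 */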

/*
 * __es_scan_range - search block range for block with specified status
 *                   in extents status tree
 *
 * @inode - file containing the range
 * @matching_fn - pointer to function that matches extents with desired status
 * @lblk - logical block defining start of range
 * @end - logical block defining end of range
 *
 * Returns true if at least one block in the specified block range satisfies
 * the criterion specified by @matching_fn, and false if not.  If at least
 * one extent has the specified status, then there is at least one block
 * in the cluster with that status.  Should only be called by code that has
 * taken i_es_lock.
 */
static bool __es_scan_range(struct inode *inode,
			    int (*matching_fn)(struct extent_status *es),
			    ext4_lblk_t start, ext4_lblk_t end)
{
	struct extent_status es;

	__es_find_extent_range(inode, matching_fn, start, end, &es);
	if (es.es_len == 0)
		return false; /* no matching extent in the tree */
	else if (es.es_lblk <= start &&
		 start < es.es_lblk + es.es_len)
		return true;
	else if (start <= es.es_lblk && es.es_lblk <= end)
		return true;
	else
		return false;
}

/*
 * Locking for __es_scan_range() for external use
 */
bool ext4_es_scan_range(struct inode *inode,
			int (*matching_fn)(struct extent_status *es),
			ext4_lblk_t lblk, ext4_lblk_t end)
{
	bool ret;

	read_lock(&EXT4_I(inode)->i_es_lock);
	ret = __es_scan_range(inode, matching_fn, lblk, end);
	read_unlock(&EXT4_I(inode)->i_es_lock);

	return ret;
}

/*
 * __es_scan_clu - search cluster for block with specified status in
 *                 extents status tree
 *
 * @inode - file containing the cluster
 * @matching_fn - pointer to function that matches extents with desired status
 * @lblk - logical block in cluster to be searched
 *
 * Returns true if at least one extent in the cluster containing @lblk
 * satisfies the criterion specified by @matching_fn, and false if not.  If at
 * least one extent has the specified status, then there is at least one block
 * in the cluster with that status.  Should only be called by code that has
 * taken i_es_lock.
 */
static bool __es_scan_clu(struct inode *inode,
			  int (*matching_fn)(struct extent_status *es),
			  ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t lblk_start, lblk_end;

	lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
	lblk_end = lblk_start + sbi->s_cluster_ratio - 1;

	return __es_scan_range(inode, matching_fn, lblk_start, lblk_end);
}

/*
 * Locking for __es_scan_clu() for external use
 */
bool ext4_es_scan_clu(struct inode *inode,
		      int (*matching_fn)(struct extent_status *es),
		      ext4_lblk_t lblk)
{
	bool ret;

	read_lock(&EXT4_I(inode)->i_es_lock);
	ret = __es_scan_clu(inode, matching_fn, lblk);
	read_unlock(&EXT4_I(inode)->i_es_lock);

	return ret;
}

static void ext4_es_list_add(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	if (!list_empty(&ei->i_es_list))
		return;

	spin_lock(&sbi->s_es_lock);
	if (list_empty(&ei->i_es_list)) {
		list_add_tail(&ei->i_es_list, &sbi->s_es_list);
		sbi->s_es_nr_inode++;
	}
	spin_unlock(&sbi->s_es_lock);
}

static void ext4_es_list_del(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	spin_lock(&sbi->s_es_lock);
	if (!list_empty(&ei->i_es_list)) {
		list_del_init(&ei->i_es_list);
		sbi->s_es_nr_inode--;
		WARN_ON_ONCE(sbi->s_es_nr_inode < 0);
	}
	spin_unlock(&sbi->s_es_lock);
}

static struct extent_status *
ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
		     ext4_fsblk_t pblk)
{
	struct extent_status *es;

	es = kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
	if (es == NULL)
		return NULL;
	es->es_lblk = lblk;
	es->es_len = len;
	es->es_pblk = pblk;

	/*
	 * We don't count delayed extents because we never try to reclaim
	 * them.
	 */
	if (!ext4_es_is_delayed(es)) {
		if (!EXT4_I(inode)->i_es_shk_nr++)
			ext4_es_list_add(inode);
		percpu_counter_inc(&EXT4_SB(inode->i_sb)->
					s_es_stats.es_stats_shk_cnt);
	}

	EXT4_I(inode)->i_es_all_nr++;
	percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);

	return es;
}

static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
{
	EXT4_I(inode)->i_es_all_nr--;
	percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);

	/* Decrease the shrink counter when this es is not delayed */
	if (!ext4_es_is_delayed(es)) {
		BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0);
		if (!--EXT4_I(inode)->i_es_shk_nr)
			ext4_es_list_del(inode);
		percpu_counter_dec(&EXT4_SB(inode->i_sb)->
					s_es_stats.es_stats_shk_cnt);
	}

	kmem_cache_free(ext4_es_cachep, es);
}

/*
 * Check whether or not two extents can be merged.
 * Conditions:
 * - logical block numbers are contiguous
 * - physical block numbers are contiguous
 * - status is equal
 */
static int ext4_es_can_be_merged(struct extent_status *es1,
				 struct extent_status *es2)
{
	if (ext4_es_type(es1) != ext4_es_type(es2))
		return 0;

	if (((__u64) es1->es_len) + es2->es_len > EXT_MAX_BLOCKS) {
		pr_warn("ES assertion failed when merging extents. "
			"The sum of lengths of es1 (%d) and es2 (%d) "
			"is bigger than allowed file size (%d)\n",
			es1->es_len, es2->es_len, EXT_MAX_BLOCKS);
		WARN_ON(1);
		return 0;
	}

	if (((__u64) es1->es_lblk) + es1->es_len != es2->es_lblk)
		return 0;

	if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) &&
	    (ext4_es_pblock(es1) + es1->es_len == ext4_es_pblock(es2)))
		return 1;

	if (ext4_es_is_hole(es1))
		return 1;

	/* we need to check that the delayed extent is without unwritten status */
	if (ext4_es_is_delayed(es1) && !ext4_es_is_unwritten(es1))
		return 1;

	return 0;
}
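
/*
 * Worked example: a written extent [0/10) at pblk 100 and a written
 * extent [10/5) at pblk 110 merge into [0/15) at pblk 100, since
 * 0 + 10 == 10 logically and 100 + 10 == 110 physically.  If the second
 * extent sat at pblk 200 instead, the two would remain separate.
 */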

static struct extent_status *
ext4_es_try_to_merge_left(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_prev(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es1, es)) {
		es1->es_len += es->es_len;
		if (ext4_es_is_referenced(es))
			ext4_es_set_referenced(es1);
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		es = es1;
	}

	return es;
}

static struct extent_status *
ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_next(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es, es1)) {
		es->es_len += es1->es_len;
		if (ext4_es_is_referenced(es1))
			ext4_es_set_referenced(es);
		rb_erase(node, &tree->root);
		ext4_es_free_extent(inode, es1);
	}

	return es;
}

#ifdef ES_AGGRESSIVE_TEST
#include "ext4_extents.h"	/* Needed when ES_AGGRESSIVE_TEST is defined */

static void ext4_es_insert_extent_ext_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t ee_block;
	ext4_fsblk_t ee_start;
	unsigned short ee_len;
	int depth, ee_status, es_status;

	path = ext4_find_extent(inode, es->es_lblk, NULL, EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;

	if (ex) {
		ee_block = le32_to_cpu(ex->ee_block);
		ee_start = ext4_ext_pblock(ex);
		ee_len = ext4_ext_get_actual_len(ex);

		ee_status = ext4_ext_is_unwritten(ex) ? 1 : 0;
		es_status = ext4_es_is_unwritten(es) ? 1 : 0;

		/*
		 * Make sure ex and es do not overlap when we try to insert
		 * a delayed/hole extent.
		 */
		if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) {
			if (in_range(es->es_lblk, ee_block, ee_len)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu we can find an extent "
					"at block [%d/%d/%llu/%c], but we "
					"want to add a delayed/hole extent "
					"[%d/%d/%llu/%x]\n",
					inode->i_ino, ee_block, ee_len,
					ee_start, ee_status ? 'u' : 'w',
					es->es_lblk, es->es_len,
					ext4_es_pblock(es), ext4_es_status(es));
			}
			goto out;
		}

		/*
		 * We don't check ee_block == es->es_lblk, etc. because es
		 * might be a part of a whole extent, and vice versa.
		 */
		if (es->es_lblk < ee_block ||
		    ext4_es_pblock(es) != ee_start + es->es_lblk - ee_block) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
			goto out;
		}

		if (ee_status ^ es_status) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
		}
	} else {
		/*
		 * We can't find an extent on disk.  So we need to make sure
		 * that we don't want to add a written/unwritten extent.
		 */
		if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"can't find an extent at block %d but we want "
				"to add a written/unwritten extent "
				"[%d/%d/%llu/%x]\n", inode->i_ino,
				es->es_lblk, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
		}
	}
out:
	ext4_ext_drop_refs(path);
	kfree(path);
}

static void ext4_es_insert_extent_ind_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_map_blocks map;
	int retval;

	/*
	 * Here we call ext4_ind_map_blocks to look up a block mapping
	 * because the 'Indirect' structure is defined in indirect.c, so
	 * we can't access the direct/indirect tree from outside it.  It
	 * would be too ugly to define this function in indirect.c.
	 */

	map.m_lblk = es->es_lblk;
	map.m_len = es->es_len;

	retval = ext4_ind_map_blocks(NULL, inode, &map, 0);
	if (retval > 0) {
		if (ext4_es_is_delayed(es) || ext4_es_is_hole(es)) {
			/*
			 * We want to add a delayed/hole extent but this
			 * block has been allocated.
			 */
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can find blocks but we want to add a "
				"delayed/hole extent [%d/%d/%llu/%x]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
			return;
		} else if (ext4_es_is_written(es)) {
			if (retval != es->es_len) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu retval %d != es_len %d\n",
					inode->i_ino, retval, es->es_len);
				return;
			}
			if (map.m_pblk != ext4_es_pblock(es)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu m_pblk %llu != "
					"es_pblk %llu\n",
					inode->i_ino, map.m_pblk,
					ext4_es_pblock(es));
				return;
			}
		} else {
			/*
			 * We don't need to check unwritten extents because
			 * indirect-based files don't have them.
			 */
			BUG();
		}
	} else if (retval == 0) {
		if (ext4_es_is_written(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can't find the block but we want to add "
				"a written extent [%d/%d/%llu/%x]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
			return;
		}
	}
}

static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
	/*
	 * We don't need to worry about the race condition because the
	 * caller takes i_data_sem locking.
	 */
	BUG_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		ext4_es_insert_extent_ext_check(inode, es);
	else
		ext4_es_insert_extent_ind_check(inode, es);
}
#else
static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
}
#endif

static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node **p = &tree->root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_status *es;

	while (*p) {
		parent = *p;
		es = rb_entry(parent, struct extent_status, rb_node);

		if (newes->es_lblk < es->es_lblk) {
			if (ext4_es_can_be_merged(newes, es)) {
				/*
				 * Here we can modify es_lblk directly
				 * because it isn't overlapped.
				 */
				es->es_lblk = newes->es_lblk;
				es->es_len += newes->es_len;
				if (ext4_es_is_written(es) ||
				    ext4_es_is_unwritten(es))
					ext4_es_store_pblock(es,
							     newes->es_pblk);
				es = ext4_es_try_to_merge_left(inode, es);
				goto out;
			}
			p = &(*p)->rb_left;
		} else if (newes->es_lblk > ext4_es_end(es)) {
			if (ext4_es_can_be_merged(es, newes)) {
				es->es_len += newes->es_len;
				es = ext4_es_try_to_merge_right(inode, es);
				goto out;
			}
			p = &(*p)->rb_right;
		} else {
			BUG();
			return -EINVAL;
		}
	}

	es = ext4_es_alloc_extent(inode, newes->es_lblk, newes->es_len,
				  newes->es_pblk);
	if (!es)
		return -ENOMEM;
	rb_link_node(&es->rb_node, parent, p);
	rb_insert_color(&es->rb_node, &tree->root);

out:
	tree->cache_es = es;
	return 0;
}

/*
 * ext4_es_insert_extent() adds information to an inode's extent
 * status tree.
 *
 * Return 0 on success, error code on failure.
 */
int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk,
			  unsigned int status)
{
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;
	int err = 0;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n",
		 lblk, len, pblk, status, inode->i_ino);

	if (!len)
		return 0;

	BUG_ON(end < lblk);

	if ((status & EXTENT_STATUS_DELAYED) &&
	    (status & EXTENT_STATUS_WRITTEN)) {
		ext4_warning(inode->i_sb, "Inserting extent [%u/%u] as "
			     "delayed and written, which can potentially "
			     "cause data loss.", lblk, len);
		WARN_ON(1);
	}

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock_status(&newes, pblk, status);
	trace_ext4_es_insert_extent(inode, &newes);

	ext4_es_insert_extent_check(inode, &newes);

	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end, NULL);
	if (err != 0)
		goto error;
retry:
	err = __es_insert_extent(inode, &newes);
	if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
					  128, EXT4_I(inode)))
		goto retry;
	if (err == -ENOMEM && !ext4_es_is_delayed(&newes))
		err = 0;

	if (sbi->s_cluster_ratio > 1 && test_opt(inode->i_sb, DELALLOC) &&
	    (status & EXTENT_STATUS_WRITTEN ||
	     status & EXTENT_STATUS_UNWRITTEN))
		__revise_pending(inode, lblk, len);

error:
	write_unlock(&EXT4_I(inode)->i_es_lock);

	ext4_es_print_tree(inode);

	return err;
}
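
/*
 * Usage sketch (values are illustrative, not from a specific caller):
 * after allocating blocks for a mapping, record them as written:
 *
 *	ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
 *			      map->m_pblk, EXTENT_STATUS_WRITTEN);
 */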

/*
 * ext4_es_cache_extent() inserts information into the extent status
 * tree if and only if there isn't information about the range in
 * question already.
 */
void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk,
			  unsigned int status)
{
	struct extent_status *es;
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock_status(&newes, pblk, status);
	trace_ext4_es_cache_extent(inode, &newes);

	if (!len)
		return;

	BUG_ON(end < lblk);

	write_lock(&EXT4_I(inode)->i_es_lock);

	es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk);
	if (!es || es->es_lblk > end)
		__es_insert_extent(inode, &newes);
	write_unlock(&EXT4_I(inode)->i_es_lock);
}

/*
 * ext4_es_lookup_extent() looks up an extent in the extent status tree.
 *
 * ext4_es_lookup_extent is called by ext4_map_blocks/ext4_da_map_blocks.
 *
 * Return: 1 if found, 0 if not.
 */
int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t *next_lblk,
			  struct extent_status *es)
{
	struct ext4_es_tree *tree;
	struct ext4_es_stats *stats;
	struct extent_status *es1 = NULL;
	struct rb_node *node;
	int found = 0;

	trace_ext4_es_lookup_extent_enter(inode, lblk);
	es_debug("lookup extent in block %u\n", lblk);

	tree = &EXT4_I(inode)->i_es_tree;
	read_lock(&EXT4_I(inode)->i_es_lock);

	/* find the extent in the cache first */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	if (tree->cache_es) {
		es1 = tree->cache_es;
		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
			es_debug("%u cached by [%u/%u)\n",
				 lblk, es1->es_lblk, es1->es_len);
			found = 1;
			goto out;
		}
	}

	node = tree->root.rb_node;
	while (node) {
		es1 = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es1->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es1))
			node = node->rb_right;
		else {
			found = 1;
			break;
		}
	}

out:
	stats = &EXT4_SB(inode->i_sb)->s_es_stats;
	if (found) {
		BUG_ON(!es1);
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
		if (!ext4_es_is_referenced(es1))
			ext4_es_set_referenced(es1);
		percpu_counter_inc(&stats->es_stats_cache_hits);
		if (next_lblk) {
			node = rb_next(&es1->rb_node);
			if (node) {
				es1 = rb_entry(node, struct extent_status,
					       rb_node);
				*next_lblk = es1->es_lblk;
			} else
				*next_lblk = 0;
		}
	} else {
		percpu_counter_inc(&stats->es_stats_cache_misses);
	}

	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_lookup_extent_exit(inode, es, found);
	return found;
}

struct rsvd_count {
	int ndelonly;
	bool first_do_lblk_found;
	ext4_lblk_t first_do_lblk;
	ext4_lblk_t last_do_lblk;
	struct extent_status *left_es;
	bool partial;
	ext4_lblk_t lclu;
};

/*
 * init_rsvd - initialize reserved count data before removing block range
 *	       in file from extent status tree
 *
 * @inode - file containing range
 * @lblk - first block in range
 * @es - pointer to first extent in range
 * @rc - pointer to reserved count data
 *
 * Assumes es is not NULL
 */
static void init_rsvd(struct inode *inode, ext4_lblk_t lblk,
		      struct extent_status *es, struct rsvd_count *rc)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct rb_node *node;

	rc->ndelonly = 0;

	/*
	 * for bigalloc, note the first delonly block in the range has not
	 * been found, record the extent containing the block to the left of
	 * the region to be removed, if any, and note that there's no partial
	 * cluster to track
	 */
	if (sbi->s_cluster_ratio > 1) {
		rc->first_do_lblk_found = false;
		if (lblk > es->es_lblk) {
			rc->left_es = es;
		} else {
			node = rb_prev(&es->rb_node);
			rc->left_es = node ? rb_entry(node,
						      struct extent_status,
						      rb_node) : NULL;
		}
		rc->partial = false;
	}
}

/*
 * count_rsvd - count the clusters containing delayed and not unwritten
 *		(delonly) blocks in a range within an extent and add to
 *		the running tally in rsvd_count
 *
 * @inode - file containing extent
 * @lblk - first block in range
 * @len - length of range in blocks
 * @es - pointer to extent containing clusters to be counted
 * @rc - pointer to reserved count data
 *
 * Tracks partial clusters found at the beginning and end of extents so
 * they aren't overcounted when they span adjacent extents
 */
static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
		       struct extent_status *es, struct rsvd_count *rc)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t i, end, nclu;

	if (!ext4_es_is_delonly(es))
		return;

	WARN_ON(len <= 0);

	if (sbi->s_cluster_ratio == 1) {
		rc->ndelonly += (int) len;
		return;
	}

	/* bigalloc */

	i = (lblk < es->es_lblk) ? es->es_lblk : lblk;
	end = lblk + (ext4_lblk_t) len - 1;
	end = (end > ext4_es_end(es)) ? ext4_es_end(es) : end;

	/* record the first block of the first delonly extent seen */
	if (rc->first_do_lblk_found == false) {
		rc->first_do_lblk = i;
		rc->first_do_lblk_found = true;
	}

	/* update the last lblk in the region seen so far */
	rc->last_do_lblk = end;

	/*
	 * if we're tracking a partial cluster and the current extent
	 * doesn't start with it, count it and stop tracking
	 */
	if (rc->partial && (rc->lclu != EXT4_B2C(sbi, i))) {
		rc->ndelonly++;
		rc->partial = false;
	}

	/*
	 * if the first cluster doesn't start on a cluster boundary but
	 * ends on one, count it
	 */
	if (EXT4_LBLK_COFF(sbi, i) != 0) {
		if (end >= EXT4_LBLK_CFILL(sbi, i)) {
			rc->ndelonly++;
			rc->partial = false;
			i = EXT4_LBLK_CFILL(sbi, i) + 1;
		}
	}

	/*
	 * if the current position starts on a cluster boundary, count the
	 * number of whole delonly clusters in the extent
	 */
	if ((i + sbi->s_cluster_ratio - 1) <= end) {
		nclu = (end - i + 1) >> sbi->s_cluster_bits;
		rc->ndelonly += nclu;
		i += nclu << sbi->s_cluster_bits;
	}

	/*
	 * start tracking a partial cluster if there's a partial at the end
	 * of the current extent and we're not already tracking one
	 */
	if (!rc->partial && i <= end) {
		rc->partial = true;
		rc->lclu = EXT4_B2C(sbi, i);
	}
}
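
/*
 * Worked example, assuming bigalloc with s_cluster_ratio == 4 (cluster N
 * covers blocks 4N..4N+3): counting delonly blocks 2..9 of an extent
 * counts cluster 0 (block 3, cluster 0's last block, is inside the range)
 * and cluster 1 (blocks 4..7, a whole cluster), then leaves cluster 2
 * (blocks 8..9) tracked as a partial so an adjacent extent continuing at
 * block 10 doesn't cause it to be counted twice.
 */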

/*
 * __pr_tree_search - search for a pending cluster reservation
 *
 * @root - root of pending reservation tree
 * @lclu - logical cluster to search for
 *
 * Returns the pending reservation for the cluster identified by @lclu
 * if found.  If not, returns a reservation for the next cluster if any,
 * and if not, returns NULL.
 */
static struct pending_reservation *__pr_tree_search(struct rb_root *root,
						    ext4_lblk_t lclu)
{
	struct rb_node *node = root->rb_node;
	struct pending_reservation *pr = NULL;

	while (node) {
		pr = rb_entry(node, struct pending_reservation, rb_node);
		if (lclu < pr->lclu)
			node = node->rb_left;
		else if (lclu > pr->lclu)
			node = node->rb_right;
		else
			return pr;
	}
	if (pr && lclu < pr->lclu)
		return pr;
	if (pr && lclu > pr->lclu) {
		node = rb_next(&pr->rb_node);
		return node ? rb_entry(node, struct pending_reservation,
				       rb_node) : NULL;
	}
	return NULL;
}

/*
 * get_rsvd - calculates and returns the number of cluster reservations to be
 *	      released when removing a block range from the extent status tree
 *	      and releases any pending reservations within the range
 *
 * @inode - file containing block range
 * @end - last block in range
 * @right_es - pointer to extent containing next block beyond end or NULL
 * @rc - pointer to reserved count data
 *
 * The number of reservations to be released is equal to the number of
 * clusters containing delayed and not unwritten (delonly) blocks within
 * the range, minus the number of clusters still containing delonly blocks
 * at the ends of the range, and minus the number of pending reservations
 * within the range.
 */
static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
			     struct extent_status *right_es,
			     struct rsvd_count *rc)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct pending_reservation *pr;
	struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
	struct rb_node *node;
	ext4_lblk_t first_lclu, last_lclu;
	bool left_delonly, right_delonly, count_pending;
	struct extent_status *es;

	if (sbi->s_cluster_ratio > 1) {
		/* count any remaining partial cluster */
		if (rc->partial)
			rc->ndelonly++;

		if (rc->ndelonly == 0)
			return 0;

		first_lclu = EXT4_B2C(sbi, rc->first_do_lblk);
		last_lclu = EXT4_B2C(sbi, rc->last_do_lblk);

		/*
		 * decrease the delonly count by the number of clusters at the
		 * ends of the range that still contain delonly blocks -
		 * these clusters still need to be reserved
		 */
		left_delonly = right_delonly = false;

		es = rc->left_es;
		while (es && ext4_es_end(es) >=
		       EXT4_LBLK_CMASK(sbi, rc->first_do_lblk)) {
			if (ext4_es_is_delonly(es)) {
				rc->ndelonly--;
				left_delonly = true;
				break;
			}
			node = rb_prev(&es->rb_node);
			if (!node)
				break;
			es = rb_entry(node, struct extent_status, rb_node);
		}
		if (right_es && (!left_delonly || first_lclu != last_lclu)) {
			if (end < ext4_es_end(right_es)) {
				es = right_es;
			} else {
				node = rb_next(&right_es->rb_node);
				es = node ? rb_entry(node, struct extent_status,
						     rb_node) : NULL;
			}
			while (es && es->es_lblk <=
			       EXT4_LBLK_CFILL(sbi, rc->last_do_lblk)) {
				if (ext4_es_is_delonly(es)) {
					rc->ndelonly--;
					right_delonly = true;
					break;
				}
				node = rb_next(&es->rb_node);
				if (!node)
					break;
				es = rb_entry(node, struct extent_status,
					      rb_node);
			}
		}

		/*
		 * Determine the block range that should be searched for
		 * pending reservations, if any.  Clusters on the ends of the
		 * original removed range containing delonly blocks are
		 * excluded.  They've already been accounted for and it's not
		 * possible to determine if an associated pending reservation
		 * should be released with the information available in the
		 * extents status tree.
		 */
		if (first_lclu == last_lclu) {
			if (left_delonly | right_delonly)
				count_pending = false;
			else
				count_pending = true;
		} else {
			if (left_delonly)
				first_lclu++;
			if (right_delonly)
				last_lclu--;
			if (first_lclu <= last_lclu)
				count_pending = true;
			else
				count_pending = false;
		}

		/*
		 * a pending reservation found between first_lclu and last_lclu
		 * represents an allocated cluster that contained at least one
		 * delonly block, so the delonly total must be reduced by one
		 * for each pending reservation found and released
		 */
		if (count_pending) {
			pr = __pr_tree_search(&tree->root, first_lclu);
			while (pr && pr->lclu <= last_lclu) {
				rc->ndelonly--;
				node = rb_next(&pr->rb_node);
				rb_erase(&pr->rb_node, &tree->root);
				kmem_cache_free(ext4_pending_cachep, pr);
				if (!node)
					break;
				pr = rb_entry(node, struct pending_reservation,
					      rb_node);
			}
		}
	}
	return rc->ndelonly;
}
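
/*
 * For example, if the removed range covered three delonly clusters, the
 * leftmost cluster still holds a delonly block outside the range, and one
 * pending reservation is found and released in the interior, get_rsvd()
 * returns 3 - 1 - 1 = 1 reservation to release.
 */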

/*
 * __es_remove_extent - removes block range from extent status tree
 *
 * @inode - file containing range
 * @lblk - first block in range
 * @end - last block in range
 * @reserved - number of cluster reservations released
 *
 * If @reserved is not NULL and delayed allocation is enabled, counts
 * block/cluster reservations freed by removing range and if bigalloc
 * enabled cancels pending reservations as needed.  Returns 0 on success,
 * error code on failure.
 */
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end, int *reserved)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node *node;
	struct extent_status *es;
	struct extent_status orig_es;
	ext4_lblk_t len1, len2;
	ext4_fsblk_t block;
	int err;
	bool count_reserved = true;
	struct rsvd_count rc;

	if (reserved == NULL || !test_opt(inode->i_sb, DELALLOC))
		count_reserved = false;
retry:
	err = 0;

	es = __es_tree_search(&tree->root, lblk);
	if (!es)
		goto out;
	if (es->es_lblk > end)
		goto out;

	/* Simply invalidate cache_es. */
	tree->cache_es = NULL;
	if (count_reserved)
		init_rsvd(inode, lblk, es, &rc);

	orig_es.es_lblk = es->es_lblk;
	orig_es.es_len = es->es_len;
	orig_es.es_pblk = es->es_pblk;

	len1 = lblk > es->es_lblk ? lblk - es->es_lblk : 0;
	len2 = ext4_es_end(es) > end ? ext4_es_end(es) - end : 0;
	if (len1 > 0)
		es->es_len = len1;
	if (len2 > 0) {
		if (len1 > 0) {
			struct extent_status newes;

			newes.es_lblk = end + 1;
			newes.es_len = len2;
			block = 0x7FDEADBEEFULL;
			if (ext4_es_is_written(&orig_es) ||
			    ext4_es_is_unwritten(&orig_es))
				block = ext4_es_pblock(&orig_es) +
					orig_es.es_len - len2;
			ext4_es_store_pblock_status(&newes, block,
						    ext4_es_status(&orig_es));
			err = __es_insert_extent(inode, &newes);
			if (err) {
				es->es_lblk = orig_es.es_lblk;
				es->es_len = orig_es.es_len;
				if ((err == -ENOMEM) &&
				    __es_shrink(EXT4_SB(inode->i_sb),
						128, EXT4_I(inode)))
					goto retry;
				goto out;
			}
		} else {
			es->es_lblk = end + 1;
			es->es_len = len2;
			if (ext4_es_is_written(es) ||
			    ext4_es_is_unwritten(es)) {
				block = orig_es.es_pblk + orig_es.es_len - len2;
				ext4_es_store_pblock(es, block);
			}
		}
		if (count_reserved)
			count_rsvd(inode, lblk, orig_es.es_len - len1 - len2,
				   &orig_es, &rc);
		goto out;
	}

	if (len1 > 0) {
		if (count_reserved)
			count_rsvd(inode, lblk, orig_es.es_len - len1,
				   &orig_es, &rc);
		node = rb_next(&es->rb_node);
		if (node)
			es = rb_entry(node, struct extent_status, rb_node);
		else
			es = NULL;
	}

	while (es && ext4_es_end(es) <= end) {
		if (count_reserved)
			count_rsvd(inode, es->es_lblk, es->es_len, es, &rc);
		node = rb_next(&es->rb_node);
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		if (!node) {
			es = NULL;
			break;
		}
		es = rb_entry(node, struct extent_status, rb_node);
	}

	if (es && es->es_lblk < end + 1) {
		ext4_lblk_t orig_len = es->es_len;

		len1 = ext4_es_end(es) - end;
		if (count_reserved)
			count_rsvd(inode, es->es_lblk, orig_len - len1,
				   es, &rc);
		es->es_lblk = end + 1;
		es->es_len = len1;
		if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) {
			block = es->es_pblk + orig_len - len1;
			ext4_es_store_pblock(es, block);
		}
	}

	if (count_reserved)
		*reserved = get_rsvd(inode, end, es, &rc);
out:
	return err;
}
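
/*
 * For example, removing blocks 3..6 from a single extent [0/10) leaves
 * [0/3) (len1 == 3) and [7/3) (len2 == 3); for a written/unwritten
 * extent, the second piece's pblk is advanced past the removed blocks
 * by orig_es.es_len - len2.
 */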

/*
 * ext4_es_remove_extent - removes block range from extent status tree
 *
 * @inode - file containing range
 * @lblk - first block in range
 * @len - number of blocks to remove
 *
 * Reduces block/cluster reservation count and for bigalloc cancels pending
 * reservations as needed.  Returns 0 on success, error code on failure.
 */
int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len)
{
	ext4_lblk_t end;
	int err = 0;
	int reserved = 0;

	trace_ext4_es_remove_extent(inode, lblk, len);
	es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
		 lblk, len, inode->i_ino);

	if (!len)
		return err;

	end = lblk + len - 1;
	BUG_ON(end < lblk);

	/*
	 * ext4_clear_inode() depends on us taking i_es_lock unconditionally
	 * so that we are sure __es_shrink() is done with the inode before it
	 * is reclaimed.
	 */
	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end, &reserved);
	write_unlock(&EXT4_I(inode)->i_es_lock);
	ext4_es_print_tree(inode);
	ext4_da_release_space(inode, reserved);
	return err;
}

static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
		       struct ext4_inode_info *locked_ei)
{
	struct ext4_inode_info *ei;
	struct ext4_es_stats *es_stats;
	ktime_t start_time;
	u64 scan_time;
	int nr_to_walk;
	int nr_shrunk = 0;
	int retried = 0, nr_skipped = 0;

	es_stats = &sbi->s_es_stats;
	start_time = ktime_get();

retry:
	spin_lock(&sbi->s_es_lock);
	nr_to_walk = sbi->s_es_nr_inode;
	while (nr_to_walk-- > 0) {
		if (list_empty(&sbi->s_es_list)) {
			spin_unlock(&sbi->s_es_lock);
			goto out;
		}
		ei = list_first_entry(&sbi->s_es_list, struct ext4_inode_info,
				      i_es_list);
		/* Move the inode to the tail */
		list_move_tail(&ei->i_es_list, &sbi->s_es_list);

		/*
		 * Normally we try hard to avoid shrinking precached inodes,
		 * but we will as a last resort.
		 */
		if (!retried && ext4_test_inode_state(&ei->vfs_inode,
						      EXT4_STATE_EXT_PRECACHED)) {
			nr_skipped++;
			continue;
		}

		if (ei == locked_ei || !write_trylock(&ei->i_es_lock)) {
			nr_skipped++;
			continue;
		}
		/*
		 * Now we hold i_es_lock which protects us from inode reclaim
		 * freeing inode under us
		 */
		spin_unlock(&sbi->s_es_lock);

		nr_shrunk += es_reclaim_extents(ei, &nr_to_scan);
		write_unlock(&ei->i_es_lock);

		if (nr_to_scan <= 0)
			goto out;
		spin_lock(&sbi->s_es_lock);
	}
	spin_unlock(&sbi->s_es_lock);

	/*
	 * If we skipped any inodes, and we weren't able to make any
	 * forward progress, try again to scan precached inodes.
	 */
	if ((nr_shrunk == 0) && nr_skipped && !retried) {
		retried++;
		goto retry;
	}

	if (locked_ei && nr_shrunk == 0)
		nr_shrunk = es_reclaim_extents(locked_ei, &nr_to_scan);

out:
	scan_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
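	/*
	 * Keep an exponentially weighted moving average of scan times
	 * (and, below, of shrunk counts): new_avg = (new + 3 * old) / 4.
	 * For example, an old average of 80us and a new 120us scan give
	 * (120 + 3*80)/4 = 90us.
	 */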
	if (likely(es_stats->es_stats_scan_time))
		es_stats->es_stats_scan_time = (scan_time +
				es_stats->es_stats_scan_time*3) / 4;
	else
		es_stats->es_stats_scan_time = scan_time;
	if (scan_time > es_stats->es_stats_max_scan_time)
		es_stats->es_stats_max_scan_time = scan_time;
	if (likely(es_stats->es_stats_shrunk))
		es_stats->es_stats_shrunk = (nr_shrunk +
				es_stats->es_stats_shrunk*3) / 4;
	else
		es_stats->es_stats_shrunk = nr_shrunk;

	trace_ext4_es_shrink(sbi->s_sb, nr_shrunk, scan_time,
			     nr_skipped, retried);
	return nr_shrunk;
}

static unsigned long ext4_es_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	unsigned long nr;
	struct ext4_sb_info *sbi;

	sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker);
	nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_count(sbi->s_sb, sc->nr_to_scan, nr);
	return nr;
}

static unsigned long ext4_es_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct ext4_sb_info *sbi = container_of(shrink,
					struct ext4_sb_info, s_es_shrinker);
	int nr_to_scan = sc->nr_to_scan;
	int ret, nr_shrunk;

	ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret);

	nr_shrunk = __es_shrink(sbi, nr_to_scan, NULL);

	ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret);
	return nr_shrunk;
}

int ext4_seq_es_shrinker_info_show(struct seq_file *seq, void *v)
{
	struct ext4_sb_info *sbi = EXT4_SB((struct super_block *) seq->private);
	struct ext4_es_stats *es_stats = &sbi->s_es_stats;
	struct ext4_inode_info *ei, *max = NULL;
	unsigned int inode_cnt = 0;

	if (v != SEQ_START_TOKEN)
		return 0;

	/* here we just find an inode that has the max nr. of objects */
	spin_lock(&sbi->s_es_lock);
	list_for_each_entry(ei, &sbi->s_es_list, i_es_list) {
		inode_cnt++;
		if (max && max->i_es_all_nr < ei->i_es_all_nr)
			max = ei;
		else if (!max)
			max = ei;
	}
	spin_unlock(&sbi->s_es_lock);

	seq_printf(seq, "stats:\n  %lld objects\n  %lld reclaimable objects\n",
		   percpu_counter_sum_positive(&es_stats->es_stats_all_cnt),
		   percpu_counter_sum_positive(&es_stats->es_stats_shk_cnt));
	seq_printf(seq, "  %lld/%lld cache hits/misses\n",
		   percpu_counter_sum_positive(&es_stats->es_stats_cache_hits),
		   percpu_counter_sum_positive(&es_stats->es_stats_cache_misses));
	if (inode_cnt)
		seq_printf(seq, "  %d inodes on list\n", inode_cnt);

	seq_printf(seq, "average:\n  %llu us scan time\n",
		   div_u64(es_stats->es_stats_scan_time, 1000));
	seq_printf(seq, "  %lu shrunk objects\n", es_stats->es_stats_shrunk);
	if (inode_cnt)
		seq_printf(seq,
			"maximum:\n  %lu inode (%u objects, %u reclaimable)\n"
			"  %llu us max scan time\n",
			max->vfs_inode.i_ino, max->i_es_all_nr, max->i_es_shk_nr,
			div_u64(es_stats->es_stats_max_scan_time, 1000));

	return 0;
}

int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
{
	int err;

	/* Make sure we have enough bits for physical block number */
	BUILD_BUG_ON(ES_SHIFT < 48);
	INIT_LIST_HEAD(&sbi->s_es_list);
	sbi->s_es_nr_inode = 0;
	spin_lock_init(&sbi->s_es_lock);
	sbi->s_es_stats.es_stats_shrunk = 0;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_cache_hits, 0,
				  GFP_KERNEL);
	if (err)
		return err;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_cache_misses, 0,
				  GFP_KERNEL);
	if (err)
		goto err1;
	sbi->s_es_stats.es_stats_scan_time = 0;
	sbi->s_es_stats.es_stats_max_scan_time = 0;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_all_cnt, 0, GFP_KERNEL);
	if (err)
		goto err2;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_shk_cnt, 0, GFP_KERNEL);
	if (err)
		goto err3;

	sbi->s_es_shrinker.scan_objects = ext4_es_scan;
	sbi->s_es_shrinker.count_objects = ext4_es_count;
	sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
	err = register_shrinker(&sbi->s_es_shrinker);
	if (err)
		goto err4;

	return 0;
err4:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
err3:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
err2:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_misses);
err1:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_hits);
	return err;
}

void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_hits);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_misses);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
	unregister_shrinker(&sbi->s_es_shrinker);
}

/*
 * Shrink extents in given inode from ei->i_es_shrink_lblk till end.  Scan at
 * most *nr_to_scan extents, update *nr_to_scan accordingly.
 *
 * Return 0 if we hit the end of tree / interval, 1 if we exhausted nr_to_scan.
 * Increment *nr_shrunk by the number of reclaimed extents.  Also update
 * ei->i_es_shrink_lblk to where we should continue scanning.
 */
static int es_do_reclaim_extents(struct ext4_inode_info *ei, ext4_lblk_t end,
				 int *nr_to_scan, int *nr_shrunk)
{
	struct inode *inode = &ei->vfs_inode;
	struct ext4_es_tree *tree = &ei->i_es_tree;
	struct extent_status *es;
	struct rb_node *node;

	es = __es_tree_search(&tree->root, ei->i_es_shrink_lblk);
	if (!es)
		goto out_wrap;

	while (*nr_to_scan > 0) {
		if (es->es_lblk > end) {
			ei->i_es_shrink_lblk = end + 1;
			return 0;
		}

		(*nr_to_scan)--;
		node = rb_next(&es->rb_node);
		/*
		 * We can't reclaim delayed extents from the status tree
		 * because fiemap, bigalloc, and seek_data/hole need to
		 * use them.
		 */
		if (ext4_es_is_delayed(es))
			goto next;
		if (ext4_es_is_referenced(es)) {
			ext4_es_clear_referenced(es);
			goto next;
		}

		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		(*nr_shrunk)++;
next:
		if (!node)
			goto out_wrap;
		es = rb_entry(node, struct extent_status, rb_node);
	}
	ei->i_es_shrink_lblk = es->es_lblk;
	return 1;
out_wrap:
	ei->i_es_shrink_lblk = 0;
	return 0;
}

static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan)
{
	struct inode *inode = &ei->vfs_inode;
	int nr_shrunk = 0;
	ext4_lblk_t start = ei->i_es_shrink_lblk;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (ei->i_es_shk_nr == 0)
		return 0;

	if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) &&
	    __ratelimit(&_rs))
		ext4_warning(inode->i_sb, "forced shrink of precached extents");

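	/*
	 * Scan forward from the saved position to the end of the file's
	 * logical range first; if that pass hits the end of the tree
	 * without exhausting *nr_to_scan, wrap around and scan
	 * [0, start - 1].
	 */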
	if (!es_do_reclaim_extents(ei, EXT_MAX_BLOCKS, nr_to_scan, &nr_shrunk) &&
	    start != 0)
		es_do_reclaim_extents(ei, start - 1, nr_to_scan, &nr_shrunk);

	ei->i_es_tree.cache_es = NULL;
	return nr_shrunk;
}

/*
 * Called to support EXT4_IOC_CLEAR_ES_CACHE. We can only remove
 * discretionary entries from the extent status cache. (Some entries
 * must be present for proper operation.)
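 *
 * A minimal userspace sketch of reaching this function (hypothetical
 * path name; assumes the kernel exposes EXT4_IOC_CLEAR_ES_CACHE):
 *
 *	int fd = open("/mnt/ext4/file", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, EXT4_IOC_CLEAR_ES_CACHE) < 0)
 *		perror("EXT4_IOC_CLEAR_ES_CACHE");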
 */
void ext4_clear_inode_es(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct extent_status *es;
	struct ext4_es_tree *tree;
	struct rb_node *node;

	write_lock(&ei->i_es_lock);
	tree = &EXT4_I(inode)->i_es_tree;
	tree->cache_es = NULL;
	node = rb_first(&tree->root);
	while (node) {
		es = rb_entry(node, struct extent_status, rb_node);
		node = rb_next(node);
		if (!ext4_es_is_delayed(es)) {
			rb_erase(&es->rb_node, &tree->root);
			ext4_es_free_extent(inode, es);
		}
	}
	ext4_clear_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
	write_unlock(&ei->i_es_lock);
}

#ifdef ES_DEBUG__
static void ext4_print_pending_tree(struct inode *inode)
{
	struct ext4_pending_tree *tree;
	struct rb_node *node;
	struct pending_reservation *pr;

	printk(KERN_DEBUG "pending reservations for inode %lu:", inode->i_ino);
	tree = &EXT4_I(inode)->i_pending_tree;
	node = rb_first(&tree->root);
	while (node) {
		pr = rb_entry(node, struct pending_reservation, rb_node);
		printk(KERN_DEBUG " %u", pr->lclu);
		node = rb_next(node);
	}
	printk(KERN_DEBUG "\n");
}
#else
#define ext4_print_pending_tree(inode)
#endif

int __init ext4_init_pending(void)
{
	ext4_pending_cachep = kmem_cache_create("ext4_pending_reservation",
					sizeof(struct pending_reservation),
					0, (SLAB_RECLAIM_ACCOUNT), NULL);
	if (ext4_pending_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_pending(void)
{
	kmem_cache_destroy(ext4_pending_cachep);
}

void ext4_init_pending_tree(struct ext4_pending_tree *tree)
{
	tree->root = RB_ROOT;
}

/*
 * __get_pending - retrieve a pointer to a pending reservation
 *
 * @inode - file containing the pending cluster reservation
 * @lclu - logical cluster of interest
 *
 * Returns a pointer to a pending reservation if it's a member of
 * the set, and NULL if not. Must be called holding i_es_lock.
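 *
 * For example, if clusters 2, 5, and 9 have pending reservations,
 * __get_pending(inode, 5) returns the reservation for cluster 5 and
 * __get_pending(inode, 4) returns NULL.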
 */
static struct pending_reservation *__get_pending(struct inode *inode,
						 ext4_lblk_t lclu)
{
	struct ext4_pending_tree *tree;
	struct rb_node *node;
	struct pending_reservation *pr = NULL;

	tree = &EXT4_I(inode)->i_pending_tree;
	node = (&tree->root)->rb_node;

	while (node) {
		pr = rb_entry(node, struct pending_reservation, rb_node);
		if (lclu < pr->lclu)
			node = node->rb_left;
		else if (lclu > pr->lclu)
			node = node->rb_right;
		else
			return pr;
	}
	return NULL;
}

/*
 * __insert_pending - adds a pending cluster reservation to the set of
 *                    pending reservations
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the cluster to be added
 *
 * Returns 0 on successful insertion and -ENOMEM on failure. If the
 * pending reservation is already in the set, returns successfully.
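 *
 * Reservations are tracked per cluster: with a cluster ratio of 16, for
 * example, adding lblk 35 records pending cluster EXT4_B2C(sbi, 35) == 2,
 * and adding any other block in that cluster afterwards is a no-op.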
 */
static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
	struct rb_node **p = &tree->root.rb_node;
	struct rb_node *parent = NULL;
	struct pending_reservation *pr;
	ext4_lblk_t lclu;
	int ret = 0;

	lclu = EXT4_B2C(sbi, lblk);
	/* search to find parent for insertion */
	while (*p) {
		parent = *p;
		pr = rb_entry(parent, struct pending_reservation, rb_node);

		if (lclu < pr->lclu) {
			p = &(*p)->rb_left;
		} else if (lclu > pr->lclu) {
			p = &(*p)->rb_right;
		} else {
			/* pending reservation already inserted */
			goto out;
		}
	}

	pr = kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
	if (pr == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	pr->lclu = lclu;

	/* link the new node at the leaf slot found above and rebalance */
	rb_link_node(&pr->rb_node, parent, p);
	rb_insert_color(&pr->rb_node, &tree->root);

out:
	return ret;
}

/*
 * __remove_pending - removes a pending cluster reservation from the set
 *                    of pending reservations
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the pending cluster reservation to be removed
 *
 * Does nothing if the pending reservation is not a member of the set.
 */
static void __remove_pending(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct pending_reservation *pr;
	struct ext4_pending_tree *tree;

	pr = __get_pending(inode, EXT4_B2C(sbi, lblk));
	if (pr != NULL) {
		tree = &EXT4_I(inode)->i_pending_tree;
		rb_erase(&pr->rb_node, &tree->root);
		kmem_cache_free(ext4_pending_cachep, pr);
	}
}

/*
 * ext4_remove_pending - removes a pending cluster reservation from the set
 *                       of pending reservations
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the pending cluster reservation to be removed
 *
 * Locking for external use of __remove_pending.
 */
void ext4_remove_pending(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	write_lock(&ei->i_es_lock);
	__remove_pending(inode, lblk);
	write_unlock(&ei->i_es_lock);
}

/*
 * ext4_is_pending - determine whether a cluster has a pending reservation
 *                   on it
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the cluster
 *
 * Returns true if there's a pending reservation for the cluster in the
 * set of pending reservations, and false if not.
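 *
 * A sketch of the intended calling pattern (hypothetical caller, not a
 * function in this file):
 *
 *	if (!ext4_is_pending(inode, lblk)) {
 *		... no reservation is pending for the cluster that
 *		... contains lblk, so a delayed block added there may
 *		... need a new cluster reservation
 *	}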
 */
bool ext4_is_pending(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	bool ret;

	read_lock(&ei->i_es_lock);
	ret = __get_pending(inode, EXT4_B2C(sbi, lblk)) != NULL;
	read_unlock(&ei->i_es_lock);

	return ret;
}

/*
 * ext4_es_insert_delayed_block - adds a delayed block to the extents status
 *                                tree, adding a pending reservation where
 *                                needed
 *
 * @inode - file containing the newly added block
 * @lblk - logical block to be added
 * @allocated - indicates whether a physical cluster has been allocated for
 *              the logical cluster that contains the block
 *
 * Returns 0 on success, negative error code on failure.
 */
int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
				 bool allocated)
{
	struct extent_status newes;
	int err = 0;

	es_debug("add [%u/1) delayed to extent status tree of inode %lu\n",
		 lblk, inode->i_ino);

	newes.es_lblk = lblk;
	newes.es_len = 1;
	ext4_es_store_pblock_status(&newes, ~0, EXTENT_STATUS_DELAYED);
	trace_ext4_es_insert_delayed_block(inode, &newes, allocated);

	ext4_es_insert_extent_check(inode, &newes);

	write_lock(&EXT4_I(inode)->i_es_lock);

	err = __es_remove_extent(inode, lblk, lblk, NULL);
	if (err != 0)
		goto error;
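	/*
	 * On -ENOMEM, ask the extent status shrinker to reclaim up to 128
	 * entries and then retry the insert. EXT4_I(inode) is passed so
	 * __es_shrink() knows this inode's i_es_lock is already held.
	 */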
retry:
	err = __es_insert_extent(inode, &newes);
	if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
					  128, EXT4_I(inode)))
		goto retry;
	if (err != 0)
		goto error;

	if (allocated)
		__insert_pending(inode, lblk);

error:
	write_unlock(&EXT4_I(inode)->i_es_lock);

	ext4_es_print_tree(inode);
	ext4_print_pending_tree(inode);

	return err;
}

/*
 * __es_delayed_clu - count number of clusters containing blocks that
 *                    are delayed only
 *
 * @inode - file containing block range
 * @start - logical block defining start of range
 * @end - logical block defining end of range
 *
 * Returns the number of clusters containing only delayed (not delayed
 * and unwritten) blocks in the range specified by @start and @end. A
 * cluster that lies partly or wholly within the range is counted as a
 * whole cluster if it contains at least one delayed-only block in the
 * range.
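 *
 * For example, with a cluster ratio of 16 and delayed-only extents
 * covering blocks [0, 4] and [8, 40], a scan of [0, 40] counts clusters
 * 0, 1, and 2 and returns 3; last_counted_lclu keeps cluster 0, which
 * both extents touch, from being counted twice.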
 */
static unsigned int __es_delayed_clu(struct inode *inode, ext4_lblk_t start,
				     ext4_lblk_t end)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct rb_node *node;
	ext4_lblk_t first_lclu, last_lclu;
	unsigned long long last_counted_lclu;
	unsigned int n = 0;

	/* guaranteed to be unequal to any ext4_lblk_t value */
	last_counted_lclu = ~0ULL;

	es = __es_tree_search(&tree->root, start);

	while (es && (es->es_lblk <= end)) {
		if (ext4_es_is_delonly(es)) {
			if (es->es_lblk <= start)
				first_lclu = EXT4_B2C(sbi, start);
			else
				first_lclu = EXT4_B2C(sbi, es->es_lblk);

			if (ext4_es_end(es) >= end)
				last_lclu = EXT4_B2C(sbi, end);
			else
				last_lclu = EXT4_B2C(sbi, ext4_es_end(es));

			if (first_lclu == last_counted_lclu)
				n += last_lclu - first_lclu;
			else
				n += last_lclu - first_lclu + 1;
			last_counted_lclu = last_lclu;
		}
		node = rb_next(&es->rb_node);
		if (!node)
			break;
		es = rb_entry(node, struct extent_status, rb_node);
	}

	return n;
}

/*
 * ext4_es_delayed_clu - count number of clusters containing blocks that
 *                       are delayed only
 *
 * @inode - file containing block range
 * @lblk - logical block defining start of range
 * @len - number of blocks in range
 *
 * Locking for external use of __es_delayed_clu().
 */
unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
				 ext4_lblk_t len)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_lblk_t end;
	unsigned int n;

	if (len == 0)
		return 0;

	end = lblk + len - 1;
	WARN_ON(end < lblk);

	read_lock(&ei->i_es_lock);

	n = __es_delayed_clu(inode, lblk, end);

	read_unlock(&ei->i_es_lock);

	return n;
}

/*
 * __revise_pending - makes, cancels, or leaves unchanged pending cluster
 *                    reservations for a specified block range depending
 *                    upon the presence or absence of delayed blocks
 *                    outside the range within clusters at the ends of the
 *                    range
 *
 * @inode - file containing the range
 * @lblk - logical block defining the start of range
 * @len - length of range in blocks
 *
 * Used after a newly allocated extent is added to the extents status tree.
 * Requires that the extents in the range have either written or unwritten
 * status. Must be called while holding i_es_lock.
 */
static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
			     ext4_lblk_t len)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t end = lblk + len - 1;
	ext4_lblk_t first, last;
	bool f_del = false, l_del = false;

	if (len == 0)
		return;

	/*
	 * Two cases - block range within single cluster and block range
	 * spanning two or more clusters. Note that a cluster belonging
	 * to a range starting and/or ending on a cluster boundary is treated
	 * as if it does not contain a delayed extent. The new range may
	 * have allocated space for previously delayed blocks out to the
	 * cluster boundary, requiring that any pre-existing pending
	 * reservation be canceled. Because this code only looks at blocks
	 * outside the range, it should revise pending reservations
	 * correctly even if the extent represented by the range can't be
	 * inserted in the extents status tree due to ENOSPC.
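	 *
	 * For example, with a cluster ratio of 4, a newly mapped range
	 * [5, 6] lies entirely within cluster 1 (blocks 4-7). Block 4 is
	 * scanned for a delayed-only extent; if one is found, the cluster
	 * keeps a pending reservation. Otherwise block 7 decides whether
	 * the reservation is kept or canceled.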
	 */

	if (EXT4_B2C(sbi, lblk) == EXT4_B2C(sbi, end)) {
		first = EXT4_LBLK_CMASK(sbi, lblk);
		if (first != lblk)
			f_del = __es_scan_range(inode, &ext4_es_is_delonly,
						first, lblk - 1);
		if (f_del) {
			__insert_pending(inode, first);
		} else {
			last = EXT4_LBLK_CMASK(sbi, end) +
			       sbi->s_cluster_ratio - 1;
			if (last != end)
				l_del = __es_scan_range(inode,
							&ext4_es_is_delonly,
							end + 1, last);
			if (l_del)
				__insert_pending(inode, last);
			else
				__remove_pending(inode, last);
		}
	} else {
		first = EXT4_LBLK_CMASK(sbi, lblk);
		if (first != lblk)
			f_del = __es_scan_range(inode, &ext4_es_is_delonly,
						first, lblk - 1);
		if (f_del)
			__insert_pending(inode, first);
		else
			__remove_pending(inode, first);

		last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1;
		if (last != end)
			l_del = __es_scan_range(inode, &ext4_es_is_delonly,
						end + 1, last);
		if (l_del)
			__insert_pending(inode, last);
		else
			__remove_pending(inode, last);
	}
}