// SPDX-License-Identifier: GPL-2.0

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "ctree.h"
#include "volumes.h"
#include "extent_map.h"
#include "compression.h"


static struct kmem_cache *extent_map_cache;

int __init extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("btrfs_extent_map",
			sizeof(struct extent_map), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}

void __cold extent_map_exit(void)
{
	kmem_cache_destroy(extent_map_cache);
}

/**
 * extent_map_tree_init - initialize extent map tree
 * @tree:	tree to initialize
 *
 * Initialize the extent map tree @tree.  Should be called for each new inode
 * or other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree)
{
	tree->map = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&tree->modified_extents);
	rwlock_init(&tree->lock);
}
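
/*
 * A tree is typically embedded in a longer-lived structure (for file data,
 * btrfs_inode embeds one as ->extent_tree) and initialized exactly once,
 * e.g.:
 *
 *	struct extent_map_tree tree;
 *
 *	extent_map_tree_init(&tree);
 */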

/**
 * alloc_extent_map - allocate new extent map structure
 *
 * Allocate a new extent_map structure.  The new structure is
 * returned with a reference count of one and needs to be
 * freed using free_extent_map()
 */
struct extent_map *alloc_extent_map(void)
{
	struct extent_map *em;
	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
	if (!em)
		return NULL;
	RB_CLEAR_NODE(&em->rb_node);
	em->flags = 0;
	em->compress_type = BTRFS_COMPRESS_NONE;
	em->generation = 0;
	refcount_set(&em->refs, 1);
	INIT_LIST_HEAD(&em->list);
	return em;
}

/**
 * free_extent_map - drop reference count of an extent_map
 * @em:		extent map being released
 *
 * Drops the reference count on @em by one and frees the structure
 * if the reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	WARN_ON(refcount_read(&em->refs) == 0);
	if (refcount_dec_and_test(&em->refs)) {
		WARN_ON(extent_map_in_tree(em));
		WARN_ON(!list_empty(&em->list));
		if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
			kfree(em->map_lookup);
		kmem_cache_free(extent_map_cache, em);
	}
}
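
/*
 * Illustrative sketch of the allocation side of the lifecycle: a new map
 * starts with one reference, and free_extent_map() on that last reference
 * returns it to the slab cache.  The field values here are placeholders.
 */
static void __maybe_unused extent_map_lifecycle_sketch(void)
{
	struct extent_map *em;

	em = alloc_extent_map();
	if (!em)
		return;
	em->start = 0;
	em->len = 4096;
	em->block_start = EXTENT_MAP_HOLE;
	/* ... insert into a tree or use directly, then drop our ref ... */
	free_extent_map(em);
}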

/*
 * Simple helper to do math around the end of an extent, handling wrap: if
 * start + len overflows the u64 space, clamp the end to (u64)-1.
 */
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}

static int tree_insert(struct rb_root_cached *root, struct extent_map *em)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry = NULL;
	struct rb_node *orig_parent = NULL;
	u64 end = range_end(em->start, em->len);
	bool leftmost = true;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		if (em->start < entry->start) {
			p = &(*p)->rb_left;
		} else if (em->start >= extent_map_end(entry)) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return -EEXIST;
		}
	}

	orig_parent = parent;
	while (parent && em->start >= extent_map_end(entry)) {
		parent = rb_next(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	parent = orig_parent;
	entry = rb_entry(parent, struct extent_map, rb_node);
	while (parent && em->start < entry->start) {
		parent = rb_prev(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	rb_link_node(&em->rb_node, orig_parent, p);
	rb_insert_color_cached(&em->rb_node, root, leftmost);
	return 0;
}

/*
 * search through the tree for an extent_map with a given offset.  If
 * it can't be found, try to find some neighboring extents
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

/* check to see if two extent_map structs are adjacent and safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;

	/*
	 * don't merge compressed extents, we need to know their
	 * actual size
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
		return 0;

	if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) ||
	    test_bit(EXTENT_FLAG_LOGGING, &next->flags))
		return 0;

	/*
	 * We don't want to merge stuff that hasn't been written to the log yet
	 * since it may not reflect exactly what is on disk, and that would be
	 * bad.
	 */
	if (!list_empty(&prev->list) || !list_empty(&next->list))
		return 0;

	ASSERT(next->block_start != EXTENT_MAP_DELALLOC &&
	       prev->block_start != EXTENT_MAP_DELALLOC);

	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->bdev == next->bdev &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
	return 0;
}

static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
	struct extent_map *merge = NULL;
	struct rb_node *rb;

	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->orig_start = merge->orig_start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
			em->mod_start = merge->mod_start;
			em->generation = max(em->generation, merge->generation);

			rb_erase_cached(&merge->rb_node, &tree->map);
			RB_CLEAR_NODE(&merge->rb_node);
			free_extent_map(merge);
		}
	}

	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->block_len;
		rb_erase_cached(&merge->rb_node, &tree->map);
		RB_CLEAR_NODE(&merge->rb_node);
		em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
		em->generation = max(em->generation, merge->generation);
		free_extent_map(merge);
	}
}

/**
 * unpin_extent_cache - unpin an extent from the cache
 * @tree:	tree to unpin the extent in
 * @start:	logical offset in the file
 * @len:	length of the extent
 * @gen:	generation that this extent has been modified in
 *
 * Called after an extent has been written to disk properly.  Set the generation
 * to the generation that actually added the file item to the inode so we know
 * we need to sync this extent when we call fsync().
 */
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
		       u64 gen)
{
	int ret = 0;
	struct extent_map *em;
	bool prealloc = false;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

	WARN_ON(!em || em->start != start);

	if (!em)
		goto out;

	em->generation = gen;
	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (test_bit(EXTENT_FLAG_FILLING, &em->flags)) {
		prealloc = true;
		clear_bit(EXTENT_FLAG_FILLING, &em->flags);
	}

	try_merge_map(tree, em);

	if (prealloc) {
		em->mod_start = em->start;
		em->mod_len = em->len;
	}

	free_extent_map(em);
out:
	write_unlock(&tree->lock);
	return ret;
}

void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
{
	clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
	if (extent_map_in_tree(em))
		try_merge_map(tree, em);
}

static inline void setup_extent_mapping(struct extent_map_tree *tree,
					struct extent_map *em,
					int modified)
{
	refcount_inc(&em->refs);
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (modified)
		list_move(&em->list, &tree->modified_extents);
	else
		try_merge_map(tree, em);
}

static void extent_map_device_set_bits(struct extent_map *em, unsigned bits)
{
	struct map_lookup *map = em->map_lookup;
	u64 stripe_size = em->orig_block_len;
	int i;

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_bio_stripe *stripe = &map->stripes[i];
		struct btrfs_device *device = stripe->dev;

		set_extent_bits_nowait(&device->alloc_state, stripe->physical,
				       stripe->physical + stripe_size - 1, bits);
	}
}

static void extent_map_device_clear_bits(struct extent_map *em, unsigned bits)
{
	struct map_lookup *map = em->map_lookup;
	u64 stripe_size = em->orig_block_len;
	int i;

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_bio_stripe *stripe = &map->stripes[i];
		struct btrfs_device *device = stripe->dev;

		__clear_extent_bit(&device->alloc_state, stripe->physical,
				   stripe->physical + stripe_size - 1, bits,
				   0, 0, NULL, GFP_NOWAIT, NULL);
	}
}

/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree:	tree to insert new map in
 * @em:		map to insert
 * @modified:	indicate whether the new extent has been modified
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * reference dropped if the merge attempt was successful.
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em, int modified)
{
	int ret = 0;

	lockdep_assert_held_write(&tree->lock);

	ret = tree_insert(&tree->map, em);
	if (ret)
		goto out;

	setup_extent_mapping(tree, em, modified);
	if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags)) {
		extent_map_device_set_bits(em, CHUNK_ALLOCATED);
		extent_map_device_clear_bits(em, CHUNK_TRIMMED);
	}
out:
	return ret;
}
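
/*
 * Illustrative sketch, not a caller from this file: add_extent_mapping()
 * must run under the tree's write lock, and on success the tree takes its
 * own reference via setup_extent_mapping(), so the caller can drop the
 * allocation reference once the insert is done.
 */
static int __maybe_unused add_extent_mapping_sketch(struct extent_map_tree *tree,
						    struct extent_map *em)
{
	int ret;

	write_lock(&tree->lock);
	ret = add_extent_mapping(tree, em, 0);
	write_unlock(&tree->lock);
	if (ret == 0)
		free_extent_map(em);	/* the tree now holds its own ref */
	return ret;
}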

static struct extent_map *
__lookup_extent_mapping(struct extent_map_tree *tree,
			u64 start, u64 len, int strict)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);

	rb_node = __tree_search(&tree->map.rb_root, start, &prev, &next);
	if (!rb_node) {
		if (prev)
			rb_node = prev;
		else if (next)
			rb_node = next;
		else
			return NULL;
	}

	em = rb_entry(rb_node, struct extent_map, rb_node);

	if (strict && !(end > em->start && start < extent_map_end(em)))
		return NULL;

	refcount_inc(&em->refs);
	return em;
}

/**
 * lookup_extent_mapping - lookup extent_map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.  There may be additional objects in the tree
 * that intersect, so check the object returned carefully to make sure that
 * no additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 1);
}
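
/*
 * Illustrative sketch, assuming a tree populated elsewhere: look up a
 * mapping under the read lock and translate a file offset to its block
 * start.  The returned map carries an extra reference that must be dropped
 * with free_extent_map().
 */
static u64 __maybe_unused lookup_block_start_sketch(struct extent_map_tree *tree,
						    u64 offset)
{
	struct extent_map *em;
	u64 block_start = EXTENT_MAP_HOLE;

	read_lock(&tree->lock);
	em = lookup_extent_mapping(tree, offset, 1);
	read_unlock(&tree->lock);
	if (!em)
		return block_start;
	/* a strict one-byte lookup guarantees em covers offset */
	block_start = em->block_start;
	free_extent_map(em);
	return block_start;
}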

/**
 * search_extent_mapping - find a nearby extent map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.
 *
 * If one can't be found, any nearby extent may be returned
 */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 0);
}

/**
 * remove_extent_mapping - removes an extent_map from the extent tree
 * @tree:	extent tree to remove from
 * @em:		extent map being removed
 *
 * Removes @em from @tree.  No reference counts are dropped, and no checks
 * are done to see if the range is in use
 */
void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	rb_erase_cached(&em->rb_node, &tree->map);
	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
		list_del_init(&em->list);
	if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
		extent_map_device_clear_bits(em, CHUNK_ALLOCATED);
	RB_CLEAR_NODE(&em->rb_node);
}
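
/*
 * Illustrative sketch, mirroring the pattern used by callers such as
 * btrfs_drop_extent_cache(): since remove_extent_mapping() drops no
 * references, the caller puts both the lookup reference and the reference
 * the tree used to hold.
 */
static void __maybe_unused drop_mapping_sketch(struct extent_map_tree *tree,
					       u64 start, u64 len)
{
	struct extent_map *em;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);
	if (em) {
		remove_extent_mapping(tree, em);
		free_extent_map(em);	/* lookup reference */
		free_extent_map(em);	/* reference the tree held */
	}
	write_unlock(&tree->lock);
}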

void replace_extent_mapping(struct extent_map_tree *tree,
			    struct extent_map *cur,
			    struct extent_map *new,
			    int modified)
{
	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &cur->flags));
	ASSERT(extent_map_in_tree(cur));
	if (!test_bit(EXTENT_FLAG_LOGGING, &cur->flags))
		list_del_init(&cur->list);
	rb_replace_node_cached(&cur->rb_node, &new->rb_node, &tree->map);
	RB_CLEAR_NODE(&cur->rb_node);

	setup_extent_mapping(tree, new, modified);
}

static struct extent_map *next_extent_map(struct extent_map *em)
{
	struct rb_node *next;

	next = rb_next(&em->rb_node);
	if (!next)
		return NULL;
	return container_of(next, struct extent_map, rb_node);
}

static struct extent_map *prev_extent_map(struct extent_map *em)
{
	struct rb_node *prev;

	prev = rb_prev(&em->rb_node);
	if (!prev)
		return NULL;
	return container_of(prev, struct extent_map, rb_node);
}

/*
 * Helper for btrfs_get_extent.  Given an existing extent in the tree (the
 * nearest extent to map_start) and a new extent to insert, deal with the
 * overlap and insert the best-fitting new extent into the tree.
 */
static noinline int merge_extent_mapping(struct extent_map_tree *em_tree,
					 struct extent_map *existing,
					 struct extent_map *em,
					 u64 map_start)
{
	struct extent_map *prev;
	struct extent_map *next;
	u64 start;
	u64 end;
	u64 start_diff;

	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));

	if (existing->start > map_start) {
		next = existing;
		prev = prev_extent_map(next);
	} else {
		prev = existing;
		next = next_extent_map(prev);
	}

	start = prev ? extent_map_end(prev) : em->start;
	start = max_t(u64, start, em->start);
	end = next ? next->start : extent_map_end(em);
	end = min_t(u64, end, extent_map_end(em));
	start_diff = start - em->start;
	em->start = start;
	em->len = end - start;
	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
		em->block_start += start_diff;
		em->block_len = em->len;
	}
	return add_extent_mapping(em_tree, em, 0);
}

/**
 * btrfs_add_extent_mapping - add extent mapping into em_tree
 * @fs_info:	the filesystem, used for the tracepoint
 * @em_tree:	the extent tree into which we want to insert the extent mapping
 * @em_in:	extent we are inserting
 * @start:	start of the logical range btrfs_get_extent() is requesting
 * @len:	length of the logical range btrfs_get_extent() is requesting
 *
 * Note that @em_in's range may be different from [start, start+len),
 * but they must overlap.
 *
 * Insert @em_in into @em_tree.  In case there is an overlapping range, handle
 * the -EEXIST by either:
 * a) Returning the existing extent in @em_in if @start is within the
 *    existing em.
 * b) Merge the existing extent with @em_in passed in.
 *
 * Return 0 on success, otherwise -EEXIST.
 */
int btrfs_add_extent_mapping(struct btrfs_fs_info *fs_info,
			     struct extent_map_tree *em_tree,
			     struct extent_map **em_in, u64 start, u64 len)
{
	int ret;
	struct extent_map *em = *em_in;

	ret = add_extent_mapping(em_tree, em, 0);
	/*
	 * It is possible that someone inserted the extent into the tree
	 * while we had the lock dropped.  It is also possible that
	 * an overlapping map exists in the tree.
	 */
	if (ret == -EEXIST) {
		struct extent_map *existing;

		ret = 0;

		existing = search_extent_mapping(em_tree, start, len);

		trace_btrfs_handle_em_exist(fs_info, existing, em, start, len);

		/*
		 * existing will always be non-NULL, since there must be an
		 * extent causing the -EEXIST.
		 */
		if (start >= existing->start &&
		    start < extent_map_end(existing)) {
			free_extent_map(em);
			*em_in = existing;
			ret = 0;
		} else {
			u64 orig_start = em->start;
			u64 orig_len = em->len;

			/*
			 * The existing extent map is the one nearest to
			 * the [start, start + len) range which overlaps
			 */
			ret = merge_extent_mapping(em_tree, existing,
						   em, start);
			if (ret) {
				free_extent_map(em);
				*em_in = NULL;
				WARN_ONCE(ret,
"unexpected error %d: merge existing(start %llu len %llu) with em(start %llu len %llu)\n",
					  ret, existing->start, existing->len,
					  orig_start, orig_len);
			}
			free_extent_map(existing);
		}
	}

	ASSERT(ret == 0 || ret == -EEXIST);
	return ret;
}
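
/*
 * Illustrative caller pattern (a sketch; the real caller is
 * btrfs_get_extent()): because btrfs_add_extent_mapping() may replace the
 * map passed in with the pre-existing overlapping one, callers must always
 * consume whatever *em_in points at afterwards.
 */
static __maybe_unused struct extent_map *get_mapping_sketch(
		struct btrfs_fs_info *fs_info,
		struct extent_map_tree *em_tree,
		struct extent_map *em, u64 start, u64 len)
{
	int ret;

	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
	write_unlock(&em_tree->lock);
	if (ret)
		return ERR_PTR(ret);	/* em was freed and NULLed on failure */
	return em;
}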