| 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* |
| 3 | * Copyright (C) 2008 Oracle. All rights reserved. |
| 4 | */ |
| 5 | |
| 6 | #include <linux/kernel.h> |
| 7 | #include <linux/bio.h> |
| 8 | #include <linux/file.h> |
| 9 | #include <linux/fs.h> |
| 10 | #include <linux/pagemap.h> |
| 11 | #include <linux/highmem.h> |
| 12 | #include <linux/time.h> |
| 13 | #include <linux/init.h> |
| 14 | #include <linux/string.h> |
| 15 | #include <linux/backing-dev.h> |
| 16 | #include <linux/writeback.h> |
| 17 | #include <linux/slab.h> |
| 18 | #include <linux/sched/mm.h> |
| 19 | #include <linux/log2.h> |
| 20 | #include "ctree.h" |
| 21 | #include "disk-io.h" |
| 22 | #include "transaction.h" |
| 23 | #include "btrfs_inode.h" |
| 24 | #include "volumes.h" |
| 25 | #include "ordered-data.h" |
| 26 | #include "compression.h" |
| 27 | #include "extent_io.h" |
| 28 | #include "extent_map.h" |
| 29 | |
| 30 | static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" }; |
| 31 | |
| 32 | const char* btrfs_compress_type2str(enum btrfs_compression_type type) |
| 33 | { |
| 34 | switch (type) { |
| 35 | case BTRFS_COMPRESS_ZLIB: |
| 36 | case BTRFS_COMPRESS_LZO: |
| 37 | case BTRFS_COMPRESS_ZSTD: |
| 38 | case BTRFS_COMPRESS_NONE: |
| 39 | return btrfs_compress_types[type]; |
| 40 | } |
| 41 | |
| 42 | return NULL; |
| 43 | } |
| 44 | |
| 45 | static int btrfs_decompress_bio(struct compressed_bio *cb); |
| 46 | |
| 47 | static inline int compressed_bio_size(struct btrfs_fs_info *fs_info, |
| 48 | unsigned long disk_size) |
| 49 | { |
| 50 | u16 csum_size = btrfs_super_csum_size(fs_info->super_copy); |
| 51 | |
| 52 | return sizeof(struct compressed_bio) + |
| 53 | (DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size; |
| 54 | } |
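| | /* |
| | * Worked example (illustrative, assuming 4KiB sectors and 4-byte crc32c |
| | * checksums): a 128KiB compressed extent covers 32 sectors, so the size |
| | * returned above is sizeof(struct compressed_bio) + 32 * 4 bytes, leaving |
| | * room for one checksum per compressed sector after the struct. |
| | */ |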
| 55 | |
| 56 | static int check_compressed_csum(struct btrfs_inode *inode, |
| 57 | struct compressed_bio *cb, |
| 58 | u64 disk_start) |
| 59 | { |
| 60 | int ret; |
| 61 | struct page *page; |
| 62 | unsigned long i; |
| 63 | char *kaddr; |
| 64 | u32 csum; |
| 65 | u32 *cb_sum = &cb->sums; |
| 66 | |
| 67 | if (inode->flags & BTRFS_INODE_NODATASUM) |
| 68 | return 0; |
| 69 | |
| 70 | for (i = 0; i < cb->nr_pages; i++) { |
| 71 | page = cb->compressed_pages[i]; |
| 72 | csum = ~(u32)0; |
| 73 | |
| 74 | kaddr = kmap_atomic(page); |
| 75 | csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE); |
| 76 | btrfs_csum_final(csum, (u8 *)&csum); |
| 77 | kunmap_atomic(kaddr); |
| 78 | |
| 79 | if (csum != *cb_sum) { |
| 80 | btrfs_print_data_csum_error(inode, disk_start, csum, |
| 81 | *cb_sum, cb->mirror_num); |
| 82 | ret = -EIO; |
| 83 | goto fail; |
| 84 | } |
| 85 | cb_sum++; |
| 86 | |
| 87 | } |
| 88 | ret = 0; |
| 89 | fail: |
| 90 | return ret; |
| 91 | } |
| 92 | |
| 93 | /* when we finish reading compressed pages from the disk, we |
| 94 | * decompress them and then run the bio end_io routines on the |
| 95 | * decompressed pages (in the inode address space). |
| 96 | * |
| 97 | * This allows the checksumming and other IO error handling routines |
| 98 | * to work normally |
| 99 | * |
| 100 | * The compressed pages are freed here, and it must be run |
| 101 | * in process context |
| 102 | */ |
| 103 | static void end_compressed_bio_read(struct bio *bio) |
| 104 | { |
| 105 | struct compressed_bio *cb = bio->bi_private; |
| 106 | struct inode *inode; |
| 107 | struct page *page; |
| 108 | unsigned long index; |
| 109 | unsigned int mirror = btrfs_io_bio(bio)->mirror_num; |
| 110 | int ret = 0; |
| 111 | |
| 112 | if (bio->bi_status) |
| 113 | cb->errors = 1; |
| 114 | |
| 115 | /* if there are more bios still pending for this compressed |
| 116 | * extent, just exit |
| 117 | */ |
| 118 | if (!refcount_dec_and_test(&cb->pending_bios)) |
| 119 | goto out; |
| 120 | |
| 121 | /* |
| 122 | * Record the correct mirror_num in cb->orig_bio so that |
| 123 | * read-repair can work properly. |
| 124 | */ |
| 125 | ASSERT(btrfs_io_bio(cb->orig_bio)); |
| 126 | btrfs_io_bio(cb->orig_bio)->mirror_num = mirror; |
| 127 | cb->mirror_num = mirror; |
| 128 | |
| 129 | /* |
| 130 | * Some IO in this cb has failed, just skip the checksum as there |
| 131 | * is no way it could be correct. |
| 132 | */ |
| 133 | if (cb->errors == 1) |
| 134 | goto csum_failed; |
| 135 | |
| 136 | inode = cb->inode; |
| 137 | ret = check_compressed_csum(BTRFS_I(inode), cb, |
| 138 | (u64)bio->bi_iter.bi_sector << 9); |
| 139 | if (ret) |
| 140 | goto csum_failed; |
| 141 | |
| 142 | /* ok, we're the last bio for this extent, let's start |
| 143 | * the decompression. |
| 144 | */ |
| 145 | ret = btrfs_decompress_bio(cb); |
| 146 | |
| 147 | csum_failed: |
| 148 | if (ret) |
| 149 | cb->errors = 1; |
| 150 | |
| 151 | /* release the compressed pages */ |
| 152 | index = 0; |
| 153 | for (index = 0; index < cb->nr_pages; index++) { |
| 154 | page = cb->compressed_pages[index]; |
| 155 | page->mapping = NULL; |
| 156 | put_page(page); |
| 157 | } |
| 158 | |
| 159 | /* do io completion on the original bio */ |
| 160 | if (cb->errors) { |
| 161 | bio_io_error(cb->orig_bio); |
| 162 | } else { |
| 163 | int i; |
| 164 | struct bio_vec *bvec; |
| 165 | |
| 166 | /* |
| 167 | * we have verified the checksum already, set page |
| 168 | * checked so the end_io handlers know about it |
| 169 | */ |
| 170 | ASSERT(!bio_flagged(bio, BIO_CLONED)); |
| 171 | bio_for_each_segment_all(bvec, cb->orig_bio, i) |
| 172 | SetPageChecked(bvec->bv_page); |
| 173 | |
| 174 | bio_endio(cb->orig_bio); |
| 175 | } |
| 176 | |
| 177 | /* finally free the cb struct */ |
| 178 | kfree(cb->compressed_pages); |
| 179 | kfree(cb); |
| 180 | out: |
| 181 | bio_put(bio); |
| 182 | } |
| 183 | |
| 184 | /* |
| 185 | * Clear the writeback bits on all of the file |
| 186 | * pages for a compressed write |
| 187 | */ |
| 188 | static noinline void end_compressed_writeback(struct inode *inode, |
| 189 | const struct compressed_bio *cb) |
| 190 | { |
| 191 | unsigned long index = cb->start >> PAGE_SHIFT; |
| 192 | unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT; |
| 193 | struct page *pages[16]; |
| 194 | unsigned long nr_pages = end_index - index + 1; |
| 195 | int i; |
| 196 | int ret; |
| 197 | |
| 198 | if (cb->errors) |
| 199 | mapping_set_error(inode->i_mapping, -EIO); |
| 200 | |
| 201 | while (nr_pages > 0) { |
| 202 | ret = find_get_pages_contig(inode->i_mapping, index, |
| 203 | min_t(unsigned long, |
| 204 | nr_pages, ARRAY_SIZE(pages)), pages); |
| 205 | if (ret == 0) { |
| 206 | nr_pages -= 1; |
| 207 | index += 1; |
| 208 | continue; |
| 209 | } |
| 210 | for (i = 0; i < ret; i++) { |
| 211 | if (cb->errors) |
| 212 | SetPageError(pages[i]); |
| 213 | end_page_writeback(pages[i]); |
| 214 | put_page(pages[i]); |
| 215 | } |
| 216 | nr_pages -= ret; |
| 217 | index += ret; |
| 218 | } |
| 219 | /* the inode may be gone now */ |
| 220 | } |
| 221 | |
| 222 | /* |
| 223 | * do the cleanup once all the compressed pages hit the disk. |
| 224 | * This will clear writeback on the file pages and free the compressed |
| 225 | * pages. |
| 226 | * |
| 227 | * This also calls the writeback end hooks for the file pages so that |
| 228 | * metadata and checksums can be updated in the file. |
| 229 | */ |
| 230 | static void end_compressed_bio_write(struct bio *bio) |
| 231 | { |
| 232 | struct extent_io_tree *tree; |
| 233 | struct compressed_bio *cb = bio->bi_private; |
| 234 | struct inode *inode; |
| 235 | struct page *page; |
| 236 | unsigned long index; |
| 237 | |
| 238 | if (bio->bi_status) |
| 239 | cb->errors = 1; |
| 240 | |
| 241 | /* if there are more bios still pending for this compressed |
| 242 | * extent, just exit |
| 243 | */ |
| 244 | if (!refcount_dec_and_test(&cb->pending_bios)) |
| 245 | goto out; |
| 246 | |
| 247 | /* ok, we're the last bio for this extent, step one is to |
| 248 | * call back into the FS and do all the end_io operations |
| 249 | */ |
| 250 | inode = cb->inode; |
| 251 | tree = &BTRFS_I(inode)->io_tree; |
| 252 | cb->compressed_pages[0]->mapping = cb->inode->i_mapping; |
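| | /* |
| | * Note: the last argument of writepage_end_io_hook() is an "uptodate" |
| | * flag; BLK_STS_OK is 0 and BLK_STS_NOTSUPP is non-zero, so the |
| | * expression below passes "not uptodate" on error and "uptodate" on |
| | * success, despite how it reads. |
| | */ |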
| 253 | tree->ops->writepage_end_io_hook(cb->compressed_pages[0], |
| 254 | cb->start, |
| 255 | cb->start + cb->len - 1, |
| 256 | NULL, |
| 257 | bio->bi_status ? |
| 258 | BLK_STS_OK : BLK_STS_NOTSUPP); |
| 259 | cb->compressed_pages[0]->mapping = NULL; |
| 260 | |
| 261 | end_compressed_writeback(inode, cb); |
| 262 | /* note, our inode could be gone now */ |
| 263 | |
| 264 | /* |
| 265 | * release the compressed pages, these came from alloc_page and |
| 266 | * are not attached to the inode at all |
| 267 | */ |
| 268 | index = 0; |
| 269 | for (index = 0; index < cb->nr_pages; index++) { |
| 270 | page = cb->compressed_pages[index]; |
| 271 | page->mapping = NULL; |
| 272 | put_page(page); |
| 273 | } |
| 274 | |
| 275 | /* finally free the cb struct */ |
| 276 | kfree(cb->compressed_pages); |
| 277 | kfree(cb); |
| 278 | out: |
| 279 | bio_put(bio); |
| 280 | } |
| 281 | |
| 282 | /* |
| 283 | * worker function to build and submit bios for previously compressed pages. |
| 284 | * The corresponding pages in the inode should be marked for writeback |
| 285 | * and the compressed pages should have a reference on them for dropping |
| 286 | * when the IO is complete. |
| 287 | * |
| 288 | * This also checksums the file bytes and gets things ready for |
| 289 | * the end io hooks. |
| 290 | */ |
| 291 | blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start, |
| 292 | unsigned long len, u64 disk_start, |
| 293 | unsigned long compressed_len, |
| 294 | struct page **compressed_pages, |
| 295 | unsigned long nr_pages, |
| 296 | unsigned int write_flags) |
| 297 | { |
| 298 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
| 299 | struct bio *bio = NULL; |
| 300 | struct compressed_bio *cb; |
| 301 | unsigned long bytes_left; |
| 302 | int pg_index = 0; |
| 303 | struct page *page; |
| 304 | u64 first_byte = disk_start; |
| 305 | struct block_device *bdev; |
| 306 | blk_status_t ret; |
| 307 | int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; |
| 308 | |
| 309 | WARN_ON(start & ((u64)PAGE_SIZE - 1)); |
| 310 | cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS); |
| 311 | if (!cb) |
| 312 | return BLK_STS_RESOURCE; |
| 313 | refcount_set(&cb->pending_bios, 0); |
| 314 | cb->errors = 0; |
| 315 | cb->inode = inode; |
| 316 | cb->start = start; |
| 317 | cb->len = len; |
| 318 | cb->mirror_num = 0; |
| 319 | cb->compressed_pages = compressed_pages; |
| 320 | cb->compressed_len = compressed_len; |
| 321 | cb->orig_bio = NULL; |
| 322 | cb->nr_pages = nr_pages; |
| 323 | |
| 324 | bdev = fs_info->fs_devices->latest_bdev; |
| 325 | |
| 326 | bio = btrfs_bio_alloc(bdev, first_byte); |
| 327 | bio->bi_opf = REQ_OP_WRITE | write_flags; |
| 328 | bio->bi_private = cb; |
| 329 | bio->bi_end_io = end_compressed_bio_write; |
| 330 | refcount_set(&cb->pending_bios, 1); |
| 331 | |
| 332 | /* create and submit bios for the compressed pages */ |
| 333 | bytes_left = compressed_len; |
| 334 | for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) { |
| 335 | int submit = 0; |
| 336 | |
| 337 | page = compressed_pages[pg_index]; |
| 338 | page->mapping = inode->i_mapping; |
| 339 | if (bio->bi_iter.bi_size) |
| 340 | submit = btrfs_merge_bio_hook(page, 0, PAGE_SIZE, bio, 0); |
| 341 | |
| 342 | page->mapping = NULL; |
| 343 | if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) < |
| 344 | PAGE_SIZE) { |
| 345 | /* |
| 346 | * Increment the count before we submit the bio so |
| 347 | * we know the end IO handler cannot drop the last |
| 348 | * reference to the cb. Otherwise, the cb might get |
| 349 | * freed before we're done setting it up |
| 350 | */ |
| 351 | refcount_inc(&cb->pending_bios); |
| 352 | ret = btrfs_bio_wq_end_io(fs_info, bio, |
| 353 | BTRFS_WQ_ENDIO_DATA); |
| 354 | BUG_ON(ret); /* -ENOMEM */ |
| 355 | |
| 356 | if (!skip_sum) { |
| 357 | ret = btrfs_csum_one_bio(inode, bio, start, 1); |
| 358 | BUG_ON(ret); /* -ENOMEM */ |
| 359 | } |
| 360 | |
| 361 | ret = btrfs_map_bio(fs_info, bio, 0, 1); |
| 362 | if (ret) { |
| 363 | bio->bi_status = ret; |
| 364 | bio_endio(bio); |
| 365 | } |
| 366 | |
| 367 | bio = btrfs_bio_alloc(bdev, first_byte); |
| 368 | bio->bi_opf = REQ_OP_WRITE | write_flags; |
| 369 | bio->bi_private = cb; |
| 370 | bio->bi_end_io = end_compressed_bio_write; |
| 371 | bio_add_page(bio, page, PAGE_SIZE, 0); |
| 372 | } |
| 373 | if (bytes_left < PAGE_SIZE) { |
| 374 | btrfs_info(fs_info, |
| 375 | "bytes left %lu compress len %lu nr %lu", |
| 376 | bytes_left, cb->compressed_len, cb->nr_pages); |
| 377 | } |
| 378 | bytes_left -= PAGE_SIZE; |
| 379 | first_byte += PAGE_SIZE; |
| 380 | cond_resched(); |
| 381 | } |
| 382 | |
| 383 | ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA); |
| 384 | BUG_ON(ret); /* -ENOMEM */ |
| 385 | |
| 386 | if (!skip_sum) { |
| 387 | ret = btrfs_csum_one_bio(inode, bio, start, 1); |
| 388 | BUG_ON(ret); /* -ENOMEM */ |
| 389 | } |
| 390 | |
| 391 | ret = btrfs_map_bio(fs_info, bio, 0, 1); |
| 392 | if (ret) { |
| 393 | bio->bi_status = ret; |
| 394 | bio_endio(bio); |
| 395 | } |
| 396 | |
| 397 | return 0; |
| 398 | } |
| 399 | |
| 400 | static u64 bio_end_offset(struct bio *bio) |
| 401 | { |
| 402 | struct bio_vec *last = bio_last_bvec_all(bio); |
| 403 | |
| 404 | return page_offset(last->bv_page) + last->bv_len + last->bv_offset; |
| 405 | } |
| 406 | |
| 407 | static noinline int add_ra_bio_pages(struct inode *inode, |
| 408 | u64 compressed_end, |
| 409 | struct compressed_bio *cb) |
| 410 | { |
| 411 | unsigned long end_index; |
| 412 | unsigned long pg_index; |
| 413 | u64 last_offset; |
| 414 | u64 isize = i_size_read(inode); |
| 415 | int ret; |
| 416 | struct page *page; |
| 417 | unsigned long nr_pages = 0; |
| 418 | struct extent_map *em; |
| 419 | struct address_space *mapping = inode->i_mapping; |
| 420 | struct extent_map_tree *em_tree; |
| 421 | struct extent_io_tree *tree; |
| 422 | u64 end; |
| 423 | int misses = 0; |
| 424 | |
| 425 | last_offset = bio_end_offset(cb->orig_bio); |
| 426 | em_tree = &BTRFS_I(inode)->extent_tree; |
| 427 | tree = &BTRFS_I(inode)->io_tree; |
| 428 | |
| 429 | if (isize == 0) |
| 430 | return 0; |
| 431 | |
| 432 | end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT; |
| 433 | |
| 434 | while (last_offset < compressed_end) { |
| 435 | pg_index = last_offset >> PAGE_SHIFT; |
| 436 | |
| 437 | if (pg_index > end_index) |
| 438 | break; |
| 439 | |
| 440 | rcu_read_lock(); |
| 441 | page = radix_tree_lookup(&mapping->i_pages, pg_index); |
| 442 | rcu_read_unlock(); |
| 443 | if (page && !radix_tree_exceptional_entry(page)) { |
| 444 | misses++; |
| 445 | if (misses > 4) |
| 446 | break; |
| 447 | goto next; |
| 448 | } |
| 449 | |
| 450 | page = __page_cache_alloc(mapping_gfp_constraint(mapping, |
| 451 | ~__GFP_FS)); |
| 452 | if (!page) |
| 453 | break; |
| 454 | |
| 455 | if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) { |
| 456 | put_page(page); |
| 457 | goto next; |
| 458 | } |
| 459 | |
| 460 | end = last_offset + PAGE_SIZE - 1; |
| 461 | /* |
| 462 | * at this point, we have a locked page in the page cache |
| 463 | * for these bytes in the file. But, we have to make |
| 464 | * sure they map to this compressed extent on disk. |
| 465 | */ |
| 466 | set_page_extent_mapped(page); |
| 467 | lock_extent(tree, last_offset, end); |
| 468 | read_lock(&em_tree->lock); |
| 469 | em = lookup_extent_mapping(em_tree, last_offset, |
| 470 | PAGE_SIZE); |
| 471 | read_unlock(&em_tree->lock); |
| 472 | |
| 473 | if (!em || last_offset < em->start || |
| 474 | (last_offset + PAGE_SIZE > extent_map_end(em)) || |
| 475 | (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) { |
| 476 | free_extent_map(em); |
| 477 | unlock_extent(tree, last_offset, end); |
| 478 | unlock_page(page); |
| 479 | put_page(page); |
| 480 | break; |
| 481 | } |
| 482 | free_extent_map(em); |
| 483 | |
| 484 | if (page->index == end_index) { |
| 485 | char *userpage; |
| 486 | size_t zero_offset = isize & (PAGE_SIZE - 1); |
| 487 | |
| 488 | if (zero_offset) { |
| 489 | int zeros; |
| 490 | zeros = PAGE_SIZE - zero_offset; |
| 491 | userpage = kmap_atomic(page); |
| 492 | memset(userpage + zero_offset, 0, zeros); |
| 493 | flush_dcache_page(page); |
| 494 | kunmap_atomic(userpage); |
| 495 | } |
| 496 | } |
| 497 | |
| 498 | ret = bio_add_page(cb->orig_bio, page, |
| 499 | PAGE_SIZE, 0); |
| 500 | |
| 501 | if (ret == PAGE_SIZE) { |
| 502 | nr_pages++; |
| 503 | put_page(page); |
| 504 | } else { |
| 505 | unlock_extent(tree, last_offset, end); |
| 506 | unlock_page(page); |
| 507 | put_page(page); |
| 508 | break; |
| 509 | } |
| 510 | next: |
| 511 | last_offset += PAGE_SIZE; |
| 512 | } |
| 513 | return 0; |
| 514 | } |
| 515 | |
| 516 | /* |
| 517 | * for a compressed read, the bio we get passed has all the inode pages |
| 518 | * in it. We don't actually do IO on those pages but allocate new ones |
| 519 | * to hold the compressed pages on disk. |
| 520 | * |
| 521 | * bio->bi_iter.bi_sector points to the compressed extent on disk |
| 522 | * bio->bi_io_vec points to all of the inode pages |
| 523 | * |
| 524 | * After the compressed pages are read, we copy the bytes into the |
| 525 | * bio we were passed and then call the bio end_io calls |
| 526 | */ |
| 527 | blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, |
| 528 | int mirror_num, unsigned long bio_flags) |
| 529 | { |
| 530 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
| 531 | struct extent_io_tree *tree; |
| 532 | struct extent_map_tree *em_tree; |
| 533 | struct compressed_bio *cb; |
| 534 | unsigned long compressed_len; |
| 535 | unsigned long nr_pages; |
| 536 | unsigned long pg_index; |
| 537 | struct page *page; |
| 538 | struct block_device *bdev; |
| 539 | struct bio *comp_bio; |
| 540 | u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9; |
| 541 | u64 em_len; |
| 542 | u64 em_start; |
| 543 | struct extent_map *em; |
| 544 | blk_status_t ret = BLK_STS_RESOURCE; |
| 545 | int faili = 0; |
| 546 | u32 *sums; |
| 547 | |
| 548 | tree = &BTRFS_I(inode)->io_tree; |
| 549 | em_tree = &BTRFS_I(inode)->extent_tree; |
| 550 | |
| 551 | /* we need the actual starting offset of this extent in the file */ |
| 552 | read_lock(&em_tree->lock); |
| 553 | em = lookup_extent_mapping(em_tree, |
| 554 | page_offset(bio_first_page_all(bio)), |
| 555 | PAGE_SIZE); |
| 556 | read_unlock(&em_tree->lock); |
| 557 | if (!em) |
| 558 | return BLK_STS_IOERR; |
| 559 | |
| 560 | compressed_len = em->block_len; |
| 561 | cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS); |
| 562 | if (!cb) |
| 563 | goto out; |
| 564 | |
| 565 | refcount_set(&cb->pending_bios, 0); |
| 566 | cb->errors = 0; |
| 567 | cb->inode = inode; |
| 568 | cb->mirror_num = mirror_num; |
| 569 | sums = &cb->sums; |
| 570 | |
| 571 | cb->start = em->orig_start; |
| 572 | em_len = em->len; |
| 573 | em_start = em->start; |
| 574 | |
| 575 | free_extent_map(em); |
| 576 | em = NULL; |
| 577 | |
| 578 | cb->len = bio->bi_iter.bi_size; |
| 579 | cb->compressed_len = compressed_len; |
| 580 | cb->compress_type = extent_compress_type(bio_flags); |
| 581 | cb->orig_bio = bio; |
| 582 | |
| 583 | nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE); |
| 584 | cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *), |
| 585 | GFP_NOFS); |
| 586 | if (!cb->compressed_pages) |
| 587 | goto fail1; |
| 588 | |
| 589 | bdev = fs_info->fs_devices->latest_bdev; |
| 590 | |
| 591 | for (pg_index = 0; pg_index < nr_pages; pg_index++) { |
| 592 | cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS | |
| 593 | __GFP_HIGHMEM); |
| 594 | if (!cb->compressed_pages[pg_index]) { |
| 595 | faili = pg_index - 1; |
| 596 | ret = BLK_STS_RESOURCE; |
| 597 | goto fail2; |
| 598 | } |
| 599 | } |
| 600 | faili = nr_pages - 1; |
| 601 | cb->nr_pages = nr_pages; |
| 602 | |
| 603 | add_ra_bio_pages(inode, em_start + em_len, cb); |
| 604 | |
| 605 | /* include any pages we added in add_ra_bio_pages */ |
| 606 | cb->len = bio->bi_iter.bi_size; |
| 607 | |
| 608 | comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte); |
| 609 | comp_bio->bi_opf = REQ_OP_READ; |
| 610 | comp_bio->bi_private = cb; |
| 611 | comp_bio->bi_end_io = end_compressed_bio_read; |
| 612 | refcount_set(&cb->pending_bios, 1); |
| 613 | |
| 614 | for (pg_index = 0; pg_index < nr_pages; pg_index++) { |
| 615 | int submit = 0; |
| 616 | |
| 617 | page = cb->compressed_pages[pg_index]; |
| 618 | page->mapping = inode->i_mapping; |
| 619 | page->index = em_start >> PAGE_SHIFT; |
| 620 | |
| 621 | if (comp_bio->bi_iter.bi_size) |
| 622 | submit = btrfs_merge_bio_hook(page, 0, PAGE_SIZE, |
| 623 | comp_bio, 0); |
| 624 | |
| 625 | page->mapping = NULL; |
| 626 | if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) < |
| 627 | PAGE_SIZE) { |
| 628 | ret = btrfs_bio_wq_end_io(fs_info, comp_bio, |
| 629 | BTRFS_WQ_ENDIO_DATA); |
| 630 | BUG_ON(ret); /* -ENOMEM */ |
| 631 | |
| 632 | /* |
| 633 | * Increment the count before we submit the bio so |
| 634 | * we know the end IO handler cannot drop the last |
| 635 | * reference to the cb. Otherwise, the cb might get |
| 636 | * freed before we're done setting it up |
| 637 | */ |
| 638 | refcount_inc(&cb->pending_bios); |
| 639 | |
| 640 | if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { |
| 641 | ret = btrfs_lookup_bio_sums(inode, comp_bio, |
| 642 | sums); |
| 643 | BUG_ON(ret); /* -ENOMEM */ |
| 644 | } |
| 645 | sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size, |
| 646 | fs_info->sectorsize); |
| 647 | |
| 648 | ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0); |
| 649 | if (ret) { |
| 650 | comp_bio->bi_status = ret; |
| 651 | bio_endio(comp_bio); |
| 652 | } |
| 653 | |
| 654 | comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte); |
| 655 | comp_bio->bi_opf = REQ_OP_READ; |
| 656 | comp_bio->bi_private = cb; |
| 657 | comp_bio->bi_end_io = end_compressed_bio_read; |
| 658 | |
| 659 | bio_add_page(comp_bio, page, PAGE_SIZE, 0); |
| 660 | } |
| 661 | cur_disk_byte += PAGE_SIZE; |
| 662 | } |
| 663 | |
| 664 | ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA); |
| 665 | BUG_ON(ret); /* -ENOMEM */ |
| 666 | |
| 667 | if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { |
| 668 | ret = btrfs_lookup_bio_sums(inode, comp_bio, sums); |
| 669 | BUG_ON(ret); /* -ENOMEM */ |
| 670 | } |
| 671 | |
| 672 | ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0); |
| 673 | if (ret) { |
| 674 | comp_bio->bi_status = ret; |
| 675 | bio_endio(comp_bio); |
| 676 | } |
| 677 | |
| 678 | return 0; |
| 679 | |
| 680 | fail2: |
| 681 | while (faili >= 0) { |
| 682 | __free_page(cb->compressed_pages[faili]); |
| 683 | faili--; |
| 684 | } |
| 685 | |
| 686 | kfree(cb->compressed_pages); |
| 687 | fail1: |
| 688 | kfree(cb); |
| 689 | out: |
| 690 | free_extent_map(em); |
| 691 | return ret; |
| 692 | } |
| 693 | |
| 694 | /* |
| 695 | * Heuristic uses systematic sampling to collect data from the input data |
| 696 | * range, the logic can be tuned by the following constants: |
| 697 | * |
| 698 | * @SAMPLING_READ_SIZE - how many bytes will be copied for each sample |
| 699 | * @SAMPLING_INTERVAL - distance between the starts of consecutive samples |
| 700 | */ |
| 701 | #define SAMPLING_READ_SIZE (16) |
| 702 | #define SAMPLING_INTERVAL (256) |
| 703 | |
| 704 | /* |
| 705 | * For statistical analysis of the input data we consider bytes that form a |
| 706 | * Galois Field of 256 objects. Each object has an attribute count, ie. how |
| 707 | * many times the object appeared in the sample. |
| 708 | */ |
| 709 | #define BUCKET_SIZE (256) |
| 710 | |
| 711 | /* |
| 712 | * The size of the sample is based on a statistical sampling rule of thumb. |
| 713 | * The common way is to perform sampling tests as long as the number of |
| 714 | * elements in each cell is at least 5. |
| 715 | * |
| 716 | * Instead of 5, we choose 32 to obtain more accurate results. |
| 717 | * If the data contain the maximum number of symbols, which is 256, we obtain a |
| 718 | * sample size bound by 8192. |
| 719 | * |
| 720 | * For a sample of at most 8KB of data per data range: 16 consecutive bytes |
| 721 | * from up to 512 locations. |
| 722 | */ |
| 723 | #define MAX_SAMPLE_SIZE (BTRFS_MAX_UNCOMPRESSED * \ |
| 724 | SAMPLING_READ_SIZE / SAMPLING_INTERVAL) |
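| | /* |
| | * Illustrative arithmetic: with BTRFS_MAX_UNCOMPRESSED of 128KiB this is |
| | * 131072 * 16 / 256 = 8192 bytes, i.e. at most 512 reads of 16 bytes each. |
| | */ |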
| 725 | |
| 726 | struct bucket_item { |
| 727 | u32 count; |
| 728 | }; |
| 729 | |
| 730 | struct heuristic_ws { |
| 731 | /* Partial copy of input data */ |
| 732 | u8 *sample; |
| 733 | u32 sample_size; |
| 734 | /* Buckets store counters for each byte value */ |
| 735 | struct bucket_item *bucket; |
| 736 | /* Sorting buffer */ |
| 737 | struct bucket_item *bucket_b; |
| 738 | struct list_head list; |
| 739 | }; |
| 740 | |
| 741 | static void free_heuristic_ws(struct list_head *ws) |
| 742 | { |
| 743 | struct heuristic_ws *workspace; |
| 744 | |
| 745 | workspace = list_entry(ws, struct heuristic_ws, list); |
| 746 | |
| 747 | kvfree(workspace->sample); |
| 748 | kfree(workspace->bucket); |
| 749 | kfree(workspace->bucket_b); |
| 750 | kfree(workspace); |
| 751 | } |
| 752 | |
| 753 | static struct list_head *alloc_heuristic_ws(void) |
| 754 | { |
| 755 | struct heuristic_ws *ws; |
| 756 | |
| 757 | ws = kzalloc(sizeof(*ws), GFP_KERNEL); |
| 758 | if (!ws) |
| 759 | return ERR_PTR(-ENOMEM); |
| 760 | |
| 761 | ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL); |
| 762 | if (!ws->sample) |
| 763 | goto fail; |
| 764 | |
| 765 | ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL); |
| 766 | if (!ws->bucket) |
| 767 | goto fail; |
| 768 | |
| 769 | ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL); |
| 770 | if (!ws->bucket_b) |
| 771 | goto fail; |
| 772 | |
| 773 | INIT_LIST_HEAD(&ws->list); |
| 774 | return &ws->list; |
| 775 | fail: |
| 776 | free_heuristic_ws(&ws->list); |
| 777 | return ERR_PTR(-ENOMEM); |
| 778 | } |
| 779 | |
| 780 | struct workspaces_list { |
| 781 | struct list_head idle_ws; |
| 782 | spinlock_t ws_lock; |
| 783 | /* Number of free workspaces */ |
| 784 | int free_ws; |
| 785 | /* Total number of allocated workspaces */ |
| 786 | atomic_t total_ws; |
| 787 | /* Waiters for a free workspace */ |
| 788 | wait_queue_head_t ws_wait; |
| 789 | }; |
| 790 | |
| 791 | static struct workspaces_list btrfs_comp_ws[BTRFS_COMPRESS_TYPES]; |
| 792 | |
| 793 | static struct workspaces_list btrfs_heuristic_ws; |
| 794 | |
| 795 | static const struct btrfs_compress_op * const btrfs_compress_op[] = { |
| 796 | &btrfs_zlib_compress, |
| 797 | &btrfs_lzo_compress, |
| 798 | &btrfs_zstd_compress, |
| 799 | }; |
| 800 | |
| 801 | void __init btrfs_init_compress(void) |
| 802 | { |
| 803 | struct list_head *workspace; |
| 804 | int i; |
| 805 | |
| 806 | INIT_LIST_HEAD(&btrfs_heuristic_ws.idle_ws); |
| 807 | spin_lock_init(&btrfs_heuristic_ws.ws_lock); |
| 808 | atomic_set(&btrfs_heuristic_ws.total_ws, 0); |
| 809 | init_waitqueue_head(&btrfs_heuristic_ws.ws_wait); |
| 810 | |
| 811 | workspace = alloc_heuristic_ws(); |
| 812 | if (IS_ERR(workspace)) { |
| 813 | pr_warn( |
| 814 | "BTRFS: cannot preallocate heuristic workspace, will try later\n"); |
| 815 | } else { |
| 816 | atomic_set(&btrfs_heuristic_ws.total_ws, 1); |
| 817 | btrfs_heuristic_ws.free_ws = 1; |
| 818 | list_add(workspace, &btrfs_heuristic_ws.idle_ws); |
| 819 | } |
| 820 | |
| 821 | for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) { |
| 822 | INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws); |
| 823 | spin_lock_init(&btrfs_comp_ws[i].ws_lock); |
| 824 | atomic_set(&btrfs_comp_ws[i].total_ws, 0); |
| 825 | init_waitqueue_head(&btrfs_comp_ws[i].ws_wait); |
| 826 | |
| 827 | /* |
| 828 | * Preallocate one workspace for each compression type so |
| 829 | * we can guarantee forward progress in the worst case |
| 830 | */ |
| 831 | workspace = btrfs_compress_op[i]->alloc_workspace(); |
| 832 | if (IS_ERR(workspace)) { |
| 833 | pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n"); |
| 834 | } else { |
| 835 | atomic_set(&btrfs_comp_ws[i].total_ws, 1); |
| 836 | btrfs_comp_ws[i].free_ws = 1; |
| 837 | list_add(workspace, &btrfs_comp_ws[i].idle_ws); |
| 838 | } |
| 839 | } |
| 840 | } |
| 841 | |
| 842 | /* |
| 843 | * This finds an available workspace or allocates a new one. |
| 844 | * If it's not possible to allocate a new one, waits until there's one. |
| 845 | * Preallocation provides a forward progress guarantee and we do not return |
| 846 | * errors. |
| 847 | */ |
| 848 | static struct list_head *__find_workspace(int type, bool heuristic) |
| 849 | { |
| 850 | struct list_head *workspace; |
| 851 | int cpus = num_online_cpus(); |
| 852 | int idx = type - 1; |
| 853 | unsigned nofs_flag; |
| 854 | struct list_head *idle_ws; |
| 855 | spinlock_t *ws_lock; |
| 856 | atomic_t *total_ws; |
| 857 | wait_queue_head_t *ws_wait; |
| 858 | int *free_ws; |
| 859 | |
| 860 | if (heuristic) { |
| 861 | idle_ws = &btrfs_heuristic_ws.idle_ws; |
| 862 | ws_lock = &btrfs_heuristic_ws.ws_lock; |
| 863 | total_ws = &btrfs_heuristic_ws.total_ws; |
| 864 | ws_wait = &btrfs_heuristic_ws.ws_wait; |
| 865 | free_ws = &btrfs_heuristic_ws.free_ws; |
| 866 | } else { |
| 867 | idle_ws = &btrfs_comp_ws[idx].idle_ws; |
| 868 | ws_lock = &btrfs_comp_ws[idx].ws_lock; |
| 869 | total_ws = &btrfs_comp_ws[idx].total_ws; |
| 870 | ws_wait = &btrfs_comp_ws[idx].ws_wait; |
| 871 | free_ws = &btrfs_comp_ws[idx].free_ws; |
| 872 | } |
| 873 | |
| 874 | again: |
| 875 | spin_lock(ws_lock); |
| 876 | if (!list_empty(idle_ws)) { |
| 877 | workspace = idle_ws->next; |
| 878 | list_del(workspace); |
| 879 | (*free_ws)--; |
| 880 | spin_unlock(ws_lock); |
| 881 | return workspace; |
| 882 | |
| 883 | } |
| 884 | if (atomic_read(total_ws) > cpus) { |
| 885 | DEFINE_WAIT(wait); |
| 886 | |
| 887 | spin_unlock(ws_lock); |
| 888 | prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE); |
| 889 | if (atomic_read(total_ws) > cpus && !*free_ws) |
| 890 | schedule(); |
| 891 | finish_wait(ws_wait, &wait); |
| 892 | goto again; |
| 893 | } |
| 894 | atomic_inc(total_ws); |
| 895 | spin_unlock(ws_lock); |
| 896 | |
| 897 | /* |
| 898 | * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have |
| 899 | * to turn it off here because we might get called from the restricted |
| 900 | * context of btrfs_compress_bio/btrfs_compress_pages |
| 901 | */ |
| 902 | nofs_flag = memalloc_nofs_save(); |
| 903 | if (heuristic) |
| 904 | workspace = alloc_heuristic_ws(); |
| 905 | else |
| 906 | workspace = btrfs_compress_op[idx]->alloc_workspace(); |
| 907 | memalloc_nofs_restore(nofs_flag); |
| 908 | |
| 909 | if (IS_ERR(workspace)) { |
| 910 | atomic_dec(total_ws); |
| 911 | wake_up(ws_wait); |
| 912 | |
| 913 | /* |
| 914 | * Do not return the error but go back to waiting. There's a |
| 915 | * workspace preallocated for each type and the compression |
| 916 | * time is bounded so we get to a workspace eventually. This |
| 917 | * makes our caller's life easier. |
| 918 | * |
| 919 | * To prevent silent and low-probability deadlocks (when the |
| 920 | * initial preallocation fails), check if there are any |
| 921 | * workspaces at all. |
| 922 | */ |
| 923 | if (atomic_read(total_ws) == 0) { |
| 924 | static DEFINE_RATELIMIT_STATE(_rs, |
| 925 | /* once per minute */ 60 * HZ, |
| 926 | /* no burst */ 1); |
| 927 | |
| 928 | if (__ratelimit(&_rs)) { |
| 929 | pr_warn("BTRFS: no compression workspaces, low memory, retrying\n"); |
| 930 | } |
| 931 | } |
| 932 | goto again; |
| 933 | } |
| 934 | return workspace; |
| 935 | } |
| 936 | |
| 937 | static struct list_head *find_workspace(int type) |
| 938 | { |
| 939 | return __find_workspace(type, false); |
| 940 | } |
| 941 | |
| 942 | /* |
| 943 | * put a workspace struct back on the list or free it if we have enough |
| 944 | * idle ones sitting around |
| 945 | */ |
| 946 | static void __free_workspace(int type, struct list_head *workspace, |
| 947 | bool heuristic) |
| 948 | { |
| 949 | int idx = type - 1; |
| 950 | struct list_head *idle_ws; |
| 951 | spinlock_t *ws_lock; |
| 952 | atomic_t *total_ws; |
| 953 | wait_queue_head_t *ws_wait; |
| 954 | int *free_ws; |
| 955 | |
| 956 | if (heuristic) { |
| 957 | idle_ws = &btrfs_heuristic_ws.idle_ws; |
| 958 | ws_lock = &btrfs_heuristic_ws.ws_lock; |
| 959 | total_ws = &btrfs_heuristic_ws.total_ws; |
| 960 | ws_wait = &btrfs_heuristic_ws.ws_wait; |
| 961 | free_ws = &btrfs_heuristic_ws.free_ws; |
| 962 | } else { |
| 963 | idle_ws = &btrfs_comp_ws[idx].idle_ws; |
| 964 | ws_lock = &btrfs_comp_ws[idx].ws_lock; |
| 965 | total_ws = &btrfs_comp_ws[idx].total_ws; |
| 966 | ws_wait = &btrfs_comp_ws[idx].ws_wait; |
| 967 | free_ws = &btrfs_comp_ws[idx].free_ws; |
| 968 | } |
| 969 | |
| 970 | spin_lock(ws_lock); |
| 971 | if (*free_ws <= num_online_cpus()) { |
| 972 | list_add(workspace, idle_ws); |
| 973 | (*free_ws)++; |
| 974 | spin_unlock(ws_lock); |
| 975 | goto wake; |
| 976 | } |
| 977 | spin_unlock(ws_lock); |
| 978 | |
| 979 | if (heuristic) |
| 980 | free_heuristic_ws(workspace); |
| 981 | else |
| 982 | btrfs_compress_op[idx]->free_workspace(workspace); |
| 983 | atomic_dec(total_ws); |
| 984 | wake: |
| 985 | cond_wake_up(ws_wait); |
| 986 | } |
| 987 | |
| 988 | static void free_workspace(int type, struct list_head *ws) |
| 989 | { |
| 990 | return __free_workspace(type, ws, false); |
| 991 | } |
| 992 | |
| 993 | /* |
| 994 | * cleanup function for module exit |
| 995 | */ |
| 996 | static void free_workspaces(void) |
| 997 | { |
| 998 | struct list_head *workspace; |
| 999 | int i; |
| 1000 | |
| 1001 | while (!list_empty(&btrfs_heuristic_ws.idle_ws)) { |
| 1002 | workspace = btrfs_heuristic_ws.idle_ws.next; |
| 1003 | list_del(workspace); |
| 1004 | free_heuristic_ws(workspace); |
| 1005 | atomic_dec(&btrfs_heuristic_ws.total_ws); |
| 1006 | } |
| 1007 | |
| 1008 | for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) { |
| 1009 | while (!list_empty(&btrfs_comp_ws[i].idle_ws)) { |
| 1010 | workspace = btrfs_comp_ws[i].idle_ws.next; |
| 1011 | list_del(workspace); |
| 1012 | btrfs_compress_op[i]->free_workspace(workspace); |
| 1013 | atomic_dec(&btrfs_comp_ws[i].total_ws); |
| 1014 | } |
| 1015 | } |
| 1016 | } |
| 1017 | |
| 1018 | /* |
| 1019 | * Given an address space and start and length, compress the bytes into @pages |
| 1020 | * that are allocated on demand. |
| 1021 | * |
| 1022 | * @type_level is encoded algorithm and level, where level 0 means whatever |
| 1023 | * default the algorithm chooses and is opaque here; |
| 1024 | * - the compression algorithm is stored in bits 0-3 |
| 1025 | * - the level is stored in bits 4-7 |
| 1026 | * |
| 1027 | * @out_pages is an in/out parameter, holds maximum number of pages to allocate |
| 1028 | * and returns number of actually allocated pages |
| 1029 | * |
| 1030 | * @total_in is used to return the number of bytes actually read. It |
| 1031 | * may be smaller than the input length if we had to exit early because we |
| 1032 | * ran out of room in the pages array or because we cross the |
| 1033 | * max_out threshold. |
| 1034 | * |
| 1035 | * @total_out is an in/out parameter, must be set to the input length and will |
| 1036 | * be also used to return the total number of compressed bytes |
| 1037 | * |
| 1038 | * @max_out tells us the max number of bytes that we're allowed to |
| 1039 | * stuff into pages |
| 1040 | */ |
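| | /* |
| | * Illustrative example of the encoding (not taken from a specific caller): |
| | * zlib at level 9 would be passed as BTRFS_COMPRESS_ZLIB | (9 << 4), |
| | * i.e. 0x91; the function below masks the low 4 bits to recover the |
| | * algorithm and hands the full value to set_level() so the algorithm can |
| | * pick up the level. |
| | */ |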
| 1041 | int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping, |
| 1042 | u64 start, struct page **pages, |
| 1043 | unsigned long *out_pages, |
| 1044 | unsigned long *total_in, |
| 1045 | unsigned long *total_out) |
| 1046 | { |
| 1047 | struct list_head *workspace; |
| 1048 | int ret; |
| 1049 | int type = type_level & 0xF; |
| 1050 | |
| 1051 | workspace = find_workspace(type); |
| 1052 | |
| 1053 | btrfs_compress_op[type - 1]->set_level(workspace, type_level); |
| 1054 | ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping, |
| 1055 | start, pages, |
| 1056 | out_pages, |
| 1057 | total_in, total_out); |
| 1058 | free_workspace(type, workspace); |
| 1059 | return ret; |
| 1060 | } |
| 1061 | |
| 1062 | /* |
| 1063 | * cb->compressed_pages is an array of pages with compressed data. |
| 1064 | * |
| 1065 | * cb->start is the starting logical offset of this array in the file |
| 1066 | * |
| 1067 | * cb->orig_bio contains the pages from the file that we want to decompress into |
| 1068 | * |
| 1069 | * cb->compressed_len is the number of bytes of compressed data |
| 1070 | * |
| 1071 | * The basic idea is that we have a bio that was created by readpages. |
| 1072 | * The pages in the bio are for the uncompressed data, and they may not |
| 1073 | * be contiguous. They all correspond to the range of bytes covered by |
| 1074 | * the compressed extent. |
| 1075 | */ |
| 1076 | static int btrfs_decompress_bio(struct compressed_bio *cb) |
| 1077 | { |
| 1078 | struct list_head *workspace; |
| 1079 | int ret; |
| 1080 | int type = cb->compress_type; |
| 1081 | |
| 1082 | workspace = find_workspace(type); |
| 1083 | ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, cb); |
| 1084 | free_workspace(type, workspace); |
| 1085 | |
| 1086 | return ret; |
| 1087 | } |
| 1088 | |
| 1089 | /* |
| 1090 | * a less complex decompression routine. Our compressed data fits in a |
| 1091 | * single page, and we want to read a single page out of it. |
| 1092 | * start_byte tells us the offset into the compressed data we're interested in |
| 1093 | */ |
| 1094 | int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, |
| 1095 | unsigned long start_byte, size_t srclen, size_t destlen) |
| 1096 | { |
| 1097 | struct list_head *workspace; |
| 1098 | int ret; |
| 1099 | |
| 1100 | workspace = find_workspace(type); |
| 1101 | |
| 1102 | ret = btrfs_compress_op[type-1]->decompress(workspace, data_in, |
| 1103 | dest_page, start_byte, |
| 1104 | srclen, destlen); |
| 1105 | |
| 1106 | free_workspace(type, workspace); |
| 1107 | return ret; |
| 1108 | } |
| 1109 | |
| 1110 | void __cold btrfs_exit_compress(void) |
| 1111 | { |
| 1112 | free_workspaces(); |
| 1113 | } |
| 1114 | |
| 1115 | /* |
| 1116 | * Copy uncompressed data from working buffer to pages. |
| 1117 | * |
| 1118 | * buf_start is the byte offset of the start of the working buffer within the |
| 1119 | * decompressed data |
| 1120 | * total_out is the offset of the end of the working buffer within the decompressed data |
| 1121 | */ |
| 1122 | int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start, |
| 1123 | unsigned long total_out, u64 disk_start, |
| 1124 | struct bio *bio) |
| 1125 | { |
| 1126 | unsigned long buf_offset; |
| 1127 | unsigned long current_buf_start; |
| 1128 | unsigned long start_byte; |
| 1129 | unsigned long prev_start_byte; |
| 1130 | unsigned long working_bytes = total_out - buf_start; |
| 1131 | unsigned long bytes; |
| 1132 | char *kaddr; |
| 1133 | struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter); |
| 1134 | |
| 1135 | /* |
| 1136 | * start_byte is the offset of the page we're currently copying into, |
| 1137 | * relative to the start of the decompressed data. |
| 1138 | */ |
| 1139 | start_byte = page_offset(bvec.bv_page) - disk_start; |
| 1140 | |
| 1141 | /* we haven't yet hit data corresponding to this page */ |
| 1142 | if (total_out <= start_byte) |
| 1143 | return 1; |
| 1144 | |
| 1145 | /* |
| 1146 | * the start of the data we care about is offset into |
| 1147 | * the middle of our working buffer |
| 1148 | */ |
| 1149 | if (total_out > start_byte && buf_start < start_byte) { |
| 1150 | buf_offset = start_byte - buf_start; |
| 1151 | working_bytes -= buf_offset; |
| 1152 | } else { |
| 1153 | buf_offset = 0; |
| 1154 | } |
| 1155 | current_buf_start = buf_start; |
| 1156 | |
| 1157 | /* copy bytes from the working buffer into the pages */ |
| 1158 | while (working_bytes > 0) { |
| 1159 | bytes = min_t(unsigned long, bvec.bv_len, |
| 1160 | PAGE_SIZE - buf_offset); |
| 1161 | bytes = min(bytes, working_bytes); |
| 1162 | |
| 1163 | kaddr = kmap_atomic(bvec.bv_page); |
| 1164 | memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes); |
| 1165 | kunmap_atomic(kaddr); |
| 1166 | flush_dcache_page(bvec.bv_page); |
| 1167 | |
| 1168 | buf_offset += bytes; |
| 1169 | working_bytes -= bytes; |
| 1170 | current_buf_start += bytes; |
| 1171 | |
| 1172 | /* check if we need to pick another page */ |
| 1173 | bio_advance(bio, bytes); |
| 1174 | if (!bio->bi_iter.bi_size) |
| 1175 | return 0; |
| 1176 | bvec = bio_iter_iovec(bio, bio->bi_iter); |
| 1177 | prev_start_byte = start_byte; |
| 1178 | start_byte = page_offset(bvec.bv_page) - disk_start; |
| 1179 | |
| 1180 | /* |
| 1181 | * We need to make sure we're only adjusting |
| 1182 | * our offset into the compression working buffer when |
| 1183 | * we're switching pages. Otherwise we could incorrectly |
| 1184 | * keep copying when we were actually done. |
| 1185 | */ |
| 1186 | if (start_byte != prev_start_byte) { |
| 1187 | /* |
| 1188 | * make sure our new page is covered by this |
| 1189 | * working buffer |
| 1190 | */ |
| 1191 | if (total_out <= start_byte) |
| 1192 | return 1; |
| 1193 | |
| 1194 | /* |
| 1195 | * the next page in the biovec might not be adjacent |
| 1196 | * to the last page, but it might still be found |
| 1197 | * inside this working buffer. bump our offset pointer |
| 1198 | */ |
| 1199 | if (total_out > start_byte && |
| 1200 | current_buf_start < start_byte) { |
| 1201 | buf_offset = start_byte - buf_start; |
| 1202 | working_bytes = total_out - start_byte; |
| 1203 | current_buf_start = buf_start + buf_offset; |
| 1204 | } |
| 1205 | } |
| 1206 | } |
| 1207 | |
| 1208 | return 1; |
| 1209 | } |
| 1210 | |
| 1211 | /* |
| 1212 | * Shannon Entropy calculation |
| 1213 | * |
| 1214 | * Pure byte distribution analysis fails to determine compressibility of data. |
| 1215 | * Try calculating entropy to estimate the average minimum number of bits |
| 1216 | * needed to encode the sampled data. |
| 1217 | * |
| 1218 | * For convenience, return the percentage of needed bits, instead of amount of |
| 1219 | * bits directly. |
| 1220 | * |
| 1221 | * @ENTROPY_LVL_ACEPTABLE - below that threshold, the sample has low byte entropy |
| 1222 | * and is likely compressible |
| 1223 | * |
| 1224 | * @ENTROPY_LVL_HIGH - data are not compressible with high probability |
| 1225 | * |
| 1226 | * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate. |
| 1227 | */ |
| 1228 | #define ENTROPY_LVL_ACEPTABLE (65) |
| 1229 | #define ENTROPY_LVL_HIGH (80) |
| 1230 | |
| 1231 | /* |
| 1232 | * For increased precision in the shannon_entropy calculation, |
| 1233 | * let's do pow(n, M) to keep more digits after the binary point: |
| 1234 | * |
| 1235 | * - maximum int bit length is 64 |
| 1236 | * - ilog2(MAX_SAMPLE_SIZE) -> 13 |
| 1237 | * - 13 * 4 = 52 < 64 -> M = 4 |
| 1238 | * |
| 1239 | * So use pow(n, 4). |
| 1240 | */ |
| 1241 | static inline u32 ilog2_w(u64 n) |
| 1242 | { |
| 1243 | return ilog2(n * n * n * n); |
| 1244 | } |
| 1245 | |
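| | /* |
| | * Sketch of the fixed-point math below (informal): since ilog2_w(n) is |
| | * roughly 4 * log2(n), entropy_sum accumulates |
| | * count * 4 * log2(sample_size / count); divided by sample_size that is |
| | * ~4x the Shannon entropy in bits per byte, and entropy_max |
| | * (8 * ilog2_w(2) == 32) is the maximum on the same scale, so the return |
| | * value is a percentage of the 8 bits-per-byte maximum. |
| | */ |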
| 1246 | static u32 shannon_entropy(struct heuristic_ws *ws) |
| 1247 | { |
| 1248 | const u32 entropy_max = 8 * ilog2_w(2); |
| 1249 | u32 entropy_sum = 0; |
| 1250 | u32 p, p_base, sz_base; |
| 1251 | u32 i; |
| 1252 | |
| 1253 | sz_base = ilog2_w(ws->sample_size); |
| 1254 | for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) { |
| 1255 | p = ws->bucket[i].count; |
| 1256 | p_base = ilog2_w(p); |
| 1257 | entropy_sum += p * (sz_base - p_base); |
| 1258 | } |
| 1259 | |
| 1260 | entropy_sum /= ws->sample_size; |
| 1261 | return entropy_sum * 100 / entropy_max; |
| 1262 | } |
| 1263 | |
| 1264 | #define RADIX_BASE 4U |
| 1265 | #define COUNTERS_SIZE (1U << RADIX_BASE) |
| 1266 | |
| 1267 | static u8 get4bits(u64 num, int shift) { |
| 1268 | u8 low4bits; |
| 1269 | |
| 1270 | num >>= shift; |
| 1271 | /* Reverse order */ |
| 1272 | low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE); |
| 1273 | return low4bits; |
| 1274 | } |
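| | /* |
| | * Illustrative example: with shift == 0 and num == 5, num % 16 == 5 and |
| | * get4bits() returns 15 - 5 == 10, so larger digits map to smaller keys and |
| | * the radix sort below ends up ordering the buckets by descending count, |
| | * which is what byte_core_set_size() relies on. |
| | */ |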
| 1275 | |
| 1276 | /* |
| 1277 | * Use 4 bits as radix base |
| 1278 | * Use 16 u32 counters for calculating the new position in the buf array |
| 1279 | * |
| 1280 | * @array - array that will be sorted |
| 1281 | * @array_buf - buffer array to store sorting results |
| 1282 | * must be equal in size to @array |
| 1283 | * @num - array size |
| 1284 | */ |
| 1285 | static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf, |
| 1286 | int num) |
| 1287 | { |
| 1288 | u64 max_num; |
| 1289 | u64 buf_num; |
| 1290 | u32 counters[COUNTERS_SIZE]; |
| 1291 | u32 new_addr; |
| 1292 | u32 addr; |
| 1293 | int bitlen; |
| 1294 | int shift; |
| 1295 | int i; |
| 1296 | |
| 1297 | /* |
| 1298 | * Try to avoid useless loop iterations for small numbers stored in big |
| 1299 | * counters. Example: 48 33 4 ... in a 64bit array |
| 1300 | */ |
| 1301 | max_num = array[0].count; |
| 1302 | for (i = 1; i < num; i++) { |
| 1303 | buf_num = array[i].count; |
| 1304 | if (buf_num > max_num) |
| 1305 | max_num = buf_num; |
| 1306 | } |
| 1307 | |
| 1308 | buf_num = ilog2(max_num); |
| 1309 | bitlen = ALIGN(buf_num, RADIX_BASE * 2); |
| 1310 | |
| 1311 | shift = 0; |
| 1312 | while (shift < bitlen) { |
| 1313 | memset(counters, 0, sizeof(counters)); |
| 1314 | |
| 1315 | for (i = 0; i < num; i++) { |
| 1316 | buf_num = array[i].count; |
| 1317 | addr = get4bits(buf_num, shift); |
| 1318 | counters[addr]++; |
| 1319 | } |
| 1320 | |
| 1321 | for (i = 1; i < COUNTERS_SIZE; i++) |
| 1322 | counters[i] += counters[i - 1]; |
| 1323 | |
| 1324 | for (i = num - 1; i >= 0; i--) { |
| 1325 | buf_num = array[i].count; |
| 1326 | addr = get4bits(buf_num, shift); |
| 1327 | counters[addr]--; |
| 1328 | new_addr = counters[addr]; |
| 1329 | array_buf[new_addr] = array[i]; |
| 1330 | } |
| 1331 | |
| 1332 | shift += RADIX_BASE; |
| 1333 | |
| 1334 | /* |
| 1335 | * A normal radix sort moves the data back from the temporary array to |
| 1336 | * the main one, but that costs some CPU time. Avoid that |
| 1337 | * by doing another sort iteration into the original array instead of |
| 1338 | * a memcpy() |
| 1339 | */ |
| 1340 | memset(counters, 0, sizeof(counters)); |
| 1341 | |
| 1342 | for (i = 0; i < num; i++) { |
| 1343 | buf_num = array_buf[i].count; |
| 1344 | addr = get4bits(buf_num, shift); |
| 1345 | counters[addr]++; |
| 1346 | } |
| 1347 | |
| 1348 | for (i = 1; i < COUNTERS_SIZE; i++) |
| 1349 | counters[i] += counters[i - 1]; |
| 1350 | |
| 1351 | for (i = num - 1; i >= 0; i--) { |
| 1352 | buf_num = array_buf[i].count; |
| 1353 | addr = get4bits(buf_num, shift); |
| 1354 | counters[addr]--; |
| 1355 | new_addr = counters[addr]; |
| 1356 | array[new_addr] = array_buf[i]; |
| 1357 | } |
| 1358 | |
| 1359 | shift += RADIX_BASE; |
| 1360 | } |
| 1361 | } |
| 1362 | |
| 1363 | /* |
| 1364 | * Size of the core byte set - how many bytes cover 90% of the sample |
| 1365 | * |
| 1366 | * There are several types of structured binary data that use nearly all byte |
| 1367 | * values. The distribution can be uniform and counts in all buckets will be |
| 1368 | * nearly the same (eg. encrypted data). Unlikely to be compressible. |
| 1369 | * |
| 1370 | * Another possibility is a normal (Gaussian) distribution, where the data could |
| 1371 | * be potentially compressible, but we have to take a few more steps to decide |
| 1372 | * how much. |
| 1373 | * |
| 1374 | * @BYTE_CORE_SET_LOW - most byte values are repeated frequently, |
| 1375 | * a compression algorithm can easily handle that |
| 1376 | * @BYTE_CORE_SET_HIGH - the data has a uniform distribution and with high |
| 1377 | * probability is not compressible |
| 1378 | */ |
| 1379 | #define BYTE_CORE_SET_LOW (64) |
| 1380 | #define BYTE_CORE_SET_HIGH (200) |
| 1381 | |
| 1382 | static int byte_core_set_size(struct heuristic_ws *ws) |
| 1383 | { |
| 1384 | u32 i; |
| 1385 | u32 coreset_sum = 0; |
| 1386 | const u32 core_set_threshold = ws->sample_size * 90 / 100; |
| 1387 | struct bucket_item *bucket = ws->bucket; |
| 1388 | |
| 1389 | /* Sort in reverse order */ |
| 1390 | radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE); |
| 1391 | |
| 1392 | for (i = 0; i < BYTE_CORE_SET_LOW; i++) |
| 1393 | coreset_sum += bucket[i].count; |
| 1394 | |
| 1395 | if (coreset_sum > core_set_threshold) |
| 1396 | return i; |
| 1397 | |
| 1398 | for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) { |
| 1399 | coreset_sum += bucket[i].count; |
| 1400 | if (coreset_sum > core_set_threshold) |
| 1401 | break; |
| 1402 | } |
| 1403 | |
| 1404 | return i; |
| 1405 | } |
| 1406 | |
| 1407 | /* |
| 1408 | * Count byte values in buckets. |
| 1409 | * This heuristic can detect textual data (configs, xml, json, html, etc). |
| 1410 | * In most text-like data the byte set is restricted to a limited number of |
| 1411 | * possible characters, and that restriction in most cases makes the data easy |
| 1412 | * to compress. |
| 1413 | * |
| 1414 | * @BYTE_SET_THRESHOLD - consider all data within this byte set size: |
| 1415 | * less - compressible |
| 1416 | * more - need additional analysis |
| 1417 | */ |
| 1418 | #define BYTE_SET_THRESHOLD (64) |
| 1419 | |
| 1420 | static u32 byte_set_size(const struct heuristic_ws *ws) |
| 1421 | { |
| 1422 | u32 i; |
| 1423 | u32 byte_set_size = 0; |
| 1424 | |
| 1425 | for (i = 0; i < BYTE_SET_THRESHOLD; i++) { |
| 1426 | if (ws->bucket[i].count > 0) |
| 1427 | byte_set_size++; |
| 1428 | } |
| 1429 | |
| 1430 | /* |
| 1431 | * Continue collecting count of byte values in buckets. If the byte |
| 1432 | * set size is bigger than the threshold, it's pointless to continue, |
| 1433 | * the detection technique would fail for this type of data. |
| 1434 | */ |
| 1435 | for (; i < BUCKET_SIZE; i++) { |
| 1436 | if (ws->bucket[i].count > 0) { |
| 1437 | byte_set_size++; |
| 1438 | if (byte_set_size > BYTE_SET_THRESHOLD) |
| 1439 | return byte_set_size; |
| 1440 | } |
| 1441 | } |
| 1442 | |
| 1443 | return byte_set_size; |
| 1444 | } |
| 1445 | |
| 1446 | static bool sample_repeated_patterns(struct heuristic_ws *ws) |
| 1447 | { |
| 1448 | const u32 half_of_sample = ws->sample_size / 2; |
| 1449 | const u8 *data = ws->sample; |
| 1450 | |
| 1451 | return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0; |
| 1452 | } |
| 1453 | |
| 1454 | static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end, |
| 1455 | struct heuristic_ws *ws) |
| 1456 | { |
| 1457 | struct page *page; |
| 1458 | u64 index, index_end; |
| 1459 | u32 i, curr_sample_pos; |
| 1460 | u8 *in_data; |
| 1461 | |
| 1462 | /* |
| 1463 | * Compression handles the input data by chunks of 128KiB |
| 1464 | * (defined by BTRFS_MAX_UNCOMPRESSED) |
| 1465 | * |
| 1466 | * We do the same for the heuristic and loop over the whole range. |
| 1467 | * |
| 1468 | * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will |
| 1469 | * process no more than BTRFS_MAX_UNCOMPRESSED at a time. |
| 1470 | */ |
| 1471 | if (end - start > BTRFS_MAX_UNCOMPRESSED) |
| 1472 | end = start + BTRFS_MAX_UNCOMPRESSED; |
| 1473 | |
| 1474 | index = start >> PAGE_SHIFT; |
| 1475 | index_end = end >> PAGE_SHIFT; |
| 1476 | |
| 1477 | /* Don't miss unaligned end */ |
| 1478 | if (!IS_ALIGNED(end, PAGE_SIZE)) |
| 1479 | index_end++; |
| 1480 | |
| 1481 | curr_sample_pos = 0; |
| 1482 | while (index < index_end) { |
| 1483 | page = find_get_page(inode->i_mapping, index); |
| 1484 | in_data = kmap(page); |
| 1485 | /* Handle case where the start is not aligned to PAGE_SIZE */ |
| 1486 | i = start % PAGE_SIZE; |
| 1487 | while (i < PAGE_SIZE - SAMPLING_READ_SIZE) { |
| 1488 | /* Don't sample any garbage from the last page */ |
| 1489 | if (start > end - SAMPLING_READ_SIZE) |
| 1490 | break; |
| 1491 | memcpy(&ws->sample[curr_sample_pos], &in_data[i], |
| 1492 | SAMPLING_READ_SIZE); |
| 1493 | i += SAMPLING_INTERVAL; |
| 1494 | start += SAMPLING_INTERVAL; |
| 1495 | curr_sample_pos += SAMPLING_READ_SIZE; |
| 1496 | } |
| 1497 | kunmap(page); |
| 1498 | put_page(page); |
| 1499 | |
| 1500 | index++; |
| 1501 | } |
| 1502 | |
| 1503 | ws->sample_size = curr_sample_pos; |
| 1504 | } |
| 1505 | |
| 1506 | /* |
| 1507 | * Compression heuristic. |
| 1508 | * |
| 1509 | * For now it's a naive and optimistic 'return true'; we'll extend the logic to |
| 1510 | * quickly (compared to direct compression) detect data characteristics |
| 1511 | * (compressible/uncompressible) to avoid wasting CPU time on uncompressible |
| 1512 | * data. |
| 1513 | * |
| 1514 | * The following types of analysis can be performed: |
| 1515 | * - detect mostly zero data |
| 1516 | * - detect data with low "byte set" size (text, etc) |
| 1517 | * - detect data with low/high "core byte" set |
| 1518 | * |
| 1519 | * Return non-zero if the compression should be done, 0 otherwise. |
| 1520 | */ |
| 1521 | int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end) |
| 1522 | { |
| 1523 | struct list_head *ws_list = __find_workspace(0, true); |
| 1524 | struct heuristic_ws *ws; |
| 1525 | u32 i; |
| 1526 | u8 byte; |
| 1527 | int ret = 0; |
| 1528 | |
| 1529 | ws = list_entry(ws_list, struct heuristic_ws, list); |
| 1530 | |
| 1531 | heuristic_collect_sample(inode, start, end, ws); |
| 1532 | |
| 1533 | if (sample_repeated_patterns(ws)) { |
| 1534 | ret = 1; |
| 1535 | goto out; |
| 1536 | } |
| 1537 | |
| 1538 | memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE); |
| 1539 | |
| 1540 | for (i = 0; i < ws->sample_size; i++) { |
| 1541 | byte = ws->sample[i]; |
| 1542 | ws->bucket[byte].count++; |
| 1543 | } |
| 1544 | |
| 1545 | i = byte_set_size(ws); |
| 1546 | if (i < BYTE_SET_THRESHOLD) { |
| 1547 | ret = 2; |
| 1548 | goto out; |
| 1549 | } |
| 1550 | |
| 1551 | i = byte_core_set_size(ws); |
| 1552 | if (i <= BYTE_CORE_SET_LOW) { |
| 1553 | ret = 3; |
| 1554 | goto out; |
| 1555 | } |
| 1556 | |
| 1557 | if (i >= BYTE_CORE_SET_HIGH) { |
| 1558 | ret = 0; |
| 1559 | goto out; |
| 1560 | } |
| 1561 | |
| 1562 | i = shannon_entropy(ws); |
| 1563 | if (i <= ENTROPY_LVL_ACEPTABLE) { |
| 1564 | ret = 4; |
| 1565 | goto out; |
| 1566 | } |
| 1567 | |
| 1568 | /* |
| 1569 | * For the levels below ENTROPY_LVL_HIGH, additional analysis would be |
| 1570 | * needed to give green light to compression. |
| 1571 | * |
| 1572 | * For now just assume that compression at that level is not worth the |
| 1573 | * resources because: |
| 1574 | * |
| 1575 | * 1. it is possible to defrag the data later |
| 1576 | * |
| 1577 | * 2. the data would turn out to be hardly compressible, eg. 150 distinct byte |
| 1578 | * values, each bucket with a count around 54. The heuristic would |
| 1579 | * be confused. This can happen when data have some internal repeated |
| 1580 | * patterns like "abbacbbc...". This can be detected by analyzing |
| 1581 | * pairs of bytes, which is too costly. |
| 1582 | */ |
| 1583 | if (i < ENTROPY_LVL_HIGH) { |
| 1584 | ret = 5; |
| 1585 | goto out; |
| 1586 | } else { |
| 1587 | ret = 0; |
| 1588 | goto out; |
| 1589 | } |
| 1590 | |
| 1591 | out: |
| 1592 | __free_workspace(0, ws_list, true); |
| 1593 | return ret; |
| 1594 | } |
| 1595 | |
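| | /* |
| | * Accepted forms, derived from the checks below (informal summary): |
| | * "zlib:1" .. "zlib:9" return 1..9, a bare "zlib" or a malformed level |
| | * (eg. "zlib:0", "zlib:10") falls back to BTRFS_ZLIB_DEFAULT_LEVEL, and |
| | * anything not starting with "zlib" returns 0. |
| | */ |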
| 1596 | unsigned int btrfs_compress_str2level(const char *str) |
| 1597 | { |
| 1598 | if (strncmp(str, "zlib", 4) != 0) |
| 1599 | return 0; |
| 1600 | |
| 1601 | /* Accepted form: zlib:1 up to zlib:9 and nothing left after the number */ |
| 1602 | if (str[4] == ':' && '1' <= str[5] && str[5] <= '9' && str[6] == 0) |
| 1603 | return str[5] - '0'; |
| 1604 | |
| 1605 | return BTRFS_ZLIB_DEFAULT_LEVEL; |
| 1606 | } |