/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_IO_H
#define BTRFS_EXTENT_IO_H

#include <linux/rbtree.h>
#include <linux/refcount.h>
#include "ulist.h"

/* bits for the extent state */
#define EXTENT_DIRTY		(1U << 0)
#define EXTENT_UPTODATE		(1U << 1)
#define EXTENT_LOCKED		(1U << 2)
#define EXTENT_NEW		(1U << 3)
#define EXTENT_DELALLOC		(1U << 4)
#define EXTENT_DEFRAG		(1U << 5)
#define EXTENT_BOUNDARY		(1U << 6)
#define EXTENT_NODATASUM	(1U << 7)
#define EXTENT_CLEAR_META_RESV	(1U << 8)
#define EXTENT_NEED_WAIT	(1U << 9)
#define EXTENT_DAMAGED		(1U << 10)
#define EXTENT_NORESERVE	(1U << 11)
#define EXTENT_QGROUP_RESERVED	(1U << 12)
#define EXTENT_CLEAR_DATA_RESV	(1U << 13)
#define EXTENT_DELALLOC_NEW	(1U << 14)
#define EXTENT_DO_ACCOUNTING	(EXTENT_CLEAR_META_RESV | \
				 EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING)

/*
 * Redefined bits above which are used only in the device allocation tree.
 * These shouldn't use EXTENT_LOCKED / EXTENT_BOUNDARY / EXTENT_CLEAR_META_RESV
 * / EXTENT_CLEAR_DATA_RESV because those have special meaning to the bit
 * manipulation functions.
 */
#define CHUNK_ALLOCATED		EXTENT_DIRTY
#define CHUNK_TRIMMED		EXTENT_DEFRAG
#define CHUNK_STATE_MASK	(CHUNK_ALLOCATED | \
				 CHUNK_TRIMMED)

/*
 * Flags for bio submission. The high bits indicate the compression
 * type for this bio.
 */
#define EXTENT_BIO_COMPRESSED 1
#define EXTENT_BIO_FLAG_SHIFT 16

enum {
	EXTENT_BUFFER_UPTODATE,
	EXTENT_BUFFER_DIRTY,
	EXTENT_BUFFER_CORRUPT,
	/* this got triggered by readahead */
	EXTENT_BUFFER_READAHEAD,
	EXTENT_BUFFER_TREE_REF,
	EXTENT_BUFFER_STALE,
	EXTENT_BUFFER_WRITEBACK,
	/* read IO error */
	EXTENT_BUFFER_READ_ERR,
	EXTENT_BUFFER_UNMAPPED,
	EXTENT_BUFFER_IN_TREE,
	/* write IO error */
	EXTENT_BUFFER_WRITE_ERR,
};

/* these are flags for __process_pages_contig */
#define PAGE_UNLOCK		(1 << 0)
#define PAGE_CLEAR_DIRTY	(1 << 1)
#define PAGE_SET_WRITEBACK	(1 << 2)
#define PAGE_END_WRITEBACK	(1 << 3)
#define PAGE_SET_PRIVATE2	(1 << 4)
#define PAGE_SET_ERROR		(1 << 5)
#define PAGE_LOCK		(1 << 6)

/*
 * page->private values. Every page that is controlled by the extent
 * map has page->private set to one.
 */
#define EXTENT_PAGE_PRIVATE 1

/*
 * The extent buffer bitmap operations are done with byte granularity instead of
 * word granularity for two reasons:
 * 1. The bitmaps must be little-endian on disk.
 * 2. Bitmap items are not guaranteed to be aligned to a word and therefore a
 *    single word in a bitmap may straddle two pages in the extent buffer.
 */
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
#define BITMAP_FIRST_BYTE_MASK(start) \
	((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
#define BITMAP_LAST_BYTE_MASK(nbits) \
	(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))
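
/*
 * Worked example (a sketch, not from the original header): with
 * BITS_PER_BYTE == 8, BYTE_MASK is 0xff. For a bitmap range starting at
 * bit 3 and spanning 11 bits:
 *
 *	BITMAP_FIRST_BYTE_MASK(3) == (0xff << 3) & 0xff == 0xf8
 *		(keeps bits 3..7 of the first byte)
 *	BITMAP_LAST_BYTE_MASK(11) == 0xff >> (-11 & 7) == 0xff >> 5 == 0x07
 *		(keeps bits 0..2 of the last byte, since 11 = 8 + 3)
 */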

struct extent_state;
struct btrfs_root;
struct btrfs_inode;
struct btrfs_io_bio;
struct io_failure_record;

typedef blk_status_t (extent_submit_bio_start_t)(void *private_data,
		struct bio *bio, u64 bio_offset);

struct extent_io_ops {
	/*
	 * The following callbacks must always be defined; the function
	 * pointers are called unconditionally.
	 */
	blk_status_t (*submit_bio_hook)(struct inode *inode, struct bio *bio,
					int mirror_num, unsigned long bio_flags);
	int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset,
				    struct page *page, u64 start, u64 end,
				    int mirror);
};

enum {
	IO_TREE_FS_INFO_FREED_EXTENTS0,
	IO_TREE_FS_INFO_FREED_EXTENTS1,
	IO_TREE_INODE_IO,
	IO_TREE_INODE_IO_FAILURE,
	IO_TREE_RELOC_BLOCKS,
	IO_TREE_TRANS_DIRTY_PAGES,
	IO_TREE_ROOT_DIRTY_LOG_PAGES,
	IO_TREE_SELFTEST,
};

struct extent_io_tree {
	struct rb_root state;
	struct btrfs_fs_info *fs_info;
	void *private_data;
	u64 dirty_bytes;
	bool track_uptodate;

	/* Who owns this io tree, should be one of IO_TREE_* */
	u8 owner;

	spinlock_t lock;
	const struct extent_io_ops *ops;
};
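
/*
 * Initialization sketch (illustrative, not from the original header): an
 * io tree is tied to an fs_info and tagged with one of the IO_TREE_*
 * owners, using extent_io_tree_init() declared below:
 *
 *	extent_io_tree_init(fs_info, &tree, IO_TREE_SELFTEST, NULL);
 *	// ... set/clear/test bits on byte ranges of the tree ...
 *	extent_io_tree_release(&tree);
 */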

struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;

	/* ADD NEW ELEMENTS AFTER THIS */
	wait_queue_head_t wq;
	refcount_t refs;
	unsigned state;

	struct io_failure_record *failrec;

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

#define INLINE_EXTENT_BUFFER_PAGES 16
#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE)
struct extent_buffer {
	u64 start;
	unsigned long len;
	unsigned long bflags;
	struct btrfs_fs_info *fs_info;
	spinlock_t refs_lock;
	atomic_t refs;
	atomic_t io_pages;
	int read_mirror;
	struct rcu_head rcu_head;
	pid_t lock_owner;

	int blocking_writers;
	atomic_t blocking_readers;
	bool lock_nested;
	/* >= 0 if eb belongs to a log tree, -1 otherwise */
	short log_index;

	/* protects write locks */
	rwlock_t lock;

	/* readers wait on write_lock_wq for the write
	 * lock holders to unlock
	 */
	wait_queue_head_t write_lock_wq;

	/* writers wait on read_lock_wq for readers
	 * to unlock
	 */
	wait_queue_head_t read_lock_wq;
	struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
#ifdef CONFIG_BTRFS_DEBUG
	int spinning_writers;
	atomic_t spinning_readers;
	atomic_t read_locks;
	int write_locks;
	struct list_head leak_list;
#endif
};

/*
 * Structure to record how many bytes and which ranges are set/cleared
 */
struct extent_changeset {
	/* How many bytes are set/cleared in this operation */
	unsigned int bytes_changed;

	/* Changed ranges */
	struct ulist range_changed;
};

static inline void extent_changeset_init(struct extent_changeset *changeset)
{
	changeset->bytes_changed = 0;
	ulist_init(&changeset->range_changed);
}

static inline struct extent_changeset *extent_changeset_alloc(void)
{
	struct extent_changeset *ret;

	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	extent_changeset_init(ret);
	return ret;
}

static inline void extent_changeset_release(struct extent_changeset *changeset)
{
	if (!changeset)
		return;
	changeset->bytes_changed = 0;
	ulist_release(&changeset->range_changed);
}

static inline void extent_changeset_free(struct extent_changeset *changeset)
{
	if (!changeset)
		return;
	extent_changeset_release(changeset);
	kfree(changeset);
}
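
/*
 * Typical lifecycle (a sketch, not from the original header): callers
 * allocate a changeset, hand it to set_record_extent_bits() or
 * clear_record_extent_bits() declared below so the changed ranges get
 * recorded, then free it:
 *
 *	struct extent_changeset *changeset = extent_changeset_alloc();
 *
 *	if (!changeset)
 *		return -ENOMEM;
 *	ret = set_record_extent_bits(tree, start, end, bits, changeset);
 *	// changeset->bytes_changed and changeset->range_changed now
 *	// describe what actually changed
 *	extent_changeset_free(changeset);
 */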

static inline void extent_set_compress_type(unsigned long *bio_flags,
					    int compress_type)
{
	*bio_flags |= compress_type << EXTENT_BIO_FLAG_SHIFT;
}

static inline int extent_compress_type(unsigned long bio_flags)
{
	return bio_flags >> EXTENT_BIO_FLAG_SHIFT;
}
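
/*
 * Round-trip sketch (illustrative; BTRFS_COMPRESS_ZLIB comes from
 * compression.h and is assumed here, it is not defined in this header):
 *
 *	unsigned long bio_flags = EXTENT_BIO_COMPRESSED;
 *
 *	extent_set_compress_type(&bio_flags, BTRFS_COMPRESS_ZLIB);
 *	// the low 16 bits keep the bio flags and the high bits hold the
 *	// type, so extent_compress_type(bio_flags) == BTRFS_COMPRESS_ZLIB
 */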

struct extent_map_tree;

typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode,
					  struct page *page,
					  size_t pg_offset,
					  u64 start, u64 len,
					  int create);

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner,
			 void *private_data);
void extent_io_tree_release(struct extent_io_tree *tree);
int try_release_extent_mapping(struct page *page, gfp_t mask);
int try_release_extent_buffer(struct page *page);
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     struct extent_state **cached);

static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return lock_extent_bits(tree, start, end, NULL);
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
			  get_extent_t *get_extent, int mirror_num);
int __init extent_io_init(void);
void __cold extent_io_exit(void);

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end,
		     u64 max_bytes, unsigned bits, int contig);

void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, int filled,
		   struct extent_state *cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			     unsigned bits, struct extent_changeset *changeset);
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned bits, int wake, int delete,
		     struct extent_state **cached);
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, int wake, int delete,
		       struct extent_state **cached, gfp_t mask,
		       struct extent_changeset *changeset);

static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL);
}
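
/*
 * Locking sketch (illustrative, not from the original header): a range is
 * locked by setting EXTENT_LOCKED on it and unlocked by clearing the bit,
 * so the helpers pair up like this:
 *
 *	lock_extent(tree, start, end);
 *	// ... operate on the byte range [start, end] ...
 *	unlock_extent(tree, start, end);
 *
 * The *_cached variants below take a cached extent_state so the unlock
 * can skip the tree search.
 */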

static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
				       u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				  GFP_NOFS, NULL);
}

static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
		u64 start, u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				  GFP_ATOMIC, NULL);
}

static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
				    u64 end, unsigned bits)
{
	int wake = 0;

	if (bits & EXTENT_LOCKED)
		wake = 1;

	return clear_extent_bit(tree, start, end, bits, wake, 0, NULL);
}

int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   unsigned bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask);
int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
			   unsigned bits);

static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
		u64 end, unsigned bits)
{
	return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
}

static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				  cached_state, GFP_NOFS, NULL);
}

static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
		u64 end, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
			      NULL, mask);
}

static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
				     u64 end, struct extent_state **cached)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, cached);
}

int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, unsigned clear_bits,
		       struct extent_state **cached_state);

static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
				      u64 end, unsigned int extra_bits,
				      struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
		u64 end)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL,
			      GFP_NOFS);
}

static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
			      cached_state, mask);
}

int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, unsigned bits,
			  struct extent_state **cached_state);
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
				 u64 *start_ret, u64 *end_ret, unsigned bits);
int extent_invalidatepage(struct extent_io_tree *tree,
			  struct page *page, unsigned long offset);
int extent_write_full_page(struct page *page, struct writeback_control *wbc);
int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
			      int mode);
int extent_writepages(struct address_space *mapping,
		      struct writeback_control *wbc);
int btree_write_cache_pages(struct address_space *mapping,
			    struct writeback_control *wbc);
int extent_readpages(struct address_space *mapping, struct list_head *pages,
		     unsigned nr_pages);
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		  __u64 start, __u64 len);
void set_page_extent_mapped(struct page *page);

struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
					  u64 start);
struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						  u64 start, unsigned long len);
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						u64 start);
struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
					 u64 start);
void free_extent_buffer(struct extent_buffer *eb);
void free_extent_buffer_stale(struct extent_buffer *eb);
#define WAIT_NONE	0
#define WAIT_COMPLETE	1
#define WAIT_PAGE_LOCK	2
int read_extent_buffer_pages(struct extent_buffer *eb, int wait,
			     int mirror_num);
void wait_on_extent_buffer_writeback(struct extent_buffer *eb);

static inline int num_extent_pages(const struct extent_buffer *eb)
{
	return (round_up(eb->start + eb->len, PAGE_SIZE) >> PAGE_SHIFT) -
	       (eb->start >> PAGE_SHIFT);
}
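
/*
 * Worked example (a sketch, assuming 4K pages): an extent buffer with
 * eb->start == 36864 and eb->len == 16384 ends at byte 53248, so:
 *
 *	(round_up(53248, 4096) >> 12) - (36864 >> 12) == 13 - 9 == 4
 *
 * i.e. the buffer spans four pages; an unaligned start or end can add
 * a partially-covered page to the count.
 */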

static inline void extent_buffer_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->refs);
}

static inline int extent_buffer_uptodate(struct extent_buffer *eb)
{
	return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
}

int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
			 unsigned long start, unsigned long len);
void read_extent_buffer(const struct extent_buffer *eb, void *dst,
			unsigned long start,
			unsigned long len);
int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
				       void __user *dst, unsigned long start,
				       unsigned long len);
void write_extent_buffer_fsid(struct extent_buffer *eb, const void *src);
void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
					 const void *src);
void write_extent_buffer(struct extent_buffer *eb, const void *src,
			 unsigned long start, unsigned long len);
void copy_extent_buffer_full(struct extent_buffer *dst,
			     struct extent_buffer *src);
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len);
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len);
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len);
void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
			   unsigned long len);
int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
			   unsigned long pos);
void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
			      unsigned long pos, unsigned long len);
void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
				unsigned long pos, unsigned long len);
void clear_extent_buffer_dirty(struct extent_buffer *eb);
bool set_extent_buffer_dirty(struct extent_buffer *eb);
void set_extent_buffer_uptodate(struct extent_buffer *eb);
void clear_extent_buffer_uptodate(struct extent_buffer *eb);
int extent_buffer_under_io(struct extent_buffer *eb);
int map_private_extent_buffer(const struct extent_buffer *eb,
			      unsigned long offset, unsigned long min_len,
			      char **map, unsigned long *map_start,
			      unsigned long *map_len);
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
				  struct page *locked_page,
				  unsigned bits_to_clear,
				  unsigned long page_ops);
struct bio *btrfs_bio_alloc(u64 first_byte);
struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs);
struct bio *btrfs_bio_clone(struct bio *bio);
struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size);

struct btrfs_fs_info;
struct btrfs_inode;

int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
		      u64 length, u64 logical, struct page *page,
		      unsigned int pg_offset, int mirror_num);
int clean_io_failure(struct btrfs_fs_info *fs_info,
		     struct extent_io_tree *failure_tree,
		     struct extent_io_tree *io_tree, u64 start,
		     struct page *page, u64 ino, unsigned int pg_offset);
void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
int btrfs_repair_eb_io_failure(struct extent_buffer *eb, int mirror_num);

/*
 * When IO fails, either with EIO or because csum verification fails, we
 * try other mirrors that might have a good copy of the data. This
 * io_failure_record is used to record state as we go through all the
 * mirrors. If another mirror has good data, the page is set up to date
 * and things continue. If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	struct page *page;
	u64 start;
	u64 len;
	u64 logical;
	unsigned long bio_flags;
	int this_mirror;
	int failed_mirror;
	int in_validation;
};

void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
				  u64 end);
int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
				struct io_failure_record **failrec_ret);
bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages,
			    struct io_failure_record *failrec, int fail_mirror);
struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
				    struct io_failure_record *failrec,
				    struct page *page, int pg_offset, int icsum,
				    bio_end_io_t *endio_func, void *data);
int free_io_failure(struct extent_io_tree *failure_tree,
		    struct extent_io_tree *io_tree,
		    struct io_failure_record *rec);
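
/*
 * Retry-flow sketch assembled from the declarations above (illustrative
 * only; failed_bio, failed_bio_pages, failed_mirror, icsum, endio_func
 * and data are assumed to come from the failed read's context):
 *
 *	struct io_failure_record *failrec;
 *	struct bio *repair_bio;
 *
 *	ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
 *	if (ret)
 *		return ret;
 *	if (!btrfs_check_repairable(inode, failed_bio_pages, failrec,
 *				    failed_mirror))
 *		return -EIO;
 *	repair_bio = btrfs_create_repair_bio(inode, failed_bio, failrec,
 *					     page, pg_offset, icsum,
 *					     endio_func, data);
 *	// submit repair_bio against failrec->this_mirror and let the
 *	// end_io callback decide whether another mirror must be tried
 */
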
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
bool find_lock_delalloc_range(struct inode *inode,
			      struct page *locked_page, u64 *start,
			      u64 *end);
#endif
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
					       u64 start);

#endif