/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#ifndef BTRFS_INODE_H
#define BTRFS_INODE_H

#include <linux/hash.h>
#include <linux/refcount.h>
#include "extent_map.h"
#include "extent_io.h"
#include "ordered-data.h"
#include "delayed-inode.h"

/*
 * ordered_data_close is set by truncate when a file that used
 * to have good data has been truncated to zero.  When it is set
 * the btrfs file release call will add this inode to the
 * ordered operations list so that we make sure to flush out any
 * new data the application may have written before commit.
 */
enum {
	BTRFS_INODE_FLUSH_ON_CLOSE,
	BTRFS_INODE_DUMMY,
	BTRFS_INODE_IN_DEFRAG,
	BTRFS_INODE_HAS_ASYNC_EXTENT,
	/*
	 * Always set under the VFS' inode lock, otherwise it can cause races
	 * during fsync (we start as a fast fsync and then end up in a full
	 * fsync racing with ordered extent completion).
	 */
	BTRFS_INODE_NEEDS_FULL_SYNC,
	BTRFS_INODE_COPY_EVERYTHING,
	BTRFS_INODE_IN_DELALLOC_LIST,
	BTRFS_INODE_HAS_PROPS,
	BTRFS_INODE_SNAPSHOT_FLUSH,
	/*
	 * Set and used when logging an inode and it serves to signal that an
	 * inode does not have xattrs, so subsequent fsyncs can avoid searching
	 * for xattrs to log. This bit must be cleared whenever an xattr is
	 * added to an inode.
	 */
	BTRFS_INODE_NO_XATTRS,
	/*
	 * Set when we are in a context where we need to start a transaction and
	 * have dirty pages with the respective file range locked. This is to
	 * ensure that when reserving space for the transaction, if we are low
	 * on available space and need to flush delalloc, we will not flush
	 * delalloc for this inode, because that could result in a deadlock (on
	 * the file range, inode's io_tree).
	 */
	BTRFS_INODE_NO_DELALLOC_FLUSH,
};
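
/*
 * Example (illustrative sketch; 'inode' here is assumed to be a VFS
 * struct inode *): the enum values above are bit numbers for
 * btrfs_inode::runtime_flags and are manipulated with the generic atomic
 * bitops, e.g.:
 *
 *	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
 *	if (test_bit(BTRFS_INODE_NO_XATTRS, &BTRFS_I(inode)->runtime_flags))
 *		return 0;
 *	clear_bit(BTRFS_INODE_NO_XATTRS, &BTRFS_I(inode)->runtime_flags);
 */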

/* in memory btrfs inode */
struct btrfs_inode {
	/* which subvolume this inode belongs to */
	struct btrfs_root *root;

	/* key used to find this inode on disk.  This is used by the code
	 * to read in roots of subvolumes
	 */
	struct btrfs_key location;

	/*
	 * Lock for counters and all fields used to determine if the inode is in
	 * the log or not (last_trans, last_sub_trans, last_log_commit,
	 * logged_trans).
	 */
	spinlock_t lock;

	/* the extent_tree has caches of all the extent mappings to disk */
	struct extent_map_tree extent_tree;

	/* the io_tree does range state (DIRTY, LOCKED etc) */
	struct extent_io_tree io_tree;

	/* special utility tree used to record which mirrors have already been
	 * tried when checksums fail for a given block
	 */
	struct extent_io_tree io_failure_tree;

	/*
	 * Keep track of where the inode has extent items mapped in order to
	 * make sure the i_size adjustments are accurate
	 */
	struct extent_io_tree file_extent_tree;

	/* held while logging the inode in tree-log.c */
	struct mutex log_mutex;

	/* used to order data wrt metadata */
	struct btrfs_ordered_inode_tree ordered_tree;

	/* list of all the delalloc inodes in the FS. There are times we need
	 * to write all the delalloc pages to disk, and this list is used
	 * to walk them all.
	 */
	struct list_head delalloc_inodes;

	/* node for the red-black tree that links inodes in subvolume root */
	struct rb_node rb_node;

	unsigned long runtime_flags;

	/* Keep track of who's O_SYNC/fsyncing currently */
	atomic_t sync_writers;

	/* full 64 bit generation number, struct vfs_inode doesn't have a big
	 * enough field for this.
	 */
	u64 generation;

	/*
	 * transid of the trans_handle that last modified this inode
	 */
	u64 last_trans;

	/*
	 * transid that last logged this inode
	 */
	u64 logged_trans;

	/*
	 * log transid when this inode was last modified
	 */
	int last_sub_trans;

	/* a local copy of root's last_log_commit */
	int last_log_commit;

	/* total number of bytes pending delalloc, used by stat to calc the
	 * real block usage of the file
	 */
	u64 delalloc_bytes;

	/*
	 * Total number of bytes pending delalloc that fall within a file
	 * range that is either a hole or beyond EOF (and no prealloc extent
	 * exists in the range). This is always <= delalloc_bytes.
	 */
	u64 new_delalloc_bytes;

	/*
	 * total number of bytes pending defrag, used by stat to check whether
	 * it needs COW.
	 */
	u64 defrag_bytes;

	/*
	 * the size of the file stored in the metadata on disk.  data=ordered
	 * means the in-memory i_size might be larger than the size on disk
	 * because not all the blocks are written yet.
	 */
	u64 disk_i_size;

	/*
	 * if this is a directory then index_cnt is the counter for the index
	 * number for new files that are created
	 */
	u64 index_cnt;

	/* Cache the directory index number to speed the dir/file remove */
	u64 dir_index;

	/* the fsync log has some corner cases that mean we have to check
	 * directories to see if any unlinks have been done before
	 * the directory was logged.  See tree-log.c for all the
	 * details
	 */
	u64 last_unlink_trans;

	/*
	 * The id/generation of the last transaction where this inode was
	 * either the source or the destination of a clone/dedupe operation.
	 * Used when logging an inode to know if there are shared extents that
	 * need special care when logging checksum items, to avoid duplicate
	 * checksum items in a log (which can lead to a corruption where we end
	 * up with missing checksum ranges after log replay).
	 * Protected by the vfs inode lock.
	 */
	u64 last_reflink_trans;

	/*
	 * Number of bytes outstanding that are going to need csums. This is
	 * used in ENOSPC accounting.
	 */
	u64 csum_bytes;

	/* flags field from the on disk inode */
	u32 flags;

	/*
	 * Counter to keep track of the number of extent items we may use due
	 * to delalloc and such. This is the number of extent items we think
	 * we'll end up using and have reserved metadata for.
	 */
	unsigned outstanding_extents;

	struct btrfs_block_rsv block_rsv;

	/*
	 * Cached values of inode properties
	 */
	unsigned prop_compress;		/* per-file compression algorithm */
	/*
	 * Force compression on the file using the defrag ioctl, could be
	 * different from prop_compress and takes precedence if set
	 */
	unsigned defrag_compress;

	struct btrfs_delayed_node *delayed_node;

	/* File creation time. */
	struct timespec64 i_otime;

	/* Hook into fs_info->delayed_iputs */
	struct list_head delayed_iput;

	/*
	 * To avoid races between lockless (i_mutex not held) direct IO writes
	 * and concurrent fsync requests. Direct IO writes must acquire read
	 * access on this semaphore for creating an extent map and its
	 * corresponding ordered extent. The fast fsync path must acquire write
	 * access on this semaphore before it collects ordered extents and
	 * extent maps.
	 */
	struct rw_semaphore dio_sem;

	struct inode vfs_inode;
};

static inline u32 btrfs_inode_sectorsize(const struct btrfs_inode *inode)
{
	return inode->root->fs_info->sectorsize;
}

static inline struct btrfs_inode *BTRFS_I(const struct inode *inode)
{
	return container_of(inode, struct btrfs_inode, vfs_inode);
}
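
/*
 * Example (illustrative sketch; 'file' is assumed to be a struct file *
 * passed into a VFS callback): BTRFS_I() converts the VFS inode embedded in
 * struct btrfs_inode back to the containing btrfs inode, and taking the
 * address of vfs_inode goes the other way:
 *
 *	struct btrfs_inode *bi = BTRFS_I(file_inode(file));
 *	struct inode *vfs_ino = &bi->vfs_inode;
 */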

static inline unsigned long btrfs_inode_hash(u64 objectid,
					     const struct btrfs_root *root)
{
	u64 h = objectid ^ (root->root_key.objectid * GOLDEN_RATIO_PRIME);

#if BITS_PER_LONG == 32
	h = (h >> 32) ^ (h & 0xffffffff);
#endif

	return (unsigned long)h;
}

static inline void btrfs_insert_inode_hash(struct inode *inode)
{
	unsigned long h = btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root);

	__insert_inode_hash(inode, h);
}

static inline u64 btrfs_ino(const struct btrfs_inode *inode)
{
	u64 ino = inode->location.objectid;

	/*
	 * !ino: btree_inode
	 * type == BTRFS_ROOT_ITEM_KEY: subvol dir
	 */
	if (!ino || inode->location.type == BTRFS_ROOT_ITEM_KEY)
		ino = inode->vfs_inode.i_ino;
	return ino;
}

static inline void btrfs_i_size_write(struct btrfs_inode *inode, u64 size)
{
	i_size_write(&inode->vfs_inode, size);
	inode->disk_i_size = size;
}

static inline bool btrfs_is_free_space_inode(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;

	if (root == root->fs_info->tree_root &&
	    btrfs_ino(inode) != BTRFS_BTREE_INODE_OBJECTID)
		return true;
	if (inode->location.objectid == BTRFS_FREE_INO_OBJECTID)
		return true;
	return false;
}

static inline bool is_data_inode(struct inode *inode)
{
	return btrfs_ino(BTRFS_I(inode)) != BTRFS_BTREE_INODE_OBJECTID;
}

static inline void btrfs_mod_outstanding_extents(struct btrfs_inode *inode,
						 int mod)
{
	lockdep_assert_held(&inode->lock);
	inode->outstanding_extents += mod;
	if (btrfs_is_free_space_inode(inode))
		return;
	trace_btrfs_inode_mod_outstanding_extents(inode->root, btrfs_ino(inode),
						  mod);
}
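
/*
 * Example (illustrative sketch): callers are expected to hold
 * btrfs_inode::lock around the modification, which is what the lockdep
 * assertion above enforces, e.g.:
 *
 *	spin_lock(&inode->lock);
 *	btrfs_mod_outstanding_extents(inode, 1);
 *	spin_unlock(&inode->lock);
 */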

/*
 * Called every time after doing a buffered, direct IO or memory mapped write.
 *
 * This is to ensure that if we write to a file that was previously fsynced in
 * the current transaction, then try to fsync it again in the same transaction,
 * we will know that there were changes in the file and that it needs to be
 * logged.
 */
static inline void btrfs_set_inode_last_sub_trans(struct btrfs_inode *inode)
{
	spin_lock(&inode->lock);
	inode->last_sub_trans = inode->root->log_transid;
	spin_unlock(&inode->lock);
}

static inline int btrfs_inode_in_log(struct btrfs_inode *inode, u64 generation)
{
	int ret = 0;

	spin_lock(&inode->lock);
	if (inode->logged_trans == generation &&
	    inode->last_sub_trans <= inode->last_log_commit &&
	    inode->last_sub_trans <= inode->root->last_log_commit) {
		/*
		 * After a ranged fsync we might have left some extent maps
		 * (that fall outside the fsync's range). So return false
		 * here if the list isn't empty, to make sure btrfs_log_inode()
		 * will be called and process those extent maps.
		 */
		smp_mb();
		if (list_empty(&inode->extent_tree.modified_extents))
			ret = 1;
	}
	spin_unlock(&inode->lock);
	return ret;
}
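
/*
 * Example (illustrative sketch of how an fsync path can use this; 'inode' is
 * assumed to be a struct btrfs_inode *, 'fs_info' its fs_info, and the local
 * variable name is hypothetical): when the inode is already fully covered by
 * the current log tree, a full btrfs_log_inode() pass can be skipped and
 * fsync only needs to wait for the existing log commit:
 *
 *	bool skip_inode_logging = false;
 *
 *	if (btrfs_inode_in_log(inode, fs_info->generation))
 *		skip_inode_logging = true;
 */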

struct btrfs_dio_private {
	struct inode *inode;
	u64 logical_offset;
	u64 disk_bytenr;
	u64 bytes;

	/*
	 * References to this structure. There is one reference per in-flight
	 * bio plus one while we're still setting up.
	 */
	refcount_t refs;

	/* dio_bio came from fs/direct-io.c */
	struct bio *dio_bio;

	/* Array of checksums */
	u8 csums[];
};
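
/*
 * Example (illustrative sketch, with a hypothetical cleanup helper): the
 * refcount above follows the usual "one reference per in-flight bio plus one
 * for the submitter" pattern, roughly:
 *
 *	refcount_set(&dip->refs, 1);		// during setup
 *	refcount_inc(&dip->refs);		// per submitted bio
 *
 *	if (refcount_dec_and_test(&dip->refs))	// per completed bio, and once
 *		free_dio_private(dip);		// when submission finishes
 */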

/* Array of bytes with variable length, hexadecimal format 0x1234 */
#define CSUM_FMT				"0x%*phN"
#define CSUM_FMT_VALUE(size, bytes)		size, bytes

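/*
 * Example (illustrative sketch): CSUM_FMT relies on the printk extension
 * %*phN, which prints a byte array as contiguous hex digits, with the field
 * width giving the number of bytes. Assuming 'csum' points to csum_size
 * bytes of checksum data:
 *
 *	btrfs_info(fs_info, "checksum " CSUM_FMT,
 *		   CSUM_FMT_VALUE(csum_size, csum));
 */
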
static inline void btrfs_print_data_csum_error(struct btrfs_inode *inode,
		u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_super_block *sb = root->fs_info->super_copy;
	const u16 csum_size = btrfs_super_csum_size(sb);

	/* Output minus objectid, which is more meaningful */
	if (root->root_key.objectid >= BTRFS_LAST_FREE_OBJECTID)
		btrfs_warn_rl(root->fs_info,
"csum failed root %lld ino %lld off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			root->root_key.objectid, btrfs_ino(inode),
			logical_start,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
	else
		btrfs_warn_rl(root->fs_info,
"csum failed root %llu ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			root->root_key.objectid, btrfs_ino(inode),
			logical_start,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
}

#endif