blob: c3a2325e64a4160f48bc0d76c08f80e6d65ecab6 [file] [log] [blame]
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (C) 2007 Oracle. All rights reserved.
4 */
5
6#ifndef BTRFS_ORDERED_DATA_H
7#define BTRFS_ORDERED_DATA_H
8
/* one of these per inode */
struct btrfs_ordered_inode_tree {
	/* protects 'tree' and 'last' below */
	spinlock_t lock;
	/* rbtree of this inode's ordered extents (presumably keyed by file offset — confirm in ordered-data.c) */
	struct rb_root tree;
	/* NOTE(review): looks like a cache of the last node visited; may be NULL — verify against lookup code */
	struct rb_node *last;
};
15
/*
 * A batch of data checksums attached to an ordered extent, to be inserted
 * into the csum tree once the extent's IO completes (see
 * btrfs_add_ordered_sum() below).
 */
struct btrfs_ordered_sum {
	/* bytenr is the start of this extent on disk */
	u64 bytenr;

	/*
	 * this is the length in bytes covered by the sums array below.
	 */
	int len;
	/* links this entry into its ordered extent's 'list' of csums */
	struct list_head list;
	/*
	 * last field is a variable length array of csums
	 * (flexible array member; allocation size comes from
	 * btrfs_ordered_sum_size() below)
	 */
	u8 sums[];
};
28
/*
 * bits for the flags field:
 *
 * These enumerators are bit *numbers* (not masks) indexing into
 * btrfs_ordered_extent::flags.
 *
 * BTRFS_ORDERED_IO_DONE is set when all of the blocks are written.
 * It is used to make sure metadata is inserted into the tree only once
 * per extent.
 *
 * BTRFS_ORDERED_COMPLETE is set when the extent is removed from the
 * rbtree, just before waking any waiters. It is used to indicate the
 * IO is done and any metadata is inserted into the tree.
 */
enum {
	/* set when all the pages are written */
	BTRFS_ORDERED_IO_DONE,
	/* set when removed from the tree */
	BTRFS_ORDERED_COMPLETE,
	/* set when we want to write in place */
	BTRFS_ORDERED_NOCOW,
	/* writing a zlib compressed extent */
	BTRFS_ORDERED_COMPRESSED,
	/* set when writing to preallocated extent */
	BTRFS_ORDERED_PREALLOC,
	/* set when we're doing DIO with this extent */
	BTRFS_ORDERED_DIRECT,
	/* We had an io error when writing this out */
	BTRFS_ORDERED_IOERR,
	/* Set when we have to truncate an extent */
	BTRFS_ORDERED_TRUNCATED,
	/* Regular IO for COW */
	BTRFS_ORDERED_REGULAR,
	/* Used during fsync to track already logged extents */
	BTRFS_ORDERED_LOGGED,
	/* We have already logged all the csums of the ordered extent */
	BTRFS_ORDERED_LOGGED_CSUM,
	/* We wait for this extent to complete in the current transaction */
	BTRFS_ORDERED_PENDING,
};
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000066
/*
 * Tracks a range of a file whose data is being written out; lives in the
 * per-inode btrfs_ordered_inode_tree until the IO (and associated metadata
 * insertion) completes.
 */
struct btrfs_ordered_extent {
	/* logical offset in the file */
	u64 file_offset;

	/*
	 * These fields directly correspond to the same fields in
	 * btrfs_file_extent_item.
	 */
	u64 disk_bytenr;
	u64 num_bytes;
	u64 disk_num_bytes;

	/* number of bytes that still need writing */
	u64 bytes_left;

	/*
	 * the end of the ordered extent which is behind it but
	 * didn't update disk_i_size. Please see the comment of
	 * btrfs_ordered_update_i_size();
	 */
	u64 outstanding_isize;

	/*
	 * If we get truncated we need to adjust the file extent we enter for
	 * this ordered extent so that we do not expose stale data.
	 */
	u64 truncated_len;

	/* flags (bit numbers from the anonymous enum above) */
	unsigned long flags;

	/* compression algorithm */
	int compress_type;

	/* Qgroup reserved space */
	int qgroup_rsv;

	/* reference count (dropped via btrfs_put_ordered_extent()) */
	refcount_t refs;

	/* the inode we belong to */
	struct inode *inode;

	/* list of checksums (btrfs_ordered_sum) for insertion when the extent io is done */
	struct list_head list;

	/* used for fast fsyncs */
	struct list_head log_list;

	/* used to wait for the BTRFS_ORDERED_COMPLETE bit */
	wait_queue_head_t wait;

	/* our friendly rbtree entry (in btrfs_ordered_inode_tree::tree) */
	struct rb_node rb_node;

	/* a per root list of all the pending ordered extents */
	struct list_head root_extent_list;

	struct btrfs_work work;

	/*
	 * NOTE(review): completion/flush_work/work_list appear to support
	 * asynchronous flushing/waiting — confirm usage in ordered-data.c.
	 */
	struct completion completion;
	struct btrfs_work flush_work;
	struct list_head work_list;
};
131
132/*
133 * calculates the total size you need to allocate for an ordered sum
134 * structure spanning 'bytes' in the file
135 */
136static inline int btrfs_ordered_sum_size(struct btrfs_fs_info *fs_info,
137 unsigned long bytes)
138{
139 int num_sectors = (int)DIV_ROUND_UP(bytes, fs_info->sectorsize);
140 int csum_size = btrfs_super_csum_size(fs_info->super_copy);
141
142 return sizeof(struct btrfs_ordered_sum) + num_sectors * csum_size;
143}
144
145static inline void
146btrfs_ordered_inode_tree_init(struct btrfs_ordered_inode_tree *t)
147{
148 spin_lock_init(&t->lock);
149 t->tree = RB_ROOT;
150 t->last = NULL;
151}
152
/*
 * API for creating, looking up, waiting on and tearing down ordered
 * extents. Implementations live in fs/btrfs/ordered-data.c; comments here
 * reflect only what the declarations show — verify details at the
 * definitions.
 */

/* Drop one reference (entry->refs); presumably frees on the last put. */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry);
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
				 struct btrfs_ordered_extent *entry);
/*
 * Account 'io_size' bytes of completed IO against the ordered extent
 * covering 'file_offset'; '*cached' may carry a looked-up extent between
 * calls — confirm exact contract in ordered-data.c.
 */
int btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate);
int btrfs_dec_test_first_ordered_pending(struct btrfs_inode *inode,
					 struct btrfs_ordered_extent **cached,
					 u64 *file_offset, u64 io_size,
					 int uptodate);
/* Creation variants: plain COW, direct IO, and compressed writes. */
int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
			     u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes,
			     int type);
int btrfs_add_ordered_extent_dio(struct btrfs_inode *inode, u64 file_offset,
				 u64 disk_bytenr, u64 num_bytes,
				 u64 disk_num_bytes, int type);
int btrfs_add_ordered_extent_compress(struct btrfs_inode *inode, u64 file_offset,
				      u64 disk_bytenr, u64 num_bytes,
				      u64 disk_num_bytes, int type,
				      int compress_type);
/* Attach a btrfs_ordered_sum to the extent's csum list (see struct above). */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum);
/* Lookup helpers; NOTE(review): likely return a referenced extent or NULL — confirm. */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
							 u64 file_offset);
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry, int wait);
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len);
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset);
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode,
		u64 file_offset,
		u64 len);
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
					   struct list_head *list);
int btrfs_find_ordered_sum(struct btrfs_inode *inode, u64 offset,
			   u64 disk_bytenr, u8 *sum, int len);
/* Flush/wait over a root or the whole fs, limited to 'nr' extents in a range. */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len);
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			      const u64 range_start, const u64 range_len);
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state);
/* Module-lifetime setup/teardown of this subsystem's caches. */
int __init ordered_data_init(void);
void __cold ordered_data_exit(void);
198
199#endif