/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BACKING_DEV_DEFS_H
#define __LINUX_BACKING_DEV_DEFS_H

#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu-refcount.h>
#include <linux/flex_proportions.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kref.h>
#include <linux/refcount.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in bdi_writeback.state
 */
enum wb_state {
	WB_registered,		/* bdi_register() was done */
	WB_writeback_running,	/* Writeback is in progress */
	WB_has_dirty_io,	/* Dirty inodes on ->b_{dirty|io|more_io} */
	WB_start_all,		/* nr_pages == 0 (all) work pending */
};

enum wb_congested_state {
	WB_async_congested,	/* The async (write) queue is getting full */
	WB_sync_congested,	/* The sync queue is getting full */
};

typedef int (congested_fn)(void *, int);
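
/*
 * An illustrative sketch (not part of the kernel sources): a stacked
 * driver such as md/dm supplies a congested_fn callback which receives
 * its ->congested_data pointer and a mask of 1 << WB_async_congested
 * and/or 1 << WB_sync_congested, and returns the subset of those bits
 * that are currently congested.  struct my_dev and my_dev_busy() are
 * hypothetical:
 *
 *	static int my_congested(void *data, int bdi_bits)
 *	{
 *		struct my_dev *dev = data;
 *
 *		return my_dev_busy(dev) ? bdi_bits : 0;
 *	}
 *
 *	bdi->congested_fn = my_congested;
 *	bdi->congested_data = dev;
 */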

enum wb_stat_item {
	WB_RECLAIMABLE,
	WB_WRITEBACK,
	WB_DIRTIED,
	WB_WRITTEN,
	NR_WB_STAT_ITEMS
};

#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
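
/*
 * A sketch of how these stats are used: each wb keeps one percpu counter
 * per wb_stat_item, updated in batches of WB_STAT_BATCH.  The accessors
 * (inc_wb_stat(), dec_wb_stat(), wb_stat(), ...) are declared in
 * linux/backing-dev.h, e.g.:
 *
 *	inc_wb_stat(wb, WB_WRITEBACK);	// a page entered writeback
 *	...
 *	dec_wb_stat(wb, WB_WRITEBACK);	// writeback of it completed
 *	nr = wb_stat(wb, WB_WRITEBACK);	// approximate per-wb read
 */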

/*
 * why some writeback work was initiated
 */
enum wb_reason {
	WB_REASON_BACKGROUND,
	WB_REASON_VMSCAN,
	WB_REASON_SYNC,
	WB_REASON_PERIODIC,
	WB_REASON_LAPTOP_TIMER,
	WB_REASON_FREE_MORE_MEM,
	WB_REASON_FS_FREE_SPACE,
	/*
	 * There is no bdi forker thread any more and work is done by the
	 * emergency worker; however, this reason is visible to userland
	 * through tracepoints, and we keep exposing exactly the same
	 * information, so the name no longer matches what happens.
	 */
	WB_REASON_FORKER_THREAD,
	WB_REASON_FOREIGN_FLUSH,

	WB_REASON_MAX,
};

struct wb_completion {
	atomic_t		cnt;
	wait_queue_head_t	*waitq;
};

#define __WB_COMPLETION_INIT(_waitq)	\
	(struct wb_completion){ .cnt = ATOMIC_INIT(1), .waitq = (_waitq) }

/*
 * If one wants to wait for one or more wb_writeback_works, each work's
 * ->done should be set to a wb_completion defined using the following
 * macro.  Once all work items are issued with wb_queue_work(), the caller
 * can wait for the completion of all using wb_wait_for_completion().  Work
 * items which are waited upon aren't freed automatically on completion.
 */
#define WB_COMPLETION_INIT(bdi)		__WB_COMPLETION_INIT(&(bdi)->wb_waitq)

#define DEFINE_WB_COMPLETION(cmpl, bdi)	\
	struct wb_completion cmpl = WB_COMPLETION_INIT(bdi)
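
/*
 * A sketch of the pattern described above.  wb_queue_work() and
 * wb_wait_for_completion() live outside this header; fs/fs-writeback.c
 * is where the real users of this pattern are:
 *
 *	DEFINE_WB_COMPLETION(done, bdi);
 *	struct wb_writeback_work work = { ... };
 *
 *	work.done = &done;
 *	wb_queue_work(wb, &work);	// may queue several works
 *	wb_wait_for_completion(&done);	// waits for all of them
 */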

/*
 * For cgroup writeback, multiple wb's may map to the same blkcg.  Those
 * wb's can operate mostly independently but should share the congested
 * state.  To facilitate such sharing, the congested state is tracked using
 * the following struct which is created on demand, indexed by blkcg ID on
 * its bdi, and refcounted.
 */
struct bdi_writeback_congested {
	unsigned long state;		/* WB_[a]sync_congested flags */
	refcount_t refcnt;		/* nr of attached wb's and blkg */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct backing_dev_info *__bdi;	/* the associated bdi, set to NULL
					 * on bdi unregistration. For memcg-wb
					 * internal use only! */
	int blkcg_id;			/* ID of the associated blkcg */
	struct rb_node rb_node;		/* on bdi->cgwb_congestion_tree */
#endif
};

/*
 * Each wb (bdi_writeback) can perform writeback operations, is measured
 * and throttled, independently.  Without cgroup writeback, each bdi
 * (backing_dev_info) is served by its embedded bdi->wb.
 *
 * On the default hierarchy, blkcg implicitly enables memcg.  This allows
 * using memcg's page ownership for attributing writeback IOs, and every
 * memcg - blkcg combination can be served by its own wb by assigning a
 * dedicated wb to each memcg, which enables isolation across different
 * cgroups and propagation of IO back pressure down from the IO layer up
 * to the tasks which are generating the dirty pages to be written back.
 *
 * A cgroup wb is indexed on its bdi by the ID of the associated memcg,
 * refcounted with the number of inodes attached to it, and pins the memcg
 * and the corresponding blkcg.  As the corresponding blkcg for a memcg may
 * change as blkcg is disabled and enabled higher up in the hierarchy, a wb
 * is tested for blkcg after lookup and removed from index on mismatch so
 * that a new wb for the combination can be created.
 */
struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */

	unsigned long state;		/* Always use atomic bitops on this */
	unsigned long last_old_flush;	/* last old data flush */

	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
	struct list_head b_dirty_time;	/* time stamps are dirty */
	spinlock_t list_lock;		/* protects the b_* lists */

	struct percpu_counter stat[NR_WB_STAT_ITEMS];

	struct bdi_writeback_congested *congested;

	unsigned long bw_time_stamp;	/* last time write bw is updated */
	unsigned long dirtied_stamp;
	unsigned long written_stamp;	/* pages written at bw_time_stamp */
	unsigned long write_bandwidth;	/* the estimated write bandwidth */
	unsigned long avg_write_bandwidth; /* further smoothed write bw, > 0 */

	/*
	 * The base dirty throttle rate, recalculated every 200ms.
	 * All the bdi tasks' dirty rates will be curbed under it.
	 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
	 * in small steps and is much smoother and more stable than the
	 * latter.
	 */
	unsigned long dirty_ratelimit;
	unsigned long balanced_dirty_ratelimit;

	struct fprop_local_percpu completions;
	int dirty_exceeded;
	enum wb_reason start_all_reason;

	spinlock_t work_lock;		/* protects work_list & dwork scheduling */
	struct list_head work_list;
	struct delayed_work dwork;	/* work item used for writeback */

	unsigned long dirty_sleep;	/* last wait */

	struct list_head bdi_node;	/* anchored at bdi->wb_list */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct percpu_ref refcnt;	/* used only for !root wb's */
	struct fprop_local_percpu memcg_completions;
	struct cgroup_subsys_state *memcg_css; /* the associated memcg */
	struct cgroup_subsys_state *blkcg_css; /* and blkcg */
	struct list_head memcg_node;	/* anchored at memcg->cgwb_list */
	struct list_head blkcg_node;	/* anchored at blkcg->cgwb_list */

	union {
		struct work_struct release_work;
		struct rcu_head rcu;
	};
#endif
};

struct backing_dev_info {
	u64 id;
	struct rb_node rb_node;		/* keyed by ->id */
	struct list_head bdi_list;
	unsigned long ra_pages;		/* max readahead in PAGE_SIZE units */
	unsigned long io_pages;		/* max allowed IO size */
	congested_fn *congested_fn;	/* Function pointer if device is md/dm */
	void *congested_data;		/* Pointer to aux data for congested func */

	const char *name;

	struct kref refcnt;		/* Reference counter for the structure */
	unsigned int capabilities;	/* Device capabilities */
	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	/*
	 * Sum of avg_write_bw of wbs with dirty inodes.  > 0 if there are
	 * any dirty wbs; bdi_has_dirty() depends on this.
	 */
	atomic_long_t tot_write_bandwidth;

	struct bdi_writeback wb;	/* the root writeback info for this bdi */
	struct list_head wb_list;	/* list of all wbs */
#ifdef CONFIG_CGROUP_WRITEBACK
	struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
	struct rb_root cgwb_congested_tree; /* their congested states */
	struct mutex cgwb_release_mutex;  /* protect shutdown of wb structs */
	struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
#else
	struct bdi_writeback_congested *wb_congested;
#endif
	wait_queue_head_t wb_waitq;

	struct device *dev;
	struct device *owner;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
#endif
};
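
/*
 * A sketch of typical bdi setup by a driver, assuming the helpers
 * declared in linux/backing-dev.h (error handling elided; "mydev-%d"
 * and id are hypothetical):
 *
 *	struct backing_dev_info *bdi;
 *
 *	bdi = bdi_alloc(GFP_KERNEL);		// may return NULL
 *	bdi->ra_pages = VM_READAHEAD_PAGES;
 *	err = bdi_register(bdi, "mydev-%d", id);
 *	...
 *	bdi_put(bdi);				// drops ->refcnt
 */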

enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

void clear_wb_congested(struct bdi_writeback_congested *congested, int sync);
void set_wb_congested(struct bdi_writeback_congested *congested, int sync);

static inline void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	clear_wb_congested(bdi->wb.congested, sync);
}

static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	set_wb_congested(bdi->wb.congested, sync);
}
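
/*
 * A sketch of how the congestion helpers are used: a filesystem or driver
 * marks its bdi congested when its queues fill up and clears the state
 * when they drain, passing one of the BLK_RW_* constants above as @sync.
 * inode_to_bdi() is declared in linux/backing-dev.h; nfs, for example,
 * follows this pattern:
 *
 *	set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
 *	...
 *	clear_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
 */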

struct wb_lock_cookie {
	bool locked;
	unsigned long flags;
};
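
/*
 * A sketch of how wb_lock_cookie is used: unlocked_inode_to_wb_begin()
 * and unlocked_inode_to_wb_end() (declared in linux/backing-dev.h) hand
 * out an inode's wb without the caller holding a long-term reference:
 *
 *	struct wb_lock_cookie cookie = {};
 *	struct bdi_writeback *wb;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &cookie);
 *	// ... wb stays valid until the matching end() call ...
 *	unlocked_inode_to_wb_end(inode, &cookie);
 */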

#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * wb_tryget - try to increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline bool wb_tryget(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		return percpu_ref_tryget(&wb->refcnt);
	return true;
}

/**
 * wb_get - increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline void wb_get(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		percpu_ref_get(&wb->refcnt);
}

/**
 * wb_put - decrement a wb's refcount
 * @wb: bdi_writeback to put
 */
static inline void wb_put(struct bdi_writeback *wb)
{
	if (WARN_ON_ONCE(!wb->bdi)) {
		/*
		 * A driver bug might cause a file to be removed before bdi was
		 * initialized.
		 */
		return;
	}

	if (wb != &wb->bdi->wb)
		percpu_ref_put(&wb->refcnt);
}

/**
 * wb_dying - is a wb dying?
 * @wb: bdi_writeback of interest
 *
 * Returns whether @wb is unlinked and being drained.
 */
static inline bool wb_dying(struct bdi_writeback *wb)
{
	return percpu_ref_is_dying(&wb->refcnt);
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool wb_tryget(struct bdi_writeback *wb)
{
	return true;
}

static inline void wb_get(struct bdi_writeback *wb)
{
}

static inline void wb_put(struct bdi_writeback *wb)
{
}

static inline bool wb_dying(struct bdi_writeback *wb)
{
	return false;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

#endif	/* __LINUX_BACKING_DEV_DEFS_H */