/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H

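/*
 * Defaults and upper bounds, in percent of cache in use, for the
 * bch_cutoff_writeback and bch_cutoff_writeback_sync tunables
 * consulted by should_writeback() below.
 */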
#define CUTOFF_WRITEBACK		40
#define CUTOFF_WRITEBACK_SYNC		70

#define CUTOFF_WRITEBACK_MAX		70
#define CUTOFF_WRITEBACK_SYNC_MAX	90

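/*
 * Per-pass limits for the writeback thread: at most this many keys,
 * and this much data (in 512-byte sectors), are written back in one
 * pass before the thread reschedules.
 */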
#define MAX_WRITEBACKS_IN_PASS		5
#define MAX_WRITESIZE_IN_PASS		5000	/* *512b */

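/*
 * Maximum and default values, in seconds, for the
 * writeback_rate_update_seconds tunable.
 */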
#define WRITEBACK_RATE_UPDATE_SECS_MAX		60
#define WRITEBACK_RATE_UPDATE_SECS_DEFAULT	5

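/*
 * Percentage of cache in use above which the writeback thread may
 * wake up the garbage collector to reclaim clean buckets.
 */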
#define BCH_AUTO_GC_DIRTY_THRESHOLD	50

/*
 * 14 (16384ths) is chosen so that each backing device gets a
 * reasonable fraction of the share, and the computation does not
 * blow up until individual backing devices reach a petabyte.
 */
#define WRITEBACK_SHARE_SHIFT	14

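/* Sum of the dirty sector counts across all stripes of a device. */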
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}

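/*
 * Map a sector offset to its stripe index; e.g. with a stripe_size of
 * 8192 sectors, offset 16384 lands in stripe 2.  Returns -EINVAL for
 * offsets beyond the last stripe.
 */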
static inline int offset_to_stripe(struct bcache_device *d,
					uint64_t offset)
{
	do_div(offset, d->stripe_size);

	/* d->nr_stripes is in range [1, INT_MAX] */
	if (unlikely(offset >= d->nr_stripes)) {
		pr_err("Invalid stripe %llu (>= nr_stripes %d).\n",
			offset, d->nr_stripes);
		return -EINVAL;
	}

	/*
	 * Here offset is definitely smaller than INT_MAX, so
	 * returning it as an int cannot overflow.
	 */
	return offset;
}

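/*
 * Check whether any stripe touched by the range
 * [offset, offset + nr_sectors) has dirty sectors.
 */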
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
					   uint64_t offset,
					   unsigned int nr_sectors)
{
	int stripe = offset_to_stripe(&dc->disk, offset);

	if (stripe < 0)
		return false;

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}

extern unsigned int bch_cutoff_writeback;
extern unsigned int bch_cutoff_writeback_sync;

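/*
 * Decide whether a write should be cached for writeback: only in
 * writeback mode, and never while detaching, above the sync cutoff,
 * or for discards.  Writes to already-dirty stripes are taken when
 * partial stripe writes are expensive; otherwise only sync, metadata
 * and priority requests qualify, or any write while cache use is at
 * or below bch_cutoff_writeback.
 */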
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned int cache_mode, bool would_skip)
{
	unsigned int in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    in_use > bch_cutoff_writeback_sync)
		return false;

	if (bio_op(bio) == REQ_OP_DISCARD)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return (op_is_sync(bio->bi_opf) ||
		bio->bi_opf & (REQ_META|REQ_PRIO) ||
		in_use <= bch_cutoff_writeback);
}

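/* Wake the writeback thread, if it was successfully created. */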
static inline void bch_writeback_queue(struct cached_dev *dc)
{
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		wake_up_process(dc->writeback_thread);
}

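/*
 * Note that the device now holds dirty data: on the 0 -> 1 transition
 * of has_dirty, persist BDEV_STATE_DIRTY in the superblock and kick
 * the writeback thread.
 */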
static inline void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);
	}
}

void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
				  uint64_t offset, int nr_sectors);

void bch_sectors_dirty_init(struct bcache_device *d);
void bch_cached_dev_writeback_init(struct cached_dev *dc);
int bch_cached_dev_writeback_start(struct cached_dev *dc);

#endif