/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H

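/*
 * Cutoffs are percentages of cache in use: CUTOFF_WRITEBACK and
 * CUTOFF_WRITEBACK_SYNC are the defaults for the bch_cutoff_writeback
 * and bch_cutoff_writeback_sync tunables declared below; the _MAX
 * values bound how far those tunables may be raised.
 */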
#define CUTOFF_WRITEBACK 40
#define CUTOFF_WRITEBACK_SYNC 70

#define CUTOFF_WRITEBACK_MAX 70
#define CUTOFF_WRITEBACK_SYNC_MAX 90

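/*
 * Limits on how much a single writeback pass may flush: at most
 * MAX_WRITEBACKS_IN_PASS keys and MAX_WRITESIZE_IN_PASS sectors
 * (512 bytes each) before the thread sleeps according to the
 * writeback rate.
 */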
#define MAX_WRITEBACKS_IN_PASS 5
#define MAX_WRITESIZE_IN_PASS 5000 /* *512b */

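/*
 * Interval, in seconds, at which the writeback rate is recomputed;
 * writeback_rate_update_seconds defaults to 5 and is capped at 60.
 */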
#define WRITEBACK_RATE_UPDATE_SECS_MAX 60
#define WRITEBACK_RATE_UPDATE_SECS_DEFAULT 5

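/*
 * If cache use is still above this percentage after a full writeback
 * scan, the writeback thread kicks garbage collection to reclaim
 * clean buckets.
 */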
#define BCH_AUTO_GC_DIRTY_THRESHOLD 50

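/* Upper bound on worker threads used by bch_sectors_dirty_init(). */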
#define BCH_DIRTY_INIT_THRD_MAX 64
/*
 * 14 (16384ths) is chosen so that each backing device's share is a
 * reasonable fraction of the total, without overflowing until
 * individual backing devices reach a petabyte.
 */
#define WRITEBACK_SHARE_SHIFT 14

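/*
 * Shared state for the worker threads that bch_sectors_dirty_init()
 * spawns to count up dirty sectors in parallel when a device is
 * attached.
 */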
struct bch_dirty_init_state;
struct dirty_init_thrd_info {
        struct bch_dirty_init_state *state;
        struct task_struct *thread;
};

struct bch_dirty_init_state {
        struct cache_set *c;
        struct bcache_device *d;
        int total_threads;
        int key_idx;                    /* next btree key to hand to a worker */
        spinlock_t idx_lock;            /* protects key_idx */
        atomic_t started;               /* workers still running */
        atomic_t enough;                /* set once all keys are claimed */
        wait_queue_head_t wait;         /* woken as workers finish */
        struct dirty_init_thrd_info infos[BCH_DIRTY_INIT_THRD_MAX];
};

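/* Total dirty sectors on a backing device, summed over all stripes. */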
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
        uint64_t i, ret = 0;

        for (i = 0; i < d->nr_stripes; i++)
                ret += atomic_read(d->stripe_sectors_dirty + i);

        return ret;
}

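/*
 * Map a sector offset to its stripe index; returns -EINVAL if the
 * offset lies beyond the device's last stripe.
 */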
static inline int offset_to_stripe(struct bcache_device *d,
                                   uint64_t offset)
{
        do_div(offset, d->stripe_size);

        /* d->nr_stripes is in range [1, INT_MAX] */
        if (unlikely(offset >= d->nr_stripes)) {
                pr_err("Invalid stripe %llu (>= nr_stripes %d).\n",
                       offset, d->nr_stripes);
                return -EINVAL;
        }

        /*
         * Here offset is definitely smaller than INT_MAX, so
         * returning it as an int can never overflow.
         */
        return offset;
}

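/*
 * True if any stripe touched by the nr_sectors starting at offset has
 * dirty data.
 */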
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
                                           uint64_t offset,
                                           unsigned int nr_sectors)
{
        int stripe = offset_to_stripe(&dc->disk, offset);

        if (stripe < 0)
                return false;

        while (1) {
                if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
                        return true;

                if (nr_sectors <= dc->disk.stripe_size)
                        return false;

                nr_sectors -= dc->disk.stripe_size;
                stripe++;
        }
}

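/* Runtime cutoffs, bounded by the CUTOFF_WRITEBACK*_MAX values above. */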
extern unsigned int bch_cutoff_writeback;
extern unsigned int bch_cutoff_writeback_sync;

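/*
 * Decide whether a write bio should be cached in writeback mode: never
 * while detaching, for discards, or once cache use passes the sync
 * cutoff; always when it touches an already-dirty stripe and partial
 * stripe writes are expensive; otherwise, unless the request was going
 * to bypass the cache anyway, only for sync, metadata or high-priority
 * bios, or while cache use is below the writeback cutoff.
 */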
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
                                    unsigned int cache_mode, bool would_skip)
{
        unsigned int in_use = dc->disk.c->gc_stats.in_use;

        if (cache_mode != CACHE_MODE_WRITEBACK ||
            test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
            in_use > bch_cutoff_writeback_sync)
                return false;

        if (bio_op(bio) == REQ_OP_DISCARD)
                return false;

        if (dc->partial_stripes_expensive &&
            bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
                                    bio_sectors(bio)))
                return true;

        if (would_skip)
                return false;

        return (op_is_sync(bio->bi_opf) ||
                bio->bi_opf & (REQ_META|REQ_PRIO) ||
                in_use <= bch_cutoff_writeback);
}

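/* Wake the writeback thread, if one has been started. */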
static inline void bch_writeback_queue(struct cached_dev *dc)
{
        if (!IS_ERR_OR_NULL(dc->writeback_thread))
                wake_up_process(dc->writeback_thread);
}

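/*
 * Note that the backing device now holds dirty data: persist the dirty
 * state in its superblock on the first transition and kick writeback.
 */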
static inline void bch_writeback_add(struct cached_dev *dc)
{
        if (!atomic_read(&dc->has_dirty) &&
            !atomic_xchg(&dc->has_dirty, 1)) {
                if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
                        SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
                        /* XXX: should do this synchronously */
                        bch_write_bdev_super(dc, NULL);
                }

                bch_writeback_queue(dc);
        }
}

void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
                                  uint64_t offset, int nr_sectors);

void bch_sectors_dirty_init(struct bcache_device *d);
void bch_cached_dev_writeback_init(struct cached_dev *dc);
int bch_cached_dev_writeback_start(struct cached_dev *dc);

#endif